ngram
listlengths
0
82k
[ "__init__(self, bot, cwd): \"\"\" Initialize class @param => DiscordBot: `bot`:", "class @param => DiscordBot: `bot`: Discord Bot Instance. @param =>", "=> str: `description`: Description of the news. @param => str:", "= key # For each sections for index_section, section in", "in the section for link in section[\"link\"]: # Get title,", "content of the requests content = requests.get(url).text # Parse the", "@{0}\".format(self.bot_username)) # Return the final Discord embeded message return news", "@param => str: `link`: Link of the rss feed. \"\"\"", "of the rss feed. @param => str: `url`: url of", "dict dict_news[str(index)] = args # Return the dict return dict_news", "launched if isinstance(wait_time, str): # Launch the function to clean", "Get the news of the rss feed. @param => str:", "self.json_rss[root][index_section][\"clean\"] = True # For each link in the section", "to the dict dict_news[str(index)] = args # Return the dict", "if the news already exists for value in dict_news.values(): #", "the Name, description and color on the left news =", "the feed is new. @param => str: `title`: Title of", "color) #Send to discord await self.rss_channel.send(embed=message) # Wait until the", "for key, sections in self.json_rss.items(): # Get the root name", "content = content + \"\\n\" + link news.add_field(name=title, value=content[:1024], inline=False)", "os.path.join(self.cwd, SQLITE_FOLDER_NAME) self.database = Database(self.db_path, SQLITE_FILE_NAME) def get_news(self, url): \"\"\"", "Get the number of news news_number = len(parser) # Construct", "name = section[\"name\"] # Get the time until the cleaning", "Hash the description hash_description = hashlib.sha256(bytes(description, \"utf-8\", errors=\"ignore\")).hexdigest() # write", "= requests.get(url).text # Parse the content parser = feedparser.parse(content) #", "of main.py file. 
\"\"\" # Discord self.bot = bot self.bot_username", "of the requests content = requests.get(url).text # Parse the content", "@param => dict: `json_rss`: JSON data of the RSS Flux.", "to launch relaunch the requests self.json_rss[root][index_section][\"clean\"] = True # For", "of the news. @param => str: `description`: Description of the", "the news. @param => str: `link`: Link of the news.", "parser[\"entries\"] # Get the number of news news_number = len(parser)", "from fts.database import Database from fts.cleandatabase import CleanDatabase class FluxRSS:", "CleanDatabase(root, name, wait_time, self.db_path, SQLITE_FILE_NAME) Thread.start() # Change the variable", "relaunch the requests self.json_rss[root][index_section][\"clean\"] = True # For each link", "=> str: `name`: Name set in const. Categorie of the", "import requests import asyncio import discord import hashlib import os", "section[\"clean\"] # Check if the cleaning database is already launched", "= CleanDatabase(root, name, wait_time, self.db_path, SQLITE_FILE_NAME) Thread.start() # Change the", "str: `content`: Content description of the news. @param => str:", "connection is not closed while not self.bot.is_closed(): # For each", "asyncio import discord import hashlib import os from const import", "for the root and name given wait_time = section[\"clean\"] #", "JSON data of the RSS Flux. 
\"\"\" # Show const", "= content + \"\\n\" + link news.add_field(name=title, value=content[:1024], inline=False) #Show", "\"\"\" Return True if the news in the feed is", "Return the dict return dict_news def is_new(self, root, name, title,", "# Get the link link = parser[index][\"links\"][0][\"href\"] # Set list", "While the connection is not closed while not self.bot.is_closed(): #", "link = value[2] # Check if the news is new", "# For each link in the section for link in", "<gh_stars>0 #!/usr/bin/python3 from urllib.parse import urlparse import feedparser import requests", "requests import asyncio import discord import hashlib import os from", "in a list for the value. \"\"\" dict_news = dict()", "name, title, description, link): \"\"\" Return True if the news", "discord import hashlib import os from const import CHANNEL_RSS, WAIT_UNTIL_NEW_CHECK,", "of the rss feed. \"\"\" # Hash description hash_description =", "title = parser[index][\"title\"] # Get the description description = parser[index][\"description\"]", "Website. @param => str: `name`: Name set in const. Categorie", "embeded_msg(self, root, name, title, content, link, color): \"\"\" Create the", "link in a dict dict_news = self.get_news(link) # Verify if", "= self.bot.get_channel(CHANNEL_RSS) # Path self.cwd = cwd # Database self.db_path", "\"\"\" # Hash description hash_description = hashlib.sha256(bytes(description, \"utf-8\", errors=\"ignore\")).hexdigest() #", "Database from fts.cleandatabase import CleanDatabase class FluxRSS: \"\"\" Class of", "DiscordBot: `bot`: Discord Bot Instance. @param => str: `cwd`: Current", "Instance. 
@param => str: `cwd`: Current Working Directory of main.py", "self.bot.get_channel(CHANNEL_RSS) # Path self.cwd = cwd # Database self.db_path =", "WAIT_UNTIL_NEW_CHECK, \\ SQLITE_FOLDER_NAME, SQLITE_FILE_NAME from fts.database import Database from fts.cleandatabase", "link link = parser[index][\"links\"][0][\"href\"] # Set list args = [", "# Return the check of the query return not self.database.isNewsExists(root,", "name given wait_time = section[\"clean\"] # Check if the cleaning", "name), description=\"News :\", color=(color or 0x00ff00)) #Set bot name and", "# Launch the function to clean the database Thread =", "of the rss feed. Return dict with an int index", "import hashlib import os from const import CHANNEL_RSS, WAIT_UNTIL_NEW_CHECK, \\", "`name`: Name set in const. Categorie of the news @param", "self.database = Database(self.db_path, SQLITE_FILE_NAME) def get_news(self, url): \"\"\" Get the", "of FluxRSS. Get news of the feedrss url parse in", "dict() # Get the content of the requests content =", "# For each sections for index_section, section in enumerate(sections): #", "For each sections for index_section, section in enumerate(sections): # Check", "an int index key and title, description and link in", "link): # Hash the description hash_description = hashlib.sha256(bytes(description, \"utf-8\", errors=\"ignore\")).hexdigest()", "content parser = feedparser.parse(content) # Set the root parser =", "url parse in args. \"\"\" def __init__(self, bot, cwd): \"\"\"", "def get_news(self, url): \"\"\" Get the news of the rss", "final Discord embeded message return news async def feedrss(self, json_rss):", "args # Return the dict return dict_news def is_new(self, root,", "sections in self.json_rss.items(): # Get the root name set in", "in footer news.set_footer(text=\"Generate by @{0}\".format(self.bot_username)) # Return the final Discord", "description and color on the left news = discord.Embed(title=\"{0} -", "Bot Instance. 
@param => str: `cwd`: Current Working Directory of", "news news_number = len(parser) # Construct the dict for index", "key, sections in self.json_rss.items(): # Get the root name set", "feed. @param => str: `url`: url of the rss feed.", "already exists for value in dict_news.values(): # Get title title", "- {1}\".format(root, name), description=\"News :\", color=(color or 0x00ff00)) #Set bot", "news is new if self.is_new(root, name, title, description, link): #", "the news in the feed is new. @param => str:", "the dict for index in range(news_number): # Get the title", "variable type of the clean line in json_rss to launch", "index in range(news_number): # Get the title title = parser[index][\"title\"]", "if \"color\" in section[\"custom\"].keys(): color = getattr(discord.Color, section[\"custom\"][\"color\"])() else: color", "Get the link link = parser[index][\"links\"][0][\"href\"] # Set list args", "the RSS Flux. \"\"\" # Show const for the format", "Get title, description and link in a dict dict_news =", "the title title = parser[index][\"title\"] # Get the description description", "the channel. @param => dict: `json_rss`: JSON data of the", "dict for index in range(news_number): # Get the title title", "in enumerate(sections): # Check customization of the section if \"custom\"", "str: `name`: Name set in const. Categorie of the news", "= Database(self.db_path, SQLITE_FILE_NAME) def get_news(self, url): \"\"\" Get the news", "dict_news.values(): # Get title title = value[0] # Get description", "news. @param => discord.Color: `color`: Color for the left panel.", "news. @param => str: `content`: Content description of the news.", "title, description, link): # Hash the description hash_description = hashlib.sha256(bytes(description,", "hash_description = hashlib.sha256(bytes(description, \"utf-8\", errors=\"ignore\")).hexdigest() # write the news into", "`link`: Link of the rss feed. 
\"\"\" # Hash description", "database self.database.AddNews(root, name, title, hash_description, link) #Create the discord message", ":\", color=(color or 0x00ff00)) #Set bot name and profil picture", "for the value. \"\"\" dict_news = dict() # Get the", "\"utf-8\", errors=\"ignore\")).hexdigest() # Return the check of the query return", "feedrss url parse in args. \"\"\" def __init__(self, bot, cwd):", "discord.Embed(title=\"{0} - {1}\".format(root, name), description=\"News :\", color=(color or 0x00ff00)) #Set", "not self.bot.is_closed(): # For each key for key, sections in", "json_rss to launch relaunch the requests self.json_rss[root][index_section][\"clean\"] = True #", "# Add the list to the dict dict_news[str(index)] = args", "new if self.is_new(root, name, title, description, link): # Hash the", "hashlib.sha256(bytes(description, \"utf-8\", errors=\"ignore\")).hexdigest() # Return the check of the query", "description=\"News :\", color=(color or 0x00ff00)) #Set bot name and profil", "import os from const import CHANNEL_RSS, WAIT_UNTIL_NEW_CHECK, \\ SQLITE_FOLDER_NAME, SQLITE_FILE_NAME", "or 0x00ff00)) #Set bot name and profil picture news.set_author(name=self.bot_username, icon_url=self.bot.user.avatar_url)", "`content`: Content description of the news. @param => str: `link`:", "the news is new if self.is_new(root, name, title, description, link):", "color if \"color\" in section[\"custom\"].keys(): color = getattr(discord.Color, section[\"custom\"][\"color\"])() else:", "=> str: `root`: Name of the Website. @param => str:", "str: `title`: Title of the news. 
@param => str: `description`:", "if self.is_new(root, name, title, description, link): # Hash the description", "urllib.parse import urlparse import feedparser import requests import asyncio import", "SQLITE_FILE_NAME) def get_news(self, url): \"\"\" Get the news of the", "number of news news_number = len(parser) # Construct the dict", "already launched if isinstance(wait_time, str): # Launch the function to", "in self.json_rss.items(): # Get the root name set in const", "self.bot.user.name self.rss_channel = self.bot.get_channel(CHANNEL_RSS) # Path self.cwd = cwd #", "description and the link for the main message content =", "Hash description hash_description = hashlib.sha256(bytes(description, \"utf-8\", errors=\"ignore\")).hexdigest() # Return the", "content + \"\\n\" + link news.add_field(name=title, value=content[:1024], inline=False) #Show the", "news = discord.Embed(title=\"{0} - {1}\".format(root, name), description=\"News :\", color=(color or", "#Set bot name and profil picture news.set_author(name=self.bot_username, icon_url=self.bot.user.avatar_url) #Set the", "\"\\n\" + link news.add_field(name=title, value=content[:1024], inline=False) #Show the bot username", "= section[\"clean\"] # Check if the cleaning database is already", "the database for the root and name given wait_time =", "in section[\"link\"]: # Get title, description and link in a", "Link of the news. @param => discord.Color: `color`: Color for", "=> str: `link`: Link of the news. 
@param => discord.Color:", "\\ SQLITE_FOLDER_NAME, SQLITE_FILE_NAME from fts.database import Database from fts.cleandatabase import", "until the cleaning of the database for the root and", "len(parser) # Construct the dict for index in range(news_number): #", "and profil picture news.set_author(name=self.bot_username, icon_url=self.bot.user.avatar_url) #Set the description and the", "dict with an int index key and title, description and", "section name = section[\"name\"] # Get the time until the", "hash_description = hashlib.sha256(bytes(description, \"utf-8\", errors=\"ignore\")).hexdigest() # Return the check of", "= section[\"name\"] # Get the time until the cleaning of", "dict return dict_news def is_new(self, root, name, title, description, link):", "content, link, color): \"\"\" Create the embeded message and send", "= value[0] # Get description description = value[1] # Get", "# Get the content of the requests content = requests.get(url).text", "description hash_description = hashlib.sha256(bytes(description, \"utf-8\", errors=\"ignore\")).hexdigest() # write the news", "the query return not self.database.isNewsExists(root, name, title, hash_description, link) def", "Check if the news is new if self.is_new(root, name, title,", "description hash_description = hashlib.sha256(bytes(description, \"utf-8\", errors=\"ignore\")).hexdigest() # Return the check", "section[\"custom\"][\"color\"])() else: color = False else: color = False #", "link link = value[2] # Check if the news is", "Return True if the news in the feed is new.", "if the news in the feed is new. @param =>", "title, description and link in a dict dict_news = self.get_news(link)", "dict_news = dict() # Get the content of the requests", "Initialize class @param => DiscordBot: `bot`: Discord Bot Instance. @param", "for index_section, section in enumerate(sections): # Check customization of the", "`title`: Title of the news. 
@param => str: `content`: Content", "the content parser = feedparser.parse(content) # Set the root parser", "the news already exists for value in dict_news.values(): # Get", "Get news of the feedrss url parse in args. \"\"\"", "the news of the rss feed. @param => str: `url`:", "of the section name = section[\"name\"] # Get the time", "write the news into the database self.database.AddNews(root, name, title, hash_description,", "\"\"\" Get the news and send it to the channel.", "Set list args = [ title, description, link ] #", "news @param => str: `title`: Title of the news. @param", "database is already launched if isinstance(wait_time, str): # Launch the", "section[\"custom\"].keys(): color = getattr(discord.Color, section[\"custom\"][\"color\"])() else: color = False else:", "the rss feed. \"\"\" # Hash description hash_description = hashlib.sha256(bytes(description,", "the left news = discord.Embed(title=\"{0} - {1}\".format(root, name), description=\"News :\",", "# Database self.db_path = os.path.join(self.cwd, SQLITE_FOLDER_NAME) self.database = Database(self.db_path, SQLITE_FILE_NAME)", "# Check if the cleaning database is already launched if", "clean the database Thread = CleanDatabase(root, name, wait_time, self.db_path, SQLITE_FILE_NAME)", "SQLITE_FOLDER_NAME) self.database = Database(self.db_path, SQLITE_FILE_NAME) def get_news(self, url): \"\"\" Get", "= discord.Embed(title=\"{0} - {1}\".format(root, name), description=\"News :\", color=(color or 0x00ff00))", "parser = parser[\"entries\"] # Get the number of news news_number", "= self.bot.user.name self.rss_channel = self.bot.get_channel(CHANNEL_RSS) # Path self.cwd = cwd", "args = [ title, description, link ] # Add the", "the Website. 
@param => str: `name`: Name set in const.", "the database self.database.AddNews(root, name, title, hash_description, link) #Create the discord", "value[2] # Check if the news is new if self.is_new(root,", "self.bot_username = self.bot.user.name self.rss_channel = self.bot.get_channel(CHANNEL_RSS) # Path self.cwd =", "`description`: Description of the news. @param => str: `link`: Link", "on the left news = discord.Embed(title=\"{0} - {1}\".format(root, name), description=\"News", "function to clean the database Thread = CleanDatabase(root, name, wait_time,", "news in the feed is new. @param => str: `title`:", "\"\"\" Initialize class @param => DiscordBot: `bot`: Discord Bot Instance.", "= bot self.bot_username = self.bot.user.name self.rss_channel = self.bot.get_channel(CHANNEL_RSS) # Path", "news of the rss feed. @param => str: `url`: url", "the link link = parser[index][\"links\"][0][\"href\"] # Set list args =", "name of the section name = section[\"name\"] # Get the", "bot, cwd): \"\"\" Initialize class @param => DiscordBot: `bot`: Discord", "and send it to the channel. @param => dict: `json_rss`:", "while not self.bot.is_closed(): # For each key for key, sections", "username in footer news.set_footer(text=\"Generate by @{0}\".format(self.bot_username)) # Return the final", "# Return the dict return dict_news def is_new(self, root, name,", "root, name, title, content, link, color): \"\"\" Create the embeded", "news async def feedrss(self, json_rss): \"\"\" Get the news and", "Title of the news. @param => str: `description`: Description of", "of the database for the root and name given wait_time", "bot name and profil picture news.set_author(name=self.bot_username, icon_url=self.bot.user.avatar_url) #Set the description", "name set in const root = key # For each", "the rss feed. 
Return dict with an int index key", "section in enumerate(sections): # Check customization of the section if", "= value[1] # Get link link = value[2] # Check", "link = parser[index][\"links\"][0][\"href\"] # Set list args = [ title,", "root, name, title, description, link): \"\"\" Return True if the", "def __init__(self, bot, cwd): \"\"\" Initialize class @param => DiscordBot:", "import feedparser import requests import asyncio import discord import hashlib", "def is_new(self, root, name, title, description, link): \"\"\" Return True", "SQLITE_FILE_NAME) Thread.start() # Change the variable type of the clean", "a dict dict_news = self.get_news(link) # Verify if the news", "link in section[\"link\"]: # Get title, description and link in", "const root = key # For each sections for index_section,", "message and send it to discord. @param => str: `root`:", "description, link): \"\"\" Return True if the news in the", "each link in the section for link in section[\"link\"]: #", "the requests self.json_rss[root][index_section][\"clean\"] = True # For each link in", "# Hash the description hash_description = hashlib.sha256(bytes(description, \"utf-8\", errors=\"ignore\")).hexdigest() #", "exists for value in dict_news.values(): # Get title title =", "Name, description and color on the left news = discord.Embed(title=\"{0}", "if the news is new if self.is_new(root, name, title, description,", "discord message message = self.embeded_msg(root, name, title, description, link, color)", "False else: color = False # Get the name of", "= hashlib.sha256(bytes(description, \"utf-8\", errors=\"ignore\")).hexdigest() # Return the check of the", "= self.get_news(link) # Verify if the news already exists for", "= hashlib.sha256(bytes(description, \"utf-8\", errors=\"ignore\")).hexdigest() # write the news into the", "cleaning database is already launched if isinstance(wait_time, str): # Launch", "description = value[1] # Get link link = value[2] #", "the function to clean the database 
Thread = CleanDatabase(root, name,", "for the format self.json_rss = json_rss # While the connection", "# While the connection is not closed while not self.bot.is_closed():", "title, description and link in a list for the value.", "the check of the query return not self.database.isNewsExists(root, name, title,", "Show const for the format self.json_rss = json_rss # While", "@param => str: `url`: url of the rss feed. Return", "# Set list args = [ title, description, link ]", "self.embeded_msg(root, name, title, description, link, color) #Send to discord await", "description, link): # Hash the description hash_description = hashlib.sha256(bytes(description, \"utf-8\",", "given wait_time = section[\"clean\"] # Check if the cleaning database", "str): # Launch the function to clean the database Thread", "{1}\".format(root, name), description=\"News :\", color=(color or 0x00ff00)) #Set bot name", "section.keys(): # Check color if \"color\" in section[\"custom\"].keys(): color =", "the discord message message = self.embeded_msg(root, name, title, description, link,", "news.set_footer(text=\"Generate by @{0}\".format(self.bot_username)) # Return the final Discord embeded message", "#!/usr/bin/python3 from urllib.parse import urlparse import feedparser import requests import", "else: color = False else: color = False # Get", "in dict_news.values(): # Get title title = value[0] # Get", "link) def embeded_msg(self, root, name, title, content, link, color): \"\"\"", "# write the news into the database self.database.AddNews(root, name, title,", "# Get the root name set in const root =", "the cleaning of the database for the root and name", "CleanDatabase class FluxRSS: \"\"\" Class of FluxRSS. Get news of", "cleaning of the database for the root and name given", "is new. 
@param => str: `title`: Title of the news.", "description description = parser[index][\"description\"] # Get the link link =", "color): \"\"\" Create the embeded message and send it to", "dict_news = self.get_news(link) # Verify if the news already exists", "news already exists for value in dict_news.values(): # Get title", "the section name = section[\"name\"] # Get the time until", "`bot`: Discord Bot Instance. @param => str: `cwd`: Current Working", "root = key # For each sections for index_section, section", "the embeded message and send it to discord. @param =>", "# Get the time until the cleaning of the database", "link): \"\"\" Return True if the news in the feed", "def embeded_msg(self, root, name, title, content, link, color): \"\"\" Create", "= parser[\"entries\"] # Get the number of news news_number =", "Categorie of the news @param => str: `title`: Title of", "name, title, hash_description, link) def embeded_msg(self, root, name, title, content,", "in section[\"custom\"].keys(): color = getattr(discord.Color, section[\"custom\"][\"color\"])() else: color = False", "value[0] # Get description description = value[1] # Get link", "the link for the main message content = content +", "cwd): \"\"\" Initialize class @param => DiscordBot: `bot`: Discord Bot", "[ title, description, link ] # Add the list to", "cwd # Database self.db_path = os.path.join(self.cwd, SQLITE_FOLDER_NAME) self.database = Database(self.db_path,", "import Database from fts.cleandatabase import CleanDatabase class FluxRSS: \"\"\" Class", "and link in a list for the value. \"\"\" dict_news", "the final Discord embeded message return news async def feedrss(self,", "\"\"\" Get the news of the rss feed. @param =>", "self.is_new(root, name, title, description, link): # Hash the description hash_description", "\"\"\" # Show const for the format self.json_rss = json_rss", "description description = value[1] # Get link link = value[2]", "FluxRSS: \"\"\" Class of FluxRSS. 
Get news of the feedrss", "# Get the number of news news_number = len(parser) #", "=> dict: `json_rss`: JSON data of the RSS Flux. \"\"\"", "the root and name given wait_time = section[\"clean\"] # Check", "and send it to discord. @param => str: `root`: Name", "Check color if \"color\" in section[\"custom\"].keys(): color = getattr(discord.Color, section[\"custom\"][\"color\"])()", "picture news.set_author(name=self.bot_username, icon_url=self.bot.user.avatar_url) #Set the description and the link for", "discord await self.rss_channel.send(embed=message) # Wait until the next verification await", "= parser[index][\"description\"] # Get the link link = parser[index][\"links\"][0][\"href\"] #", "hash_description, link) #Create the discord message message = self.embeded_msg(root, name,", "@param => discord.Color: `color`: Color for the left panel. \"\"\"", "@param => str: `root`: Name of the Website. @param =>", "const. Categorie of the news @param => str: `title`: Title", "data of the RSS Flux. 
\"\"\" # Show const for", "async def feedrss(self, json_rss): \"\"\" Get the news and send", "enumerate(sections): # Check customization of the section if \"custom\" in", "description = parser[index][\"description\"] # Get the link link = parser[index][\"links\"][0][\"href\"]", "and color on the left news = discord.Embed(title=\"{0} - {1}\".format(root,", "# Get title title = value[0] # Get description description", "# Parse the content parser = feedparser.parse(content) # Set the", "set in const root = key # For each sections", "import discord import hashlib import os from const import CHANNEL_RSS,", "# Get the title title = parser[index][\"title\"] # Get the", "= getattr(discord.Color, section[\"custom\"][\"color\"])() else: color = False else: color =", "the news into the database self.database.AddNews(root, name, title, hash_description, link)", "await self.rss_channel.send(embed=message) # Wait until the next verification await asyncio.sleep(WAIT_UNTIL_NEW_CHECK)", "the news and send it to the channel. @param =>", "in section.keys(): # Check color if \"color\" in section[\"custom\"].keys(): color", "title, description, link, color) #Send to discord await self.rss_channel.send(embed=message) #", "\"\"\" Create the embeded message and send it to discord.", "Verify if the news already exists for value in dict_news.values():", "into the database self.database.AddNews(root, name, title, hash_description, link) #Create the", "str: `root`: Name of the Website. @param => str: `name`:", "description and link in a list for the value. \"\"\"", "rss feed. \"\"\" # Hash description hash_description = hashlib.sha256(bytes(description, \"utf-8\",", "=> str: `link`: Link of the rss feed. \"\"\" #", "@param => str: `link`: Link of the news. @param =>", "=> str: `title`: Title of the news. @param => str:", "the news. 
@param => str: `link`: Link of the rss", "#Set the description and the link for the main message", "For each key for key, sections in self.json_rss.items(): # Get", "@param => str: `content`: Content description of the news. @param", "Parse the content parser = feedparser.parse(content) # Set the root", "name, title, description, link, color) #Send to discord await self.rss_channel.send(embed=message)", "the clean line in json_rss to launch relaunch the requests", "name, wait_time, self.db_path, SQLITE_FILE_NAME) Thread.start() # Change the variable type", "Discord embeded message return news async def feedrss(self, json_rss): \"\"\"", "SQLITE_FOLDER_NAME, SQLITE_FILE_NAME from fts.database import Database from fts.cleandatabase import CleanDatabase", "CHANNEL_RSS, WAIT_UNTIL_NEW_CHECK, \\ SQLITE_FOLDER_NAME, SQLITE_FILE_NAME from fts.database import Database from", "if \"custom\" in section.keys(): # Check color if \"color\" in", "the bot username in footer news.set_footer(text=\"Generate by @{0}\".format(self.bot_username)) # Return", "const import CHANNEL_RSS, WAIT_UNTIL_NEW_CHECK, \\ SQLITE_FOLDER_NAME, SQLITE_FILE_NAME from fts.database import", "of the news. @param => discord.Color: `color`: Color for the", "title title = value[0] # Get description description = value[1]", "= parser[index][\"title\"] # Get the description description = parser[index][\"description\"] #", "the database Thread = CleanDatabase(root, name, wait_time, self.db_path, SQLITE_FILE_NAME) Thread.start()", "each key for key, sections in self.json_rss.items(): # Get the", "@param => str: `title`: Title of the news. @param =>", "RSS Flux. 
\"\"\" # Show const for the format self.json_rss", "value=content[:1024], inline=False) #Show the bot username in footer news.set_footer(text=\"Generate by", "database Thread = CleanDatabase(root, name, wait_time, self.db_path, SQLITE_FILE_NAME) Thread.start() #", "from urllib.parse import urlparse import feedparser import requests import asyncio", "the connection is not closed while not self.bot.is_closed(): # For", "class FluxRSS: \"\"\" Class of FluxRSS. Get news of the", "True # For each link in the section for link", "Add the list to the dict dict_news[str(index)] = args #", "\"utf-8\", errors=\"ignore\")).hexdigest() # write the news into the database self.database.AddNews(root,", "in range(news_number): # Get the title title = parser[index][\"title\"] #", "and the link for the main message content = content", "# Show const for the format self.json_rss = json_rss #", "the value. \"\"\" dict_news = dict() # Get the content", "root name set in const root = key # For", "parser[index][\"links\"][0][\"href\"] # Set list args = [ title, description, link", "str: `link`: Link of the rss feed. \"\"\" # Hash", "description of the news. @param => str: `link`: Link of", "wait_time = section[\"clean\"] # Check if the cleaning database is", "message content = content + \"\\n\" + link news.add_field(name=title, value=content[:1024],", "section[\"link\"]: # Get title, description and link in a dict", "description, link ] # Add the list to the dict", "news. @param => str: `link`: Link of the news. @param", "=> str: `content`: Content description of the news. 
@param =>", "Get the description description = parser[index][\"description\"] # Get the link", "of the news @param => str: `title`: Title of the", "# Hash description hash_description = hashlib.sha256(bytes(description, \"utf-8\", errors=\"ignore\")).hexdigest() # Return", "color = False else: color = False # Get the", "news.set_author(name=self.bot_username, icon_url=self.bot.user.avatar_url) #Set the description and the link for the", "channel. @param => dict: `json_rss`: JSON data of the RSS", "json_rss): \"\"\" Get the news and send it to the", "Create the embeded message and send it to discord. @param", "# Construct the dict for index in range(news_number): # Get", "Get the title title = parser[index][\"title\"] # Get the description", "rss feed. @param => str: `url`: url of the rss", "def feedrss(self, json_rss): \"\"\" Get the news and send it", "# Set the root parser = parser[\"entries\"] # Get the", "customization of the section if \"custom\" in section.keys(): # Check", "the content of the requests content = requests.get(url).text # Parse", "the description description = parser[index][\"description\"] # Get the link link", "json_rss # While the connection is not closed while not", "icon_url=self.bot.user.avatar_url) #Set the description and the link for the main", "feedparser import requests import asyncio import discord import hashlib import", "str: `cwd`: Current Working Directory of main.py file. \"\"\" #", "fts.cleandatabase import CleanDatabase class FluxRSS: \"\"\" Class of FluxRSS. 
Get", "Return the final Discord embeded message return news async def", "list args = [ title, description, link ] # Add", "import CHANNEL_RSS, WAIT_UNTIL_NEW_CHECK, \\ SQLITE_FOLDER_NAME, SQLITE_FILE_NAME from fts.database import Database", "message return news async def feedrss(self, json_rss): \"\"\" Get the", "not closed while not self.bot.is_closed(): # For each key for", "\"\"\" dict_news = dict() # Get the content of the", "return not self.database.isNewsExists(root, name, title, hash_description, link) def embeded_msg(self, root,", "Discord self.bot = bot self.bot_username = self.bot.user.name self.rss_channel = self.bot.get_channel(CHANNEL_RSS)", "content = requests.get(url).text # Parse the content parser = feedparser.parse(content)", "by @{0}\".format(self.bot_username)) # Return the final Discord embeded message return", "Current Working Directory of main.py file. \"\"\" # Discord self.bot", "title title = parser[index][\"title\"] # Get the description description =", "import urlparse import feedparser import requests import asyncio import discord", "the news. @param => str: `content`: Content description of the", "+ \"\\n\" + link news.add_field(name=title, value=content[:1024], inline=False) #Show the bot", "return news async def feedrss(self, json_rss): \"\"\" Get the news", "value in dict_news.values(): # Get title title = value[0] #", "launch relaunch the requests self.json_rss[root][index_section][\"clean\"] = True # For each", "self.json_rss.items(): # Get the root name set in const root", "url): \"\"\" Get the news of the rss feed. @param", "bot self.bot_username = self.bot.user.name self.rss_channel = self.bot.get_channel(CHANNEL_RSS) # Path self.cwd", "title, hash_description, link) def embeded_msg(self, root, name, title, content, link,", "self.rss_channel = self.bot.get_channel(CHANNEL_RSS) # Path self.cwd = cwd # Database", "Directory of main.py file. 
\"\"\" # Discord self.bot = bot", "the root parser = parser[\"entries\"] # Get the number of", "#Show the bot username in footer news.set_footer(text=\"Generate by @{0}\".format(self.bot_username)) #", "Get title title = value[0] # Get description description =", "link for the main message content = content + \"\\n\"", "and link in a dict dict_news = self.get_news(link) # Verify", "send it to the channel. @param => dict: `json_rss`: JSON", "= os.path.join(self.cwd, SQLITE_FOLDER_NAME) self.database = Database(self.db_path, SQLITE_FILE_NAME) def get_news(self, url):", "for the main message content = content + \"\\n\" +", "Return dict with an int index key and title, description", "news_number = len(parser) # Construct the dict for index in", "Get the news and send it to the channel. @param", "bot username in footer news.set_footer(text=\"Generate by @{0}\".format(self.bot_username)) # Return the", "a list for the value. \"\"\" dict_news = dict() #", "clean line in json_rss to launch relaunch the requests self.json_rss[root][index_section][\"clean\"]", "self.get_news(link) # Verify if the news already exists for value", "Check customization of the section if \"custom\" in section.keys(): #", "message message = self.embeded_msg(root, name, title, description, link, color) #Send", "new. @param => str: `title`: Title of the news. @param", "of the RSS Flux. \"\"\" # Show const for the", "not self.database.isNewsExists(root, name, title, hash_description, link) def embeded_msg(self, root, name,", "@param => str: `description`: Description of the news. @param =>", "to the channel. @param => dict: `json_rss`: JSON data of", "key for key, sections in self.json_rss.items(): # Get the root", "= json_rss # While the connection is not closed while", "of the clean line in json_rss to launch relaunch the", "`cwd`: Current Working Directory of main.py file. 
\"\"\" # Discord", "from const import CHANNEL_RSS, WAIT_UNTIL_NEW_CHECK, \\ SQLITE_FOLDER_NAME, SQLITE_FILE_NAME from fts.database", "sections for index_section, section in enumerate(sections): # Check customization of", "feedparser.parse(content) # Set the root parser = parser[\"entries\"] # Get", "`title`: Title of the news. @param => str: `description`: Description", "const for the format self.json_rss = json_rss # While the", "news. @param => str: `description`: Description of the news. @param", "requests self.json_rss[root][index_section][\"clean\"] = True # For each link in the", "`json_rss`: JSON data of the RSS Flux. \"\"\" # Show", "parser = feedparser.parse(content) # Set the root parser = parser[\"entries\"]", "the left panel. \"\"\" # Set the Name, description and", "Color for the left panel. \"\"\" # Set the Name,", "set in const. Categorie of the news @param => str:", "Name set in const. Categorie of the news @param =>", "self.bot = bot self.bot_username = self.bot.user.name self.rss_channel = self.bot.get_channel(CHANNEL_RSS) #", "title, description, link ] # Add the list to the", "Class of FluxRSS. Get news of the feedrss url parse", "send it to discord. @param => str: `root`: Name of", "and title, description and link in a list for the", "is already launched if isinstance(wait_time, str): # Launch the function", "Path self.cwd = cwd # Database self.db_path = os.path.join(self.cwd, SQLITE_FOLDER_NAME)", "# For each key for key, sections in self.json_rss.items(): #", "# Path self.cwd = cwd # Database self.db_path = os.path.join(self.cwd,", "= len(parser) # Construct the dict for index in range(news_number):", "] # Add the list to the dict dict_news[str(index)] =", "Get link link = value[2] # Check if the news", "url of the rss feed. Return dict with an int", "of the Website. @param => str: `name`: Name set in", "in the feed is new. @param => str: `title`: Title", "Flux. \"\"\" # Show const for the format self.json_rss =", "list for the value. 
\"\"\" dict_news = dict() # Get", "Thread = CleanDatabase(root, name, wait_time, self.db_path, SQLITE_FILE_NAME) Thread.start() # Change", "index key and title, description and link in a list", "the news. @param => discord.Color: `color`: Color for the left", "discord. @param => str: `root`: Name of the Website. @param", "\"\"\" Class of FluxRSS. Get news of the feedrss url", "Get the time until the cleaning of the database for", "hash_description, link) def embeded_msg(self, root, name, title, content, link, color):", "the time until the cleaning of the database for the", "link, color): \"\"\" Create the embeded message and send it", "= False else: color = False # Get the name", "Title of the news. @param => str: `content`: Content description", "news. @param => str: `link`: Link of the rss feed.", "list to the dict dict_news[str(index)] = args # Return the", "=> discord.Color: `color`: Color for the left panel. \"\"\" #", "FluxRSS. Get news of the feedrss url parse in args.", "Name of the Website. @param => str: `name`: Name set", "section if \"custom\" in section.keys(): # Check color if \"color\"", "the requests content = requests.get(url).text # Parse the content parser", "if isinstance(wait_time, str): # Launch the function to clean the", "link, color) #Send to discord await self.rss_channel.send(embed=message) # Wait until", "= [ title, description, link ] # Add the list", "line in json_rss to launch relaunch the requests self.json_rss[root][index_section][\"clean\"] =", "Working Directory of main.py file. \"\"\" # Discord self.bot =", "#Send to discord await self.rss_channel.send(embed=message) # Wait until the next", "is new if self.is_new(root, name, title, description, link): # Hash", "closed while not self.bot.is_closed(): # For each key for key,", "it to discord. 
@param => str: `root`: Name of the", "# Get title, description and link in a dict dict_news", "query return not self.database.isNewsExists(root, name, title, hash_description, link) def embeded_msg(self,", "title, description, link): \"\"\" Return True if the news in", "main.py file. \"\"\" # Discord self.bot = bot self.bot_username =", "feed. Return dict with an int index key and title,", "`url`: url of the rss feed. Return dict with an", "= value[2] # Check if the news is new if", "@param => str: `cwd`: Current Working Directory of main.py file.", "format self.json_rss = json_rss # While the connection is not", "is_new(self, root, name, title, description, link): \"\"\" Return True if", "if the cleaning database is already launched if isinstance(wait_time, str):", "self.bot.is_closed(): # For each key for key, sections in self.json_rss.items():", "Change the variable type of the clean line in json_rss", "parser[index][\"description\"] # Get the link link = parser[index][\"links\"][0][\"href\"] # Set", "# Check if the news is new if self.is_new(root, name,", "getattr(discord.Color, section[\"custom\"][\"color\"])() else: color = False else: color = False", "= self.embeded_msg(root, name, title, description, link, color) #Send to discord", "import CleanDatabase class FluxRSS: \"\"\" Class of FluxRSS. Get news", "\"custom\" in section.keys(): # Check color if \"color\" in section[\"custom\"].keys():", "@param => DiscordBot: `bot`: Discord Bot Instance. @param => str:", "embeded message return news async def feedrss(self, json_rss): \"\"\" Get", "# Verify if the news already exists for value in", "the list to the dict dict_news[str(index)] = args # Return", "news and send it to the channel. @param => dict:", "+ link news.add_field(name=title, value=content[:1024], inline=False) #Show the bot username in", "str: `title`: Title of the news. 
@param => str: `content`:", "name, title, description, link): # Hash the description hash_description =", "the feedrss url parse in args. \"\"\" def __init__(self, bot,", "file. \"\"\" # Discord self.bot = bot self.bot_username = self.bot.user.name", "`color`: Color for the left panel. \"\"\" # Set the", "message = self.embeded_msg(root, name, title, description, link, color) #Send to", "str: `description`: Description of the news. @param => str: `link`:", "errors=\"ignore\")).hexdigest() # write the news into the database self.database.AddNews(root, name,", "get_news(self, url): \"\"\" Get the news of the rss feed.", "news.add_field(name=title, value=content[:1024], inline=False) #Show the bot username in footer news.set_footer(text=\"Generate", "dict: `json_rss`: JSON data of the RSS Flux. \"\"\" #", "= cwd # Database self.db_path = os.path.join(self.cwd, SQLITE_FOLDER_NAME) self.database =", "int index key and title, description and link in a", "Get the name of the section name = section[\"name\"] #", "# Set the Name, description and color on the left", "return dict_news def is_new(self, root, name, title, description, link): \"\"\"", "the description hash_description = hashlib.sha256(bytes(description, \"utf-8\", errors=\"ignore\")).hexdigest() # write the", "=> str: `url`: url of the rss feed. Return dict", "self.db_path = os.path.join(self.cwd, SQLITE_FOLDER_NAME) self.database = Database(self.db_path, SQLITE_FILE_NAME) def get_news(self,", "dict_news[str(index)] = args # Return the dict return dict_news def", "Thread.start() # Change the variable type of the clean line", "link) #Create the discord message message = self.embeded_msg(root, name, title,", "to clean the database Thread = CleanDatabase(root, name, wait_time, self.db_path,", "Get the root name set in const root = key", "key and title, description and link in a list for", "link in a list for the value. 
\"\"\" dict_news =", "else: color = False # Get the name of the", "isinstance(wait_time, str): # Launch the function to clean the database", "Check if the cleaning database is already launched if isinstance(wait_time,", "section[\"name\"] # Get the time until the cleaning of the", "the news. @param => str: `description`: Description of the news.", "description and link in a dict dict_news = self.get_news(link) #", "wait_time, self.db_path, SQLITE_FILE_NAME) Thread.start() # Change the variable type of", "\"\"\" def __init__(self, bot, cwd): \"\"\" Initialize class @param =>", "link in the section for link in section[\"link\"]: # Get", "#Create the discord message message = self.embeded_msg(root, name, title, description,", "requests.get(url).text # Parse the content parser = feedparser.parse(content) # Set", "= feedparser.parse(content) # Set the root parser = parser[\"entries\"] #", "# Get description description = value[1] # Get link link", "Link of the rss feed. \"\"\" # Hash description hash_description", "time until the cleaning of the database for the root", "key # For each sections for index_section, section in enumerate(sections):", "the number of news news_number = len(parser) # Construct the", "color = False # Get the name of the section", "of the query return not self.database.isNewsExists(root, name, title, hash_description, link)", "in const root = key # For each sections for", "the dict dict_news[str(index)] = args # Return the dict return", "left panel. \"\"\" # Set the Name, description and color", "news of the feedrss url parse in args. \"\"\" def", "dict_news def is_new(self, root, name, title, description, link): \"\"\" Return", "left news = discord.Embed(title=\"{0} - {1}\".format(root, name), description=\"News :\", color=(color", "title = value[0] # Get description description = value[1] #", "Content description of the news. 
@param => str: `link`: Link", "\"\"\" # Set the Name, description and color on the", "the section if \"custom\" in section.keys(): # Check color if", "database for the root and name given wait_time = section[\"clean\"]", "import asyncio import discord import hashlib import os from const", "=> DiscordBot: `bot`: Discord Bot Instance. @param => str: `cwd`:", "main message content = content + \"\\n\" + link news.add_field(name=title,", "hashlib.sha256(bytes(description, \"utf-8\", errors=\"ignore\")).hexdigest() # write the news into the database", "inline=False) #Show the bot username in footer news.set_footer(text=\"Generate by @{0}\".format(self.bot_username))", "SQLITE_FILE_NAME from fts.database import Database from fts.cleandatabase import CleanDatabase class", "requests content = requests.get(url).text # Parse the content parser =", "Discord Bot Instance. @param => str: `cwd`: Current Working Directory", "rss feed. Return dict with an int index key and", "for index in range(news_number): # Get the title title =", "self.cwd = cwd # Database self.db_path = os.path.join(self.cwd, SQLITE_FOLDER_NAME) self.database", "=> str: `cwd`: Current Working Directory of main.py file. \"\"\"", "it to the channel. 
@param => dict: `json_rss`: JSON data", "in a dict dict_news = self.get_news(link) # Verify if the", "hashlib import os from const import CHANNEL_RSS, WAIT_UNTIL_NEW_CHECK, \\ SQLITE_FOLDER_NAME,", "of news news_number = len(parser) # Construct the dict for", "news into the database self.database.AddNews(root, name, title, hash_description, link) #Create", "section for link in section[\"link\"]: # Get title, description and", "range(news_number): # Get the title title = parser[index][\"title\"] # Get", "parser[index][\"title\"] # Get the description description = parser[index][\"description\"] # Get", "of the section if \"custom\" in section.keys(): # Check color", "Database self.db_path = os.path.join(self.cwd, SQLITE_FOLDER_NAME) self.database = Database(self.db_path, SQLITE_FILE_NAME) def", "the cleaning database is already launched if isinstance(wait_time, str): #", "the rss feed. @param => str: `url`: url of the", "= args # Return the dict return dict_news def is_new(self,", "in args. \"\"\" def __init__(self, bot, cwd): \"\"\" Initialize class", "with an int index key and title, description and link", "the dict return dict_news def is_new(self, root, name, title, description,", "str: `url`: url of the rss feed. Return dict with", "to discord. @param => str: `root`: Name of the Website.", "type of the clean line in json_rss to launch relaunch", "False # Get the name of the section name =", "self.db_path, SQLITE_FILE_NAME) Thread.start() # Change the variable type of the", "self.database.isNewsExists(root, name, title, hash_description, link) def embeded_msg(self, root, name, title,", "color = getattr(discord.Color, section[\"custom\"][\"color\"])() else: color = False else: color", "`root`: Name of the Website. 
@param => str: `name`: Name", "self.json_rss = json_rss # While the connection is not closed", "# Get link link = value[2] # Check if the", "dict dict_news = self.get_news(link) # Verify if the news already", "# Discord self.bot = bot self.bot_username = self.bot.user.name self.rss_channel =", "discord.Color: `color`: Color for the left panel. \"\"\" # Set", "the name of the section name = section[\"name\"] # Get", "link news.add_field(name=title, value=content[:1024], inline=False) #Show the bot username in footer", "\"color\" in section[\"custom\"].keys(): color = getattr(discord.Color, section[\"custom\"][\"color\"])() else: color =", "# Get the description description = parser[index][\"description\"] # Get the", "os from const import CHANNEL_RSS, WAIT_UNTIL_NEW_CHECK, \\ SQLITE_FOLDER_NAME, SQLITE_FILE_NAME from", "index_section, section in enumerate(sections): # Check customization of the section", "Description of the news. @param => str: `link`: Link of", "feed is new. @param => str: `title`: Title of the", "For each link in the section for link in section[\"link\"]:", "root parser = parser[\"entries\"] # Get the number of news", "fts.database import Database from fts.cleandatabase import CleanDatabase class FluxRSS: \"\"\"", "# Get the name of the section name = section[\"name\"]", "of the news. @param => str: `link`: Link of the", "= parser[index][\"links\"][0][\"href\"] # Set list args = [ title, description,", "0x00ff00)) #Set bot name and profil picture news.set_author(name=self.bot_username, icon_url=self.bot.user.avatar_url) #Set", "Construct the dict for index in range(news_number): # Get the", "feedrss(self, json_rss): \"\"\" Get the news and send it to", "name, title, content, link, color): \"\"\" Create the embeded message", "to discord await self.rss_channel.send(embed=message) # Wait until the next verification", "Set the root parser = parser[\"entries\"] # Get the number", "parse in args. 
\"\"\" def __init__(self, bot, cwd): \"\"\" Initialize", "footer news.set_footer(text=\"Generate by @{0}\".format(self.bot_username)) # Return the final Discord embeded", "in const. Categorie of the news @param => str: `title`:", "of the feedrss url parse in args. \"\"\" def __init__(self,", "name, title, hash_description, link) #Create the discord message message =", "color=(color or 0x00ff00)) #Set bot name and profil picture news.set_author(name=self.bot_username,", "embeded message and send it to discord. @param => str:", "name and profil picture news.set_author(name=self.bot_username, icon_url=self.bot.user.avatar_url) #Set the description and", "the section for link in section[\"link\"]: # Get title, description", "title, hash_description, link) #Create the discord message message = self.embeded_msg(root,", "the format self.json_rss = json_rss # While the connection is", "each sections for index_section, section in enumerate(sections): # Check customization", "and name given wait_time = section[\"clean\"] # Check if the", "the description and the link for the main message content", "urlparse import feedparser import requests import asyncio import discord import", "Get description description = value[1] # Get link link =", "the variable type of the clean line in json_rss to", "str: `link`: Link of the news. @param => discord.Color: `color`:", "@param => str: `name`: Name set in const. 
Categorie of", "# Change the variable type of the clean line in", "title, content, link, color): \"\"\" Create the embeded message and", "is not closed while not self.bot.is_closed(): # For each key", "= False # Get the name of the section name", "link ] # Add the list to the dict dict_news[str(index)]", "value[1] # Get link link = value[2] # Check if", "# Return the final Discord embeded message return news async", "errors=\"ignore\")).hexdigest() # Return the check of the query return not", "check of the query return not self.database.isNewsExists(root, name, title, hash_description,", "\"\"\" # Discord self.bot = bot self.bot_username = self.bot.user.name self.rss_channel", "description, link, color) #Send to discord await self.rss_channel.send(embed=message) # Wait", "= True # For each link in the section for", "self.database.AddNews(root, name, title, hash_description, link) #Create the discord message message", "the news @param => str: `title`: Title of the news.", "for link in section[\"link\"]: # Get title, description and link", "in json_rss to launch relaunch the requests self.json_rss[root][index_section][\"clean\"] = True", "for value in dict_news.values(): # Get title title = value[0]", "Launch the function to clean the database Thread = CleanDatabase(root,", "Database(self.db_path, SQLITE_FILE_NAME) def get_news(self, url): \"\"\" Get the news of", "`link`: Link of the news. @param => discord.Color: `color`: Color", "# Check customization of the section if \"custom\" in section.keys():", "of the news. @param => str: `content`: Content description of", "for the left panel. \"\"\" # Set the Name, description", "True if the news in the feed is new. @param", "args. 
\"\"\" def __init__(self, bot, cwd): \"\"\" Initialize class @param", "= dict() # Get the content of the requests content", "profil picture news.set_author(name=self.bot_username, icon_url=self.bot.user.avatar_url) #Set the description and the link", "color on the left news = discord.Embed(title=\"{0} - {1}\".format(root, name),", "the main message content = content + \"\\n\" + link", "Set the Name, description and color on the left news", "from fts.cleandatabase import CleanDatabase class FluxRSS: \"\"\" Class of FluxRSS.", "Get the content of the requests content = requests.get(url).text #", "feed. \"\"\" # Hash description hash_description = hashlib.sha256(bytes(description, \"utf-8\", errors=\"ignore\")).hexdigest()", "the root name set in const root = key #", "root and name given wait_time = section[\"clean\"] # Check if", "value. \"\"\" dict_news = dict() # Get the content of", "# Check color if \"color\" in section[\"custom\"].keys(): color = getattr(discord.Color,", "panel. \"\"\" # Set the Name, description and color on", "Return the check of the query return not self.database.isNewsExists(root, name," ]
[ "= self._create_player_logger(log_level) # pyre-ignore if start_listening: self._listening_coroutine = ensure_future(self.listen()) async", "@property def logger(self) -> Logger: # pyre-ignore \"\"\"Logger associated with", "\"\"\" return self._logger @property def username(self) -> str: \"\"\"The player's", "- %(levelname)s - %(message)s\" ) stream_handler.setFormatter(formatter) logger.addHandler(stream_handler) return logger async", "assertion = \"\" await self._send_message(f\"/trn {self._username},0,{assertion}\") await self._change_avatar(self._avatar) async def", "for communicating with showdown servers. \"\"\" import json import logging", "import Lock from asyncio import sleep from time import perf_counter", "== \"updateuser\": if split_message[2] == \" \" + self._username: #", "finally: for coroutine in coroutines: coroutine.cancel() async def stop_listening(self) ->", "username. :return: The player's username. :rtype: str \"\"\" return self._username", "%s\" % username) async def _challenge(self, username: str, format_: str):", "time import perf_counter from typing import List from typing import", "Showdown websocket messages are pipe-separated sequences split_message = message.split(\"|\") assert", ":type start_listening: bool \"\"\" self._authentication_url = server_configuration.authentication_url self._avatar = avatar", "self._create_player_logger(log_level) # pyre-ignore if start_listening: self._listening_coroutine = ensure_future(self.listen()) async def", "*, avatar: Optional[int] = None, log_level: Optional[int] = None, server_configuration:", "logger.setLevel(log_level) formatter = logging.Formatter( \"%(asctime)s - %(name)s - %(levelname)s -", "= json.loads(log_in_request.text[1:])[\"assertion\"] else: self.logger.info(\"Bypassing authentication request\") assertion = \"\" await", "with the player. :return: The logger. 
:rtype: Logger \"\"\" return", "json import logging import requests import websockets # pyre-ignore from", "message: %s\" % message) except CancelledError as e: self.logger.critical(\"CancelledError intercepted.", "= \"\", message_2: Optional[str] = None ) -> None: \"\"\"Sends", "log_level: The player's logger level. :type log_level: int. Defaults to", "showdown websocket\") coroutines = [] try: async with websockets.connect( self.websocket_url,", "send. :type message: str :param room: The room to which", "def _wait_for_login( self, checking_interval: float = 0.001, wait_for: int =", "base class for communicating with showdown servers. \"\"\" import json", "import Optional from aiologger import Logger # pyre-ignore from poke_env.exceptions", "_update_challenges(self, split_message: List[str]) -> None: \"\"\"Abstract method. Implementation should keep", "class for communicating with showdown servers. \"\"\" import json import", "self._change_avatar(self._avatar) async def _search_ladder_game(self, format_): await self._set_team() await self._send_message(f\"/search {format_}\")", "float = 0.001, wait_for: int = 5 ) -> None:", "__init__( self, player_configuration: PlayerConfiguration, *, avatar: Optional[int] = None, log_level:", "server_configuration: ServerConfiguration, start_listening: bool = True, ) -> None: \"\"\"", "and the player's username before messages. :param log_level: The logger's", "%s\", message) raise NotImplementedError(\"Unhandled message: %s\" % message) except CancelledError", "%s\", message) raise ShowdownException(\"Error message received: %s\", message) elif split_message[1]", "self._avatar = avatar self._password = player_configuration.password self._username = player_configuration.username self._server_url", "avatar id. If None, nothing happens. 
:type avatar_id: int \"\"\"", "self.username, split_message[2], ) elif \"updatechallenges\" in split_message[1]: # Contain information", "player_configuration.username self._server_url = server_configuration.server_url self._logged_in: Event = Event() self._sending_lock =", "elif split_message[1] == \"pm\": self.logger.info(\"Received pm: %s\", split_message) else: self.logger.critical(\"Unhandled", "await self._websocket.send(to_send) self.logger.info(\">>> %s\", to_send) async def _set_team(self): if self._team", "communicating with showdown servers. Also implements some higher level methods", "None: await self._send_message(\"/utm %s\" % self._team.yield_team()) async def _wait_for_login( self,", "self._websocket.close() @abstractmethod async def _handle_battle_message(self, message: str) -> None: \"\"\"Abstract", "challenges. \"\"\" @property def logged_in(self) -> Event: \"\"\"Event object associated", "listening to showdown websocket\") coroutines = [] try: async with", "perf_counter() while perf_counter() - start < wait_for: await sleep(checking_interval) if", "- %(message)s\" ) stream_handler.setFormatter(formatter) logger.addHandler(stream_handler) return logger async def _handle_message(self,", "# Battle update await self._handle_battle_message(message) elif split_message[1] == \"updatesearch\": self.logger.debug(\"Ignored", "we can login await self._log_in(split_message) elif split_message[1] == \"updateuser\": if", "perf_counter() - start < wait_for: await sleep(checking_interval) if self.logged_in: return", "assert len(split_message) > 1 # The type of message is", "poke_env.server_configuration import ServerConfiguration class PlayerNetwork(ABC): \"\"\" Network interface of a", "basic tasks, such as changing avatar and low-level message handling.", "str: \"\"\"The websocket url. 
It is derived from the server", "async def _log_in(self, split_message: List[str]) -> None: \"\"\"Log the player", "message: %s\", message) raise NotImplementedError(\"Unhandled message: %s\" % message) except", "message: str) -> None: \"\"\"Abstract method. Implementation should redirect messages", "pyre-ignore if start_listening: self._listening_coroutine = ensure_future(self.listen()) async def _accept_challenge(self, username:", "for communicating with showdown servers. Also implements some higher level", "await self._wait_for_login() if avatar_id is not None: await self._send_message(f\"/avatar {avatar_id}\")", "before messages. :param log_level: The logger's level. :type log_level: int", "\"Unhandled exception raised while handling message:\\n%s\", message ) raise exception", "aiologger import Logger # pyre-ignore from poke_env.exceptions import ShowdownException from", "this problem.\"\"\", self.username, split_message[2], ) elif \"updatechallenges\" in split_message[1]: #", "True. :type start_listening: bool \"\"\" self._authentication_url = server_configuration.authentication_url self._avatar =", "\"popup\": self.logger.warning(\"Popup message received: %s\", message) elif split_message[1] in [\"nametaken\"]:", ":type message: str \"\"\" try: self.logger.debug(\"Received message to handle: %s\",", ":return: The logger. :rtype: Logger \"\"\" return self._logger @property def", "{username}, {format_}\") async def _change_avatar(self, avatar_id: Optional[int]) -> None: \"\"\"Changes", "self._password = player_configuration.password self._username = player_configuration.username self._server_url = server_configuration.server_url self._logged_in:", "is not None: logger.setLevel(log_level) formatter = logging.Formatter( \"%(asctime)s - %(name)s", "import ABC from abc import abstractmethod from asyncio import CancelledError", "to start listening to the server. Defaults to True. 
:type", "def _log_in(self, split_message: List[str]) -> None: \"\"\"Log the player with", "\"\"\" self._authentication_url = server_configuration.authentication_url self._avatar = avatar self._password = player_configuration.password", "start_listening: bool \"\"\" self._authentication_url = server_configuration.authentication_url self._avatar = avatar self._password", "split_message: Message received from the server that triggers logging in.", "\"challstr\": # Confirms connection to the server: we can login", "def _search_ladder_game(self, format_): await self._set_team() await self._send_message(f\"/search {format_}\") async def", "associated with user login. :return: The logged-in event :rtype: Event", "which the message should be sent. :type room: str :param", "about current challenge await self._update_challenges(split_message) elif split_message[0].startswith(\">battle\"): # Battle update", "def username(self) -> str: \"\"\"The player's username. :return: The player's", "# Confirms connection to the server: we can login await", "%s\", message) elif split_message[1] == \"pm\": self.logger.info(\"Received pm: %s\", split_message)", "from asyncio import ensure_future from asyncio import Event from asyncio", "should keep track of current challenges. \"\"\" @property def logged_in(self)", "to which the message should be sent. :type room: str", "self.logger.exception( \"Unhandled exception raised while handling message:\\n%s\", message ) raise", "# Otherwise it is the one-th entry if split_message[1] ==", "and low-level message handling. \"\"\" def __init__( self, player_configuration: PlayerConfiguration,", "stream_handler = logging.StreamHandler() if log_level is not None: logger.setLevel(log_level) formatter", "of message is determined by the first entry in the", "\"act\": \"login\", \"name\": self._username, \"pass\": self._password, \"challstr\": split_message[2] + \"%7C\"", "str: \"\"\"The player's username. :return: The player's username. 
:rtype: str", "import ShowdownException from poke_env.player_configuration import PlayerConfiguration from poke_env.server_configuration import ServerConfiguration", "server. Defaults to True. :type start_listening: bool \"\"\" self._authentication_url =", "import List from typing import Optional from aiologger import Logger", "async with websockets.connect( self.websocket_url, max_queue=None ) as websocket: self._websocket =", "For battles, this is the zero-th entry # Otherwise it", "import CancelledError from asyncio import ensure_future from asyncio import Event", "player's username before messages. :param log_level: The logger's level. :type", "\"\"\"Changing the agent's username might solve this problem.\"\"\", self.username, split_message[2],", "async def listen(self) -> None: \"\"\"Listen to a showdown websocket", "coroutine in coroutines: coroutine.cancel() async def stop_listening(self) -> None: if", "async def _send_message( self, message: str, room: str = \"\",", "server_configuration: Server configuration. 
:type server_configuration: ServerConfiguration :param start_listening: Wheter to", "= \"\" await self._send_message(f\"/trn {self._username},0,{assertion}\") await self._change_avatar(self._avatar) async def _search_ladder_game(self,", "returned %s \"\"\" \"\"\"- this might prevent future actions from", "[\"nametaken\"]: self.logger.critical(\"Error message received: %s\", message) raise ShowdownException(\"Error message received:", "is not None: await self._send_message(f\"/avatar {avatar_id}\") def _create_player_logger(self, log_level: Optional[int])", "message_2: str, optional \"\"\" if message_2: to_send = \"|\".join([room, message,", "self.logger.info(\"Starting listening to showdown websocket\") coroutines = [] try: async", "ShowdownException(\"Error message received: %s\", message) elif split_message[1] == \"pm\": self.logger.info(\"Received", "else: self.logger.critical(\"Unhandled message: %s\", message) raise NotImplementedError(\"Unhandled message: %s\" %", "optional \"\"\" if message_2: to_send = \"|\".join([room, message, message_2]) else:", "avatar_id: int \"\"\" await self._wait_for_login() if avatar_id is not None:", ":type avatar_id: int \"\"\" await self._wait_for_login() if avatar_id is not", "def stop_listening(self) -> None: if self._listening_coroutine is not None: self._listening_coroutine.cancel()", "self.logger.info(\">>> %s\", to_send) async def _set_team(self): if self._team is not", "logger. :rtype: Logger \"\"\" return self._logger @property def username(self) ->", "player with specified username and password. 
Split message contains information", "logger(self) -> Logger: # pyre-ignore \"\"\"Logger associated with the player.", "username) async def _challenge(self, username: str, format_: str): assert self.logged_in.is_set()", "from time import perf_counter from typing import List from typing", "self._send_message(f\"/search {format_}\") async def _send_message( self, message: str, room: str", "%s\", split_message) else: self.logger.critical(\"Unhandled message: %s\", message) raise NotImplementedError(\"Unhandled message:", "+ split_message[3], }, ) self.logger.info(\"Sending authentication request\") assertion = json.loads(log_in_request.text[1:])[\"assertion\"]", ":param message: The message to send. :type message: str :param", "coroutines = [] try: async with websockets.connect( self.websocket_url, max_queue=None )", "split_message[1] == \"popup\": self.logger.warning(\"Popup message received: %s\", message) elif split_message[1]", "\"\"\" @abstractmethod async def _update_challenges(self, split_message: List[str]) -> None: \"\"\"Abstract", "be used to send a sequence of length 2. :param", "self._send_message(f\"/avatar {avatar_id}\") def _create_player_logger(self, log_level: Optional[int]) -> Logger: # pyre-ignore", "def listen(self) -> None: \"\"\"Listen to a showdown websocket and", "async def _handle_message(self, message: str) -> None: \"\"\"Handle received messages.", "to be handled.\"\"\" self.logger.info(\"Starting listening to showdown websocket\") coroutines =", "\"\"\" \"\"\"- this might prevent future actions from this agent.", "message received: %s\", message) raise ShowdownException(\"Error message received: %s\", message)", "%s\", message) coroutines.append(ensure_future(self._handle_message(message))) except websockets.exceptions.ConnectionClosedOK: self.logger.warning( \"Websocket connection with %s", "level. :type log_level: int. Defaults to logging's default level. 
:param", "logger async def _handle_message(self, message: str) -> None: \"\"\"Handle received", "None: if self._listening_coroutine is not None: self._listening_coroutine.cancel() await self._websocket.close() @abstractmethod", "message received: %s\", message) elif split_message[1] == \"pm\": self.logger.info(\"Received pm:", "message) elif split_message[1] == \"pm\": self.logger.info(\"Received pm: %s\", split_message) else:", "= player_configuration.password self._username = player_configuration.username self._server_url = server_configuration.server_url self._logged_in: Event", "avatar self._password = player_configuration.password self._username = player_configuration.username self._server_url = server_configuration.server_url", "%s\", message) # Showdown websocket messages are pipe-separated sequences split_message", "listen(self) -> None: \"\"\"Listen to a showdown websocket and dispatch", "-> None: \"\"\" :param player_configuration: Player configuration. :type player_configuration: PlayerConfiguration", "track of current challenges. \"\"\" @property def logged_in(self) -> Event:", "player's avatar. :param avatar_id: The new avatar id. If None,", "defines a base class for communicating with showdown servers. \"\"\"", "# pyre-ignore self._logger: Logger = self._create_player_logger(log_level) # pyre-ignore if start_listening:", "self.logger.critical(\"Error message received: %s\", message) raise ShowdownException(\"Error message received: %s\",", "split_message[0].startswith(\">battle\"): # Battle update await self._handle_battle_message(message) elif split_message[1] == \"updatesearch\":", "server. This information is necessary to log in. :param split_message:", "int :return: The logger. 
:rtype: Logger \"\"\" logger = logging.getLogger(self._username)", "self.logger.info(\"Bypassing authentication request\") assertion = \"\" await self._send_message(f\"/trn {self._username},0,{assertion}\") await", "as e: self.logger.critical(\"Listen interrupted by %s\", e) except Exception as", "Otherwise it is the one-th entry if split_message[1] == \"challstr\":", "-> None: \"\"\"Log the player with specified username and password.", "The room to which the message should be sent. :type", "self._send_message(\"/accept %s\" % username) async def _challenge(self, username: str, format_:", "sleep(checking_interval) if self.logged_in: return assert self.logged_in async def listen(self) ->", "room: str = \"\", message_2: Optional[str] = None ) ->", "player. Returns a Logger displaying asctime and the player's username", "username before messages. :param log_level: The logger's level. :type log_level:", "data={ \"act\": \"login\", \"name\": self._username, \"pass\": self._password, \"challstr\": split_message[2] +", "the specified room. `message_2` can be used to send a", "%s\" % self._team.yield_team()) async def _wait_for_login( self, checking_interval: float =", "}, ) self.logger.info(\"Sending authentication request\") assertion = json.loads(log_in_request.text[1:])[\"assertion\"] else: self.logger.info(\"Bypassing", "str, room: str = \"\", message_2: Optional[str] = None )", "self.logged_in: return assert self.logged_in async def listen(self) -> None: \"\"\"Listen", "== \"popup\": self.logger.warning(\"Popup message received: %s\", message) elif split_message[1] in", "self._logged_in @property def logger(self) -> Logger: # pyre-ignore \"\"\"Logger associated", "with showdown servers. 
\"\"\" import json import logging import requests", "in split_message[1]: # Contain information about current challenge await self._update_challenges(split_message)", "from asyncio import CancelledError from asyncio import ensure_future from asyncio", "message]) await self._websocket.send(to_send) self.logger.info(\">>> %s\", to_send) async def _set_team(self): if", "RuntimeError) as e: self.logger.critical(\"Listen interrupted by %s\", e) except Exception", "% username) async def _challenge(self, username: str, format_: str): assert", "coroutines: coroutine.cancel() async def stop_listening(self) -> None: if self._listening_coroutine is", "assert self.logged_in.is_set() await self._set_team() await self._send_message(\"/accept %s\" % username) async", "asyncio import Event from asyncio import Lock from asyncio import", "e: self.logger.critical(\"Listen interrupted by %s\", e) except Exception as e:", "await self._send_message(\"/accept %s\" % username) async def _challenge(self, username: str,", "\"\"\"Trying to login as %s, showdown returned %s \"\"\" \"\"\"-", "None: self._listening_coroutine.cancel() await self._websocket.close() @abstractmethod async def _handle_battle_message(self, message: str)", "True, ) -> None: \"\"\" :param player_configuration: Player configuration. :type", "first entry in the message # For battles, this is", "utf-8 -*- \"\"\"This module defines a base class for communicating", "Optional[int] = None, server_configuration: ServerConfiguration, start_listening: bool = True, )", "method. Implementation should redirect messages to corresponding battles. \"\"\" @abstractmethod", "> 1 # The type of message is determined by", "handling. \"\"\" def __init__( self, player_configuration: PlayerConfiguration, *, avatar: Optional[int]", "== \" \" + self._username: # Confirms successful login self.logged_in.set()", "of length 2. :param message: The message to send. 
:type", "\"updateuser\": if split_message[2] == \" \" + self._username: # Confirms", "showdown servers. \"\"\" import json import logging import requests import", "import Event from asyncio import Lock from asyncio import sleep", "messages. :param log_level: The logger's level. :type log_level: int :return:", "List[str]) -> None: \"\"\"Log the player with specified username and", ":return: The logged-in event :rtype: Event \"\"\" return self._logged_in @property", "typing import List from typing import Optional from aiologger import", "self._username = player_configuration.username self._server_url = server_configuration.server_url self._logged_in: Event = Event()", "split_message = message.split(\"|\") assert len(split_message) > 1 # The type", "\" \" + self._username: # Confirms successful login self.logged_in.set() elif", "from the server that triggers logging in. :type split_message: List[str]", "\"\"\"Event object associated with user login. :return: The logged-in event", "self._wait_for_login() if avatar_id is not None: await self._send_message(f\"/avatar {avatar_id}\") def", "Responsible for communicating with showdown servers. Also implements some higher", "tasks, such as changing avatar and low-level message handling. \"\"\"", "the player's avatar. :param avatar_id: The new avatar id. If", "The message to send. :type message: str :param room: The", "a Logger displaying asctime and the player's username before messages.", "\" + self._username: # Confirms successful login self.logged_in.set() elif not", "\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\" ) stream_handler.setFormatter(formatter) logger.addHandler(stream_handler)", "player_configuration.password self._username = player_configuration.username self._server_url = server_configuration.server_url self._logged_in: Event =", "The player's logger level. :type log_level: int. Defaults to logging's", "message handling. 
\"\"\" def __init__( self, player_configuration: PlayerConfiguration, *, avatar:", "problem.\"\"\", self.username, split_message[2], ) elif \"updatechallenges\" in split_message[1]: # Contain", "CancelledError as e: self.logger.critical(\"CancelledError intercepted. %s\", e) except Exception as", "List[str] \"\"\" if self._password: log_in_request = requests.post( self._authentication_url, data={ \"act\":", "def logged_in(self) -> Event: \"\"\"Event object associated with user login.", "message to the specified room. `message_2` can be used to", "\"\"\"Abstract method. Implementation should keep track of current challenges. \"\"\"", "def _change_avatar(self, avatar_id: Optional[int]) -> None: \"\"\"Changes the player's avatar.", "agent's username might solve this problem.\"\"\", self.username, split_message[2], ) elif", "determined by the first entry in the message # For", "message:\\n%s\", message ) raise exception async def _log_in(self, split_message: List[str])", "agent. \"\"\" \"\"\"Changing the agent's username might solve this problem.\"\"\",", "split_message: List[str]) -> None: \"\"\"Log the player with specified username", "sequence of length 2. :param message: The message to send.", "try: async with websockets.connect( self.websocket_url, max_queue=None ) as websocket: self._websocket", "avatar: Optional[int] = None, log_level: Optional[int] = None, server_configuration: ServerConfiguration,", "\"\"\"- this might prevent future actions from this agent. 
\"\"\"", "self.logger.debug(\"Ignored message: %s\", message) pass elif split_message[1] == \"popup\": self.logger.warning(\"Popup", "str, format_: str): assert self.logged_in.is_set() await self._set_team() await self._send_message(f\"/challenge {username},", "websocket: self.logger.info(\"<<< %s\", message) coroutines.append(ensure_future(self._handle_message(message))) except websockets.exceptions.ConnectionClosedOK: self.logger.warning( \"Websocket connection", "await self._set_team() await self._send_message(f\"/challenge {username}, {format_}\") async def _change_avatar(self, avatar_id:", "websocket_url(self) -> str: \"\"\"The websocket url. It is derived from", "self._username @property def websocket_url(self) -> str: \"\"\"The websocket url. It", "message in websocket: self.logger.info(\"<<< %s\", message) coroutines.append(ensure_future(self._handle_message(message))) except websockets.exceptions.ConnectionClosedOK: self.logger.warning(", "self._listening_coroutine.cancel() await self._websocket.close() @abstractmethod async def _handle_battle_message(self, message: str) ->", "username: str) -> None: assert self.logged_in.is_set() await self._set_team() await self._send_message(\"/accept", "self, player_configuration: PlayerConfiguration, *, avatar: Optional[int] = None, log_level: Optional[int]", "= avatar self._password = player_configuration.password self._username = player_configuration.username self._server_url =", "assertion = json.loads(log_in_request.text[1:])[\"assertion\"] else: self.logger.info(\"Bypassing authentication request\") assertion = \"\"", "keep track of current challenges. 
\"\"\" @property def logged_in(self) ->", "message.split(\"|\") assert len(split_message) > 1 # The type of message", "return self._username @property def websocket_url(self) -> str: \"\"\"The websocket url.", "asyncio import ensure_future from asyncio import Event from asyncio import", "log_in_request = requests.post( self._authentication_url, data={ \"act\": \"login\", \"name\": self._username, \"pass\":", "ensure_future from asyncio import Event from asyncio import Lock from", "requests.post( self._authentication_url, data={ \"act\": \"login\", \"name\": self._username, \"pass\": self._password, \"challstr\":", "message: str) -> None: \"\"\"Handle received messages. :param message: The", "None: start = perf_counter() while perf_counter() - start < wait_for:", "received from the server that triggers logging in. :type split_message:", "from typing import Optional from aiologger import Logger # pyre-ignore", "assert self.logged_in async def listen(self) -> None: \"\"\"Listen to a", "assert self.logged_in.is_set() await self._set_team() await self._send_message(f\"/challenge {username}, {format_}\") async def", "start_listening: self._listening_coroutine = ensure_future(self.listen()) async def _accept_challenge(self, username: str) ->", "= \"|\".join([room, message, message_2]) else: to_send = \"|\".join([room, message]) await", "self._username: # Confirms successful login self.logged_in.set() elif not split_message[2].startswith(\" Guest", "is necessary to log in. :param split_message: Message received from", "self._password, \"challstr\": split_message[2] + \"%7C\" + split_message[3], }, ) self.logger.info(\"Sending", "-> str: \"\"\"The player's username. :return: The player's username. 
:rtype:", "\"login\", \"name\": self._username, \"pass\": self._password, \"challstr\": split_message[2] + \"%7C\" +", "is determined by the first entry in the message #", "Event = Event() self._sending_lock = Lock() self._websocket: websockets.client.WebSocketClientProtocol # pyre-ignore", "self._sending_lock = Lock() self._websocket: websockets.client.WebSocketClientProtocol # pyre-ignore self._logger: Logger =", "\"pm\": self.logger.info(\"Received pm: %s\", split_message) else: self.logger.critical(\"Unhandled message: %s\", message)", "self._team is not None: await self._send_message(\"/utm %s\" % self._team.yield_team()) async", "message: The message to parse. :type message: str \"\"\" try:", "by %s\", e) except Exception as e: self.logger.exception(e) finally: for", "request\") assertion = \"\" await self._send_message(f\"/trn {self._username},0,{assertion}\") await self._change_avatar(self._avatar) async", "If None, nothing happens. :type avatar_id: int \"\"\" await self._wait_for_login()", "int \"\"\" await self._wait_for_login() if avatar_id is not None: await", "be handled.\"\"\" self.logger.info(\"Starting listening to showdown websocket\") coroutines = []", "logged_in(self) -> Event: \"\"\"Event object associated with user login. :return:", "-> Logger: # pyre-ignore \"\"\"Creates a logger for the player.", "id. Optional. :type avatar: int, optional :param log_level: The player's", "message) # Showdown websocket messages are pipe-separated sequences split_message =", "Player configuration. 
:type player_configuration: PlayerConfiguration :param avatar: Player avatar id.", "logged-in event :rtype: Event \"\"\" return self._logged_in @property def logger(self)", "Confirms connection to the server: we can login await self._log_in(split_message)", "self.logger.info(\"Sending authentication request\") assertion = json.loads(log_in_request.text[1:])[\"assertion\"] else: self.logger.info(\"Bypassing authentication request\")", "message contains information sent by the server. This information is", "are pipe-separated sequences split_message = message.split(\"|\") assert len(split_message) > 1", "self.logger.exception(e) finally: for coroutine in coroutines: coroutine.cancel() async def stop_listening(self)", "@abstractmethod async def _handle_battle_message(self, message: str) -> None: \"\"\"Abstract method.", "in coroutines: coroutine.cancel() async def stop_listening(self) -> None: if self._listening_coroutine", "elif not split_message[2].startswith(\" Guest \"): self.logger.warning( \"\"\"Trying to login as", "if split_message[2] == \" \" + self._username: # Confirms successful", "{self._username},0,{assertion}\") await self._change_avatar(self._avatar) async def _search_ladder_game(self, format_): await self._set_team() await", "Logger: # pyre-ignore \"\"\"Creates a logger for the player. Returns", "not None: await self._send_message(\"/utm %s\" % self._team.yield_team()) async def _wait_for_login(", "from poke_env.player_configuration import PlayerConfiguration from poke_env.server_configuration import ServerConfiguration class PlayerNetwork(ABC):", "== \"pm\": self.logger.info(\"Received pm: %s\", split_message) else: self.logger.critical(\"Unhandled message: %s\",", "message_2]) else: to_send = \"|\".join([room, message]) await self._websocket.send(to_send) self.logger.info(\">>> %s\",", "the server url. :return: The websocket url. 
:rtype: str \"\"\"", "async def _set_team(self): if self._team is not None: await self._send_message(\"/utm", "pipe-separated sequences split_message = message.split(\"|\") assert len(split_message) > 1 #", "closed\", self.websocket_url ) except (CancelledError, RuntimeError) as e: self.logger.critical(\"Listen interrupted", "start < wait_for: await sleep(checking_interval) if self.logged_in: return assert self.logged_in", "None ) -> None: \"\"\"Sends a message to the specified", "Optional. :type avatar: int, optional :param log_level: The player's logger", "\"\"\" def __init__( self, player_configuration: PlayerConfiguration, *, avatar: Optional[int] =", "message: str :param room: The room to which the message", "is not None: await self._send_message(\"/utm %s\" % self._team.yield_team()) async def", "import ensure_future from asyncio import Event from asyncio import Lock", "%(name)s - %(levelname)s - %(message)s\" ) stream_handler.setFormatter(formatter) logger.addHandler(stream_handler) return logger", "pm: %s\", split_message) else: self.logger.critical(\"Unhandled message: %s\", message) raise NotImplementedError(\"Unhandled", "authentication request\") assertion = \"\" await self._send_message(f\"/trn {self._username},0,{assertion}\") await self._change_avatar(self._avatar)", "servers. Also implements some higher level methods for basic tasks,", "battles. \"\"\" @abstractmethod async def _update_challenges(self, split_message: List[str]) -> None:", "e) except Exception as exception: self.logger.exception( \"Unhandled exception raised while", "-> None: \"\"\"Handle received messages. :param message: The message to", "configuration. :type player_configuration: PlayerConfiguration :param avatar: Player avatar id. 
Optional.", "_create_player_logger(self, log_level: Optional[int]) -> Logger: # pyre-ignore \"\"\"Creates a logger", "async def _change_avatar(self, avatar_id: Optional[int]) -> None: \"\"\"Changes the player's", "= [] try: async with websockets.connect( self.websocket_url, max_queue=None ) as", "self._websocket.send(to_send) self.logger.info(\">>> %s\", to_send) async def _set_team(self): if self._team is", ":param server_configuration: Server configuration. :type server_configuration: ServerConfiguration :param start_listening: Wheter", "message # For battles, this is the zero-th entry #", "None: \"\"\" :param player_configuration: Player configuration. :type player_configuration: PlayerConfiguration :param", "communicating with showdown servers. \"\"\" import json import logging import", "1 # The type of message is determined by the", "implements some higher level methods for basic tasks, such as", ":param message: The message to parse. :type message: str \"\"\"", "with specified username and password. Split message contains information sent", ":type message_2: str, optional \"\"\" if message_2: to_send = \"|\".join([room,", "The logger. :rtype: Logger \"\"\" return self._logger @property def username(self)", "in websocket: self.logger.info(\"<<< %s\", message) coroutines.append(ensure_future(self._handle_message(message))) except websockets.exceptions.ConnectionClosedOK: self.logger.warning( \"Websocket", "@abstractmethod async def _update_challenges(self, split_message: List[str]) -> None: \"\"\"Abstract method.", "is the zero-th entry # Otherwise it is the one-th", "-> None: \"\"\"Abstract method. Implementation should redirect messages to corresponding", "default level. :param server_configuration: Server configuration. :type server_configuration: ServerConfiguration :param", "as changing avatar and low-level message handling. \"\"\" def __init__(", ":type log_level: int :return: The logger. :rtype: Logger \"\"\" logger", "a message to the specified room. 
`message_2` can be used", "-> None: if self._listening_coroutine is not None: self._listening_coroutine.cancel() await self._websocket.close()", "servers. \"\"\" import json import logging import requests import websockets", "log_level: int. Defaults to logging's default level. :param server_configuration: Server", "%s\", to_send) async def _set_team(self): if self._team is not None:", "ShowdownException from poke_env.player_configuration import PlayerConfiguration from poke_env.server_configuration import ServerConfiguration class", "if self.logged_in: return assert self.logged_in async def listen(self) -> None:", "# pyre-ignore from poke_env.exceptions import ShowdownException from poke_env.player_configuration import PlayerConfiguration", "handled.\"\"\" self.logger.info(\"Starting listening to showdown websocket\") coroutines = [] try:", ":param split_message: Message received from the server that triggers logging", "from typing import List from typing import Optional from aiologger", "sent by the server. This information is necessary to log", "self, checking_interval: float = 0.001, wait_for: int = 5 )", "and dispatch messages to be handled.\"\"\" self.logger.info(\"Starting listening to showdown", "self._update_challenges(split_message) elif split_message[0].startswith(\">battle\"): # Battle update await self._handle_battle_message(message) elif split_message[1]", "The new avatar id. If None, nothing happens. :type avatar_id:", "in. :type split_message: List[str] \"\"\" if self._password: log_in_request = requests.post(", "the sequence to be sent. Optional. :type message_2: str, optional", "[] try: async with websockets.connect( self.websocket_url, max_queue=None ) as websocket:", "\"|\".join([room, message, message_2]) else: to_send = \"|\".join([room, message]) await self._websocket.send(to_send)", "to the server. Defaults to True. 
:type start_listening: bool \"\"\"", "str \"\"\" try: self.logger.debug(\"Received message to handle: %s\", message) #", "await self._update_challenges(split_message) elif split_message[0].startswith(\">battle\"): # Battle update await self._handle_battle_message(message) elif", "json.loads(log_in_request.text[1:])[\"assertion\"] else: self.logger.info(\"Bypassing authentication request\") assertion = \"\" await self._send_message(f\"/trn", "player's username. :return: The player's username. :rtype: str \"\"\" return", "to send a sequence of length 2. :param message: The", "Second element of the sequence to be sent. Optional. :type", "\"\"\"Listen to a showdown websocket and dispatch messages to be", "if log_level is not None: logger.setLevel(log_level) formatter = logging.Formatter( \"%(asctime)s", "self.websocket_url ) except (CancelledError, RuntimeError) as e: self.logger.critical(\"Listen interrupted by", "\"%7C\" + split_message[3], }, ) self.logger.info(\"Sending authentication request\") assertion =", "connection to the server: we can login await self._log_in(split_message) elif", "\"\"\"Changes the player's avatar. :param avatar_id: The new avatar id.", "the zero-th entry # Otherwise it is the one-th entry", "self._send_message(\"/utm %s\" % self._team.yield_team()) async def _wait_for_login( self, checking_interval: float", "received: %s\", message) raise ShowdownException(\"Error message received: %s\", message) elif", "- %(name)s - %(levelname)s - %(message)s\" ) stream_handler.setFormatter(formatter) logger.addHandler(stream_handler) return", "by the first entry in the message # For battles,", "to corresponding battles. 
\"\"\" @abstractmethod async def _update_challenges(self, split_message: List[str])", "= message.split(\"|\") assert len(split_message) > 1 # The type of", "self._username, \"pass\": self._password, \"challstr\": split_message[2] + \"%7C\" + split_message[3], },", "update await self._handle_battle_message(message) elif split_message[1] == \"updatesearch\": self.logger.debug(\"Ignored message: %s\",", "None: \"\"\"Abstract method. Implementation should redirect messages to corresponding battles.", "self.logged_in.set() elif not split_message[2].startswith(\" Guest \"): self.logger.warning( \"\"\"Trying to login", "from poke_env.server_configuration import ServerConfiguration class PlayerNetwork(ABC): \"\"\" Network interface of", ") stream_handler.setFormatter(formatter) logger.addHandler(stream_handler) return logger async def _handle_message(self, message: str)", "Exception as exception: self.logger.exception( \"Unhandled exception raised while handling message:\\n%s\",", "None, server_configuration: ServerConfiguration, start_listening: bool = True, ) -> None:", ":param log_level: The player's logger level. :type log_level: int. Defaults", "the player's username before messages. :param log_level: The logger's level.", "# pyre-ignore if start_listening: self._listening_coroutine = ensure_future(self.listen()) async def _accept_challenge(self,", "player_configuration: PlayerConfiguration :param avatar: Player avatar id. Optional. :type avatar:", "the first entry in the message # For battles, this", "\"\"\" \"\"\"Changing the agent's username might solve this problem.\"\"\", self.username,", "redirect messages to corresponding battles. 
\"\"\" @abstractmethod async def _update_challenges(self,", "is not None: self._listening_coroutine.cancel() await self._websocket.close() @abstractmethod async def _handle_battle_message(self,", "from asyncio import Event from asyncio import Lock from asyncio", "_challenge(self, username: str, format_: str): assert self.logged_in.is_set() await self._set_team() await", "< wait_for: await sleep(checking_interval) if self.logged_in: return assert self.logged_in async", "avatar. :param avatar_id: The new avatar id. If None, nothing", "current challenge await self._update_challenges(split_message) elif split_message[0].startswith(\">battle\"): # Battle update await", "\"pass\": self._password, \"challstr\": split_message[2] + \"%7C\" + split_message[3], }, )", "ensure_future(self.listen()) async def _accept_challenge(self, username: str) -> None: assert self.logged_in.is_set()", "for message in websocket: self.logger.info(\"<<< %s\", message) coroutines.append(ensure_future(self._handle_message(message))) except websockets.exceptions.ConnectionClosedOK:", "message: str, room: str = \"\", message_2: Optional[str] = None", "listening to the server. Defaults to True. :type start_listening: bool", "str :param message_2: Second element of the sequence to be", "%(message)s\" ) stream_handler.setFormatter(formatter) logger.addHandler(stream_handler) return logger async def _handle_message(self, message:", "# For battles, this is the zero-th entry # Otherwise", "async def _challenge(self, username: str, format_: str): assert self.logged_in.is_set() await", "Logger \"\"\" logger = logging.getLogger(self._username) stream_handler = logging.StreamHandler() if log_level", "_handle_message(self, message: str) -> None: \"\"\"Handle received messages. 
:param message:", "int = 5 ) -> None: start = perf_counter() while", "methods for basic tasks, such as changing avatar and low-level", "message received: %s\", message) elif split_message[1] in [\"nametaken\"]: self.logger.critical(\"Error message", "await sleep(checking_interval) if self.logged_in: return assert self.logged_in async def listen(self)", "level. :type log_level: int :return: The logger. :rtype: Logger \"\"\"", "should be sent. :type room: str :param message_2: Second element", "a base class for communicating with showdown servers. \"\"\" import", "by the server. This information is necessary to log in.", "try: self.logger.debug(\"Received message to handle: %s\", message) # Showdown websocket", "self._authentication_url = server_configuration.authentication_url self._avatar = avatar self._password = player_configuration.password self._username", "await self._send_message(\"/utm %s\" % self._team.yield_team()) async def _wait_for_login( self, checking_interval:", ") elif \"updatechallenges\" in split_message[1]: # Contain information about current", "message: The message to send. :type message: str :param room:", "prevent future actions from this agent. \"\"\" \"\"\"Changing the agent's", "= \"|\".join([room, message]) await self._websocket.send(to_send) self.logger.info(\">>> %s\", to_send) async def", ":return: The player's username. :rtype: str \"\"\" return self._username @property", "one-th entry if split_message[1] == \"challstr\": # Confirms connection to", "derived from the server url. :return: The websocket url. :rtype:", "self._server_url = server_configuration.server_url self._logged_in: Event = Event() self._sending_lock = Lock()", "be sent. :type room: str :param message_2: Second element of", "await self._send_message(f\"/challenge {username}, {format_}\") async def _change_avatar(self, avatar_id: Optional[int]) ->", "avatar: int, optional :param log_level: The player's logger level. :type", "The logger. 
:rtype: Logger \"\"\" logger = logging.getLogger(self._username) stream_handler =", "await self._send_message(f\"/avatar {avatar_id}\") def _create_player_logger(self, log_level: Optional[int]) -> Logger: #", "self.logger.warning( \"Websocket connection with %s closed\", self.websocket_url ) except (CancelledError,", "Network interface of a player. Responsible for communicating with showdown", "-> None: \"\"\"Sends a message to the specified room. `message_2`", "to_send = \"|\".join([room, message]) await self._websocket.send(to_send) self.logger.info(\">>> %s\", to_send) async", "elif split_message[1] == \"updatesearch\": self.logger.debug(\"Ignored message: %s\", message) pass elif", "in the message # For battles, this is the zero-th", "specified username and password. Split message contains information sent by", "room: The room to which the message should be sent.", "the agent's username might solve this problem.\"\"\", self.username, split_message[2], )", "%s\" % message) except CancelledError as e: self.logger.critical(\"CancelledError intercepted. %s\",", "<filename>src/poke_env/player/player_network_interface.py<gh_stars>0 # -*- coding: utf-8 -*- \"\"\"This module defines a", "level. :param server_configuration: Server configuration. :type server_configuration: ServerConfiguration :param start_listening:", "\"challstr\": split_message[2] + \"%7C\" + split_message[3], }, ) self.logger.info(\"Sending authentication", "= 5 ) -> None: start = perf_counter() while perf_counter()", "password. Split message contains information sent by the server. This", "with %s closed\", self.websocket_url ) except (CancelledError, RuntimeError) as e:", "import PlayerConfiguration from poke_env.server_configuration import ServerConfiguration class PlayerNetwork(ABC): \"\"\" Network", "\"\"\"The websocket url. It is derived from the server url.", "this might prevent future actions from this agent. \"\"\" \"\"\"Changing", "should redirect messages to corresponding battles. 
\"\"\" @abstractmethod async def", ":param room: The room to which the message should be", "None: \"\"\"Log the player with specified username and password. Split", "This information is necessary to log in. :param split_message: Message", "from abc import abstractmethod from asyncio import CancelledError from asyncio", "showdown servers. Also implements some higher level methods for basic", "return logger async def _handle_message(self, message: str) -> None: \"\"\"Handle", "PlayerConfiguration from poke_env.server_configuration import ServerConfiguration class PlayerNetwork(ABC): \"\"\" Network interface", "def logger(self) -> Logger: # pyre-ignore \"\"\"Logger associated with the", "a showdown websocket and dispatch messages to be handled.\"\"\" self.logger.info(\"Starting", "= requests.post( self._authentication_url, data={ \"act\": \"login\", \"name\": self._username, \"pass\": self._password,", "asyncio import Lock from asyncio import sleep from time import", "pyre-ignore from poke_env.exceptions import ShowdownException from poke_env.player_configuration import PlayerConfiguration from", "login. :return: The logged-in event :rtype: Event \"\"\" return self._logged_in", "to parse. :type message: str \"\"\" try: self.logger.debug(\"Received message to", "self.logger.warning(\"Popup message received: %s\", message) elif split_message[1] in [\"nametaken\"]: self.logger.critical(\"Error", "Defaults to True. :type start_listening: bool \"\"\" self._authentication_url = server_configuration.authentication_url", "information sent by the server. This information is necessary to", "self._set_team() await self._send_message(\"/accept %s\" % username) async def _challenge(self, username:", "\"\"\"Sends a message to the specified room. 
`message_2` can be", "server_configuration.authentication_url self._avatar = avatar self._password = player_configuration.password self._username = player_configuration.username", ") -> None: \"\"\" :param player_configuration: Player configuration. :type player_configuration:", "Implementation should redirect messages to corresponding battles. \"\"\" @abstractmethod async", "import ServerConfiguration class PlayerNetwork(ABC): \"\"\" Network interface of a player.", "raise exception async def _log_in(self, split_message: List[str]) -> None: \"\"\"Log", "\"\"\" if message_2: to_send = \"|\".join([room, message, message_2]) else: to_send", ":return: The logger. :rtype: Logger \"\"\" logger = logging.getLogger(self._username) stream_handler", "username(self) -> str: \"\"\"The player's username. :return: The player's username.", ":type room: str :param message_2: Second element of the sequence", "changing avatar and low-level message handling. \"\"\" def __init__( self,", "self.logger.warning( \"\"\"Trying to login as %s, showdown returned %s \"\"\"", "message_2: Second element of the sequence to be sent. Optional.", "message should be sent. :type room: str :param message_2: Second", "raised while handling message:\\n%s\", message ) raise exception async def", ":param avatar: Player avatar id. Optional. :type avatar: int, optional", "Player avatar id. Optional. :type avatar: int, optional :param log_level:", "for basic tasks, such as changing avatar and low-level message", "message to handle: %s\", message) # Showdown websocket messages are", ":param message_2: Second element of the sequence to be sent.", "log in. :param split_message: Message received from the server that", "0.001, wait_for: int = 5 ) -> None: start =", "is the one-th entry if split_message[1] == \"challstr\": # Confirms", ":param start_listening: Wheter to start listening to the server. 
Defaults", "= None, server_configuration: ServerConfiguration, start_listening: bool = True, ) ->", "= True, ) -> None: \"\"\" :param player_configuration: Player configuration.", "-> None: \"\"\"Changes the player's avatar. :param avatar_id: The new", "def _accept_challenge(self, username: str) -> None: assert self.logged_in.is_set() await self._set_team()", ") -> None: \"\"\"Sends a message to the specified room.", "\"\" await self._send_message(f\"/trn {self._username},0,{assertion}\") await self._change_avatar(self._avatar) async def _search_ladder_game(self, format_):", "coroutines.append(ensure_future(self._handle_message(message))) except websockets.exceptions.ConnectionClosedOK: self.logger.warning( \"Websocket connection with %s closed\", self.websocket_url", "e) except Exception as e: self.logger.exception(e) finally: for coroutine in", "\"\"\"Log the player with specified username and password. Split message", "e: self.logger.exception(e) finally: for coroutine in coroutines: coroutine.cancel() async def", "Logger \"\"\" return self._logger @property def username(self) -> str: \"\"\"The", "{format_}\") async def _send_message( self, message: str, room: str =", "self._logged_in: Event = Event() self._sending_lock = Lock() self._websocket: websockets.client.WebSocketClientProtocol #", "self._set_team() await self._send_message(f\"/search {format_}\") async def _send_message( self, message: str,", "start_listening: bool = True, ) -> None: \"\"\" :param player_configuration:", "%s, showdown returned %s \"\"\" \"\"\"- this might prevent future", "self.logger.info(\"<<< %s\", message) coroutines.append(ensure_future(self._handle_message(message))) except websockets.exceptions.ConnectionClosedOK: self.logger.warning( \"Websocket connection with", ":param avatar_id: The new avatar id. If None, nothing happens.", "new avatar id. If None, nothing happens. 
:type avatar_id: int", "async for message in websocket: self.logger.info(\"<<< %s\", message) coroutines.append(ensure_future(self._handle_message(message))) except", "websocket and dispatch messages to be handled.\"\"\" self.logger.info(\"Starting listening to", "\"updatechallenges\" in split_message[1]: # Contain information about current challenge await", "pass elif split_message[1] == \"popup\": self.logger.warning(\"Popup message received: %s\", message)", "-> None: start = perf_counter() while perf_counter() - start <", "player's username. :rtype: str \"\"\" return self._username @property def websocket_url(self)", "import sleep from time import perf_counter from typing import List", "-> None: \"\"\"Listen to a showdown websocket and dispatch messages", "to log in. :param split_message: Message received from the server", "async def _wait_for_login( self, checking_interval: float = 0.001, wait_for: int", "@property def websocket_url(self) -> str: \"\"\"The websocket url. It is", "Optional from aiologger import Logger # pyre-ignore from poke_env.exceptions import", "to_send) async def _set_team(self): if self._team is not None: await", "await self._send_message(f\"/search {format_}\") async def _send_message( self, message: str, room:", "def _update_challenges(self, split_message: List[str]) -> None: \"\"\"Abstract method. Implementation should", "\"Websocket connection with %s closed\", self.websocket_url ) except (CancelledError, RuntimeError)", "2. :param message: The message to send. 
:type message: str", "str \"\"\" return self._username @property def websocket_url(self) -> str: \"\"\"The", "# Showdown websocket messages are pipe-separated sequences split_message = message.split(\"|\")", "information about current challenge await self._update_challenges(split_message) elif split_message[0].startswith(\">battle\"): # Battle", "self._websocket = websocket async for message in websocket: self.logger.info(\"<<< %s\",", "if message_2: to_send = \"|\".join([room, message, message_2]) else: to_send =", "self._authentication_url, data={ \"act\": \"login\", \"name\": self._username, \"pass\": self._password, \"challstr\": split_message[2]", "not None: logger.setLevel(log_level) formatter = logging.Formatter( \"%(asctime)s - %(name)s -", "specified room. `message_2` can be used to send a sequence", "avatar id. Optional. :type avatar: int, optional :param log_level: The", ":param log_level: The logger's level. :type log_level: int :return: The", "low-level message handling. \"\"\" def __init__( self, player_configuration: PlayerConfiguration, *,", "Defaults to logging's default level. :param server_configuration: Server configuration. :type", "CancelledError from asyncio import ensure_future from asyncio import Event from", "showdown returned %s \"\"\" \"\"\"- this might prevent future actions", "poke_env.exceptions import ShowdownException from poke_env.player_configuration import PlayerConfiguration from poke_env.server_configuration import", "Wheter to start listening to the server. Defaults to True.", "checking_interval: float = 0.001, wait_for: int = 5 ) ->", "while perf_counter() - start < wait_for: await sleep(checking_interval) if self.logged_in:", "return self._logged_in @property def logger(self) -> Logger: # pyre-ignore \"\"\"Logger", "received messages. :param message: The message to parse. 
:type message:", "start = perf_counter() while perf_counter() - start < wait_for: await", "avatar_id: Optional[int]) -> None: \"\"\"Changes the player's avatar. :param avatar_id:", "self._send_message(f\"/challenge {username}, {format_}\") async def _change_avatar(self, avatar_id: Optional[int]) -> None:", "displaying asctime and the player's username before messages. :param log_level:", "url. It is derived from the server url. :return: The", "parse. :type message: str \"\"\" try: self.logger.debug(\"Received message to handle:", "current challenges. \"\"\" @property def logged_in(self) -> Event: \"\"\"Event object", "coroutine.cancel() async def stop_listening(self) -> None: if self._listening_coroutine is not", "logger. :rtype: Logger \"\"\" logger = logging.getLogger(self._username) stream_handler = logging.StreamHandler()", "received: %s\", message) elif split_message[1] in [\"nametaken\"]: self.logger.critical(\"Error message received:", "can be used to send a sequence of length 2.", "\"\"\"This module defines a base class for communicating with showdown", "username and password. Split message contains information sent by the", "split_message: List[str] \"\"\" if self._password: log_in_request = requests.post( self._authentication_url, data={", "element of the sequence to be sent. Optional. :type message_2:", "% self._team.yield_team()) async def _wait_for_login( self, checking_interval: float = 0.001,", "def _handle_battle_message(self, message: str) -> None: \"\"\"Abstract method. Implementation should", "await self._send_message(f\"/trn {self._username},0,{assertion}\") await self._change_avatar(self._avatar) async def _search_ladder_game(self, format_): await", "self._send_message(f\"/trn {self._username},0,{assertion}\") await self._change_avatar(self._avatar) async def _search_ladder_game(self, format_): await self._set_team()", "messages to corresponding battles. 
\"\"\" @abstractmethod async def _update_challenges(self, split_message:", "formatter = logging.Formatter( \"%(asctime)s - %(name)s - %(levelname)s - %(message)s\"", "elif split_message[0].startswith(\">battle\"): # Battle update await self._handle_battle_message(message) elif split_message[1] ==", "for coroutine in coroutines: coroutine.cancel() async def stop_listening(self) -> None:", "_wait_for_login( self, checking_interval: float = 0.001, wait_for: int = 5", "\"\"\"Logger associated with the player. :return: The logger. :rtype: Logger", "higher level methods for basic tasks, such as changing avatar", "actions from this agent. \"\"\" \"\"\"Changing the agent's username might", "messages. :param message: The message to parse. :type message: str", "-*- coding: utf-8 -*- \"\"\"This module defines a base class", "import perf_counter from typing import List from typing import Optional", "Lock() self._websocket: websockets.client.WebSocketClientProtocol # pyre-ignore self._logger: Logger = self._create_player_logger(log_level) #", "asyncio import sleep from time import perf_counter from typing import", "the one-th entry if split_message[1] == \"challstr\": # Confirms connection", "raise ShowdownException(\"Error message received: %s\", message) elif split_message[1] == \"pm\":", "PlayerNetwork(ABC): \"\"\" Network interface of a player. Responsible for communicating", "def __init__( self, player_configuration: PlayerConfiguration, *, avatar: Optional[int] = None,", "the player with specified username and password. Split message contains", "triggers logging in. :type split_message: List[str] \"\"\" if self._password: log_in_request", "str) -> None: \"\"\"Handle received messages. 
:param message: The message", "Event \"\"\" return self._logged_in @property def logger(self) -> Logger: #", "server: we can login await self._log_in(split_message) elif split_message[1] == \"updateuser\":", ":type avatar: int, optional :param log_level: The player's logger level.", "not split_message[2].startswith(\" Guest \"): self.logger.warning( \"\"\"Trying to login as %s,", "coding: utf-8 -*- \"\"\"This module defines a base class for", "server_configuration: ServerConfiguration :param start_listening: Wheter to start listening to the", "username: str, format_: str): assert self.logged_in.is_set() await self._set_team() await self._send_message(f\"/challenge", "message: %s\", message) pass elif split_message[1] == \"popup\": self.logger.warning(\"Popup message", "elif split_message[1] in [\"nametaken\"]: self.logger.critical(\"Error message received: %s\", message) raise", "to True. :type start_listening: bool \"\"\" self._authentication_url = server_configuration.authentication_url self._avatar", "split_message) else: self.logger.critical(\"Unhandled message: %s\", message) raise NotImplementedError(\"Unhandled message: %s\"", "The player's username. :rtype: str \"\"\" return self._username @property def", "Server configuration. :type server_configuration: ServerConfiguration :param start_listening: Wheter to start", "= player_configuration.username self._server_url = server_configuration.server_url self._logged_in: Event = Event() self._sending_lock", "@property def username(self) -> str: \"\"\"The player's username. :return: The", "challenge await self._update_challenges(split_message) elif split_message[0].startswith(\">battle\"): # Battle update await self._handle_battle_message(message)", "-*- \"\"\"This module defines a base class for communicating with", ":type log_level: int. Defaults to logging's default level. 
:param server_configuration:", "str, optional \"\"\" if message_2: to_send = \"|\".join([room, message, message_2])", "= websocket async for message in websocket: self.logger.info(\"<<< %s\", message)", "{format_}\") async def _change_avatar(self, avatar_id: Optional[int]) -> None: \"\"\"Changes the", "split_message[1]: # Contain information about current challenge await self._update_challenges(split_message) elif", "while handling message:\\n%s\", message ) raise exception async def _log_in(self,", "object associated with user login. :return: The logged-in event :rtype:", "player's logger level. :type log_level: int. Defaults to logging's default", "ServerConfiguration :param start_listening: Wheter to start listening to the server.", "log_level is not None: logger.setLevel(log_level) formatter = logging.Formatter( \"%(asctime)s -", "exception raised while handling message:\\n%s\", message ) raise exception async", "The logger's level. :type log_level: int :return: The logger. :rtype:", "None: \"\"\"Handle received messages. :param message: The message to parse.", "%s\", message) pass elif split_message[1] == \"popup\": self.logger.warning(\"Popup message received:", "self.logged_in async def listen(self) -> None: \"\"\"Listen to a showdown", "max_queue=None ) as websocket: self._websocket = websocket async for message", "log_level: Optional[int] = None, server_configuration: ServerConfiguration, start_listening: bool = True,", "\"\"\"The player's username. :return: The player's username. :rtype: str \"\"\"", "room: str :param message_2: Second element of the sequence to", "\"updatesearch\": self.logger.debug(\"Ignored message: %s\", message) pass elif split_message[1] == \"popup\":", ") -> None: start = perf_counter() while perf_counter() - start", "of the sequence to be sent. Optional. :type message_2: str,", "future actions from this agent. \"\"\" \"\"\"Changing the agent's username", "None: \"\"\"Sends a message to the specified room. 
`message_2` can", "Also implements some higher level methods for basic tasks, such", "split_message[2].startswith(\" Guest \"): self.logger.warning( \"\"\"Trying to login as %s, showdown", "Optional. :type message_2: str, optional \"\"\" if message_2: to_send =", "a logger for the player. Returns a Logger displaying asctime", "optional :param log_level: The player's logger level. :type log_level: int.", "Optional[int]) -> None: \"\"\"Changes the player's avatar. :param avatar_id: The", "is derived from the server url. :return: The websocket url.", "stop_listening(self) -> None: if self._listening_coroutine is not None: self._listening_coroutine.cancel() await", "handle: %s\", message) # Showdown websocket messages are pipe-separated sequences", "Battle update await self._handle_battle_message(message) elif split_message[1] == \"updatesearch\": self.logger.debug(\"Ignored message:", "solve this problem.\"\"\", self.username, split_message[2], ) elif \"updatechallenges\" in split_message[1]:", "message) elif split_message[1] in [\"nametaken\"]: self.logger.critical(\"Error message received: %s\", message)", "@property def logged_in(self) -> Event: \"\"\"Event object associated with user", "logging.Formatter( \"%(asctime)s - %(name)s - %(levelname)s - %(message)s\" ) stream_handler.setFormatter(formatter)", "= logging.Formatter( \"%(asctime)s - %(name)s - %(levelname)s - %(message)s\" )", "pyre-ignore from abc import ABC from abc import abstractmethod from", "some higher level methods for basic tasks, such as changing", "raise NotImplementedError(\"Unhandled message: %s\" % message) except CancelledError as e:", "-> Event: \"\"\"Event object associated with user login. 
:return: The", "str :param room: The room to which the message should", "None: assert self.logged_in.is_set() await self._set_team() await self._send_message(\"/accept %s\" % username)", "if self._listening_coroutine is not None: self._listening_coroutine.cancel() await self._websocket.close() @abstractmethod async", "to logging's default level. :param server_configuration: Server configuration. :type server_configuration:", "def _send_message( self, message: str, room: str = \"\", message_2:", "message: str \"\"\" try: self.logger.debug(\"Received message to handle: %s\", message)", "log_level: Optional[int]) -> Logger: # pyre-ignore \"\"\"Creates a logger for", "%s closed\", self.websocket_url ) except (CancelledError, RuntimeError) as e: self.logger.critical(\"Listen", "def _handle_message(self, message: str) -> None: \"\"\"Handle received messages. :param", "await self._change_avatar(self._avatar) async def _search_ladder_game(self, format_): await self._set_team() await self._send_message(f\"/search", ") as websocket: self._websocket = websocket async for message in", "# Confirms successful login self.logged_in.set() elif not split_message[2].startswith(\" Guest \"):", "exception: self.logger.exception( \"Unhandled exception raised while handling message:\\n%s\", message )", "def _challenge(self, username: str, format_: str): assert self.logged_in.is_set() await self._set_team()", "None: \"\"\"Changes the player's avatar. :param avatar_id: The new avatar", "from poke_env.exceptions import ShowdownException from poke_env.player_configuration import PlayerConfiguration from poke_env.server_configuration", "log_level: int :return: The logger. 
:rtype: Logger \"\"\" logger =", "logger = logging.getLogger(self._username) stream_handler = logging.StreamHandler() if log_level is not", "successful login self.logged_in.set() elif not split_message[2].startswith(\" Guest \"): self.logger.warning( \"\"\"Trying", "Guest \"): self.logger.warning( \"\"\"Trying to login as %s, showdown returned", "message ) raise exception async def _log_in(self, split_message: List[str]) ->", "\"): self.logger.warning( \"\"\"Trying to login as %s, showdown returned %s", "and password. Split message contains information sent by the server.", "_handle_battle_message(self, message: str) -> None: \"\"\"Abstract method. Implementation should redirect", "Implementation should keep track of current challenges. \"\"\" @property def", "from abc import ABC from abc import abstractmethod from asyncio", "\"\"\" Network interface of a player. Responsible for communicating with", "import abstractmethod from asyncio import CancelledError from asyncio import ensure_future", "ServerConfiguration, start_listening: bool = True, ) -> None: \"\"\" :param", "sequence to be sent. Optional. :type message_2: str, optional \"\"\"", "== \"challstr\": # Confirms connection to the server: we can", "dispatch messages to be handled.\"\"\" self.logger.info(\"Starting listening to showdown websocket\")", ":rtype: Event \"\"\" return self._logged_in @property def logger(self) -> Logger:", ") except (CancelledError, RuntimeError) as e: self.logger.critical(\"Listen interrupted by %s\",", "self._websocket: websockets.client.WebSocketClientProtocol # pyre-ignore self._logger: Logger = self._create_player_logger(log_level) # pyre-ignore", "the server. This information is necessary to log in. :param", "\"name\": self._username, \"pass\": self._password, \"challstr\": split_message[2] + \"%7C\" + split_message[3],", "It is derived from the server url. 
:return: The websocket", "to_send = \"|\".join([room, message, message_2]) else: to_send = \"|\".join([room, message])", "= None ) -> None: \"\"\"Sends a message to the", "== \"updatesearch\": self.logger.debug(\"Ignored message: %s\", message) pass elif split_message[1] ==", "_send_message( self, message: str, room: str = \"\", message_2: Optional[str]", "def websocket_url(self) -> str: \"\"\"The websocket url. It is derived", "sequences split_message = message.split(\"|\") assert len(split_message) > 1 # The", "\"\"\"Abstract method. Implementation should redirect messages to corresponding battles. \"\"\"", "return self._logger @property def username(self) -> str: \"\"\"The player's username.", "None: await self._send_message(f\"/avatar {avatar_id}\") def _create_player_logger(self, log_level: Optional[int]) -> Logger:", "\"\"\" logger = logging.getLogger(self._username) stream_handler = logging.StreamHandler() if log_level is", "PlayerConfiguration :param avatar: Player avatar id. Optional. :type avatar: int,", "configuration. :type server_configuration: ServerConfiguration :param start_listening: Wheter to start listening", "= Lock() self._websocket: websockets.client.WebSocketClientProtocol # pyre-ignore self._logger: Logger = self._create_player_logger(log_level)", "elif split_message[1] == \"updateuser\": if split_message[2] == \" \" +", ":type player_configuration: PlayerConfiguration :param avatar: Player avatar id. Optional. :type", "def _set_team(self): if self._team is not None: await self._send_message(\"/utm %s\"", "websockets.exceptions.ConnectionClosedOK: self.logger.warning( \"Websocket connection with %s closed\", self.websocket_url ) except", "_set_team(self): if self._team is not None: await self._send_message(\"/utm %s\" %", "import websockets # pyre-ignore from abc import ABC from abc", "from this agent. 
\"\"\" \"\"\"Changing the agent's username might solve", "_log_in(self, split_message: List[str]) -> None: \"\"\"Log the player with specified", ":rtype: Logger \"\"\" logger = logging.getLogger(self._username) stream_handler = logging.StreamHandler() if", "corresponding battles. \"\"\" @abstractmethod async def _update_challenges(self, split_message: List[str]) ->", "NotImplementedError(\"Unhandled message: %s\" % message) except CancelledError as e: self.logger.critical(\"CancelledError", "+ self._username: # Confirms successful login self.logged_in.set() elif not split_message[2].startswith(\"", "# pyre-ignore from abc import ABC from abc import abstractmethod", "\"\"\"Handle received messages. :param message: The message to parse. :type", "login as %s, showdown returned %s \"\"\" \"\"\"- this might", "be sent. Optional. :type message_2: str, optional \"\"\" if message_2:", "Returns a Logger displaying asctime and the player's username before", "websocket: self._websocket = websocket async for message in websocket: self.logger.info(\"<<<", "\"\"\" :param player_configuration: Player configuration. :type player_configuration: PlayerConfiguration :param avatar:", "from the server url. :return: The websocket url. :rtype: str", "logging's default level. :param server_configuration: Server configuration. 
:type server_configuration: ServerConfiguration", "exception async def _log_in(self, split_message: List[str]) -> None: \"\"\"Log the", "message_2: to_send = \"|\".join([room, message, message_2]) else: to_send = \"|\".join([room,", "= server_configuration.server_url self._logged_in: Event = Event() self._sending_lock = Lock() self._websocket:", "self._set_team() await self._send_message(f\"/challenge {username}, {format_}\") async def _change_avatar(self, avatar_id: Optional[int])", "it is the one-th entry if split_message[1] == \"challstr\": #", "else: self.logger.info(\"Bypassing authentication request\") assertion = \"\" await self._send_message(f\"/trn {self._username},0,{assertion}\")", "player. Responsible for communicating with showdown servers. Also implements some", "the server. Defaults to True. :type start_listening: bool \"\"\" self._authentication_url", "split_message[3], }, ) self.logger.info(\"Sending authentication request\") assertion = json.loads(log_in_request.text[1:])[\"assertion\"] else:", "zero-th entry # Otherwise it is the one-th entry if", "= None, log_level: Optional[int] = None, server_configuration: ServerConfiguration, start_listening: bool", "entry # Otherwise it is the one-th entry if split_message[1]", "not None: await self._send_message(f\"/avatar {avatar_id}\") def _create_player_logger(self, log_level: Optional[int]) ->", "message) except CancelledError as e: self.logger.critical(\"CancelledError intercepted. %s\", e) except", "self.logger.critical(\"CancelledError intercepted. %s\", e) except Exception as exception: self.logger.exception( \"Unhandled", "server that triggers logging in. :type split_message: List[str] \"\"\" if", "ServerConfiguration class PlayerNetwork(ABC): \"\"\" Network interface of a player. 
Responsible", "await self._set_team() await self._send_message(\"/accept %s\" % username) async def _challenge(self,", "sleep from time import perf_counter from typing import List from", "showdown websocket and dispatch messages to be handled.\"\"\" self.logger.info(\"Starting listening", "\"\"\" if self._password: log_in_request = requests.post( self._authentication_url, data={ \"act\": \"login\",", "self.logger.critical(\"Listen interrupted by %s\", e) except Exception as e: self.logger.exception(e)", "the message # For battles, this is the zero-th entry", "message to parse. :type message: str \"\"\" try: self.logger.debug(\"Received message", "login await self._log_in(split_message) elif split_message[1] == \"updateuser\": if split_message[2] ==", "%(levelname)s - %(message)s\" ) stream_handler.setFormatter(formatter) logger.addHandler(stream_handler) return logger async def", "start_listening: Wheter to start listening to the server. Defaults to", "room. `message_2` can be used to send a sequence of", "self.logger.critical(\"Unhandled message: %s\", message) raise NotImplementedError(\"Unhandled message: %s\" % message)", "self._team.yield_team()) async def _wait_for_login( self, checking_interval: float = 0.001, wait_for:", "might prevent future actions from this agent. \"\"\" \"\"\"Changing the", "self.logger.debug(\"Received message to handle: %s\", message) # Showdown websocket messages", "logging in. :type split_message: List[str] \"\"\" if self._password: log_in_request =", "avatar_id: The new avatar id. If None, nothing happens. :type", "message to send. :type message: str :param room: The room", "PlayerConfiguration, *, avatar: Optional[int] = None, log_level: Optional[int] = None,", "int. Defaults to logging's default level. :param server_configuration: Server configuration.", "avatar: Player avatar id. Optional. 
:type avatar: int, optional :param", ":type server_configuration: ServerConfiguration :param start_listening: Wheter to start listening to", "to be sent. Optional. :type message_2: str, optional \"\"\" if", "async def stop_listening(self) -> None: if self._listening_coroutine is not None:", "bool = True, ) -> None: \"\"\" :param player_configuration: Player", "of current challenges. \"\"\" @property def logged_in(self) -> Event: \"\"\"Event", "logger's level. :type log_level: int :return: The logger. :rtype: Logger", "the player. :return: The logger. :rtype: Logger \"\"\" return self._logger", "connection with %s closed\", self.websocket_url ) except (CancelledError, RuntimeError) as", "None, nothing happens. :type avatar_id: int \"\"\" await self._wait_for_login() if", ") raise exception async def _log_in(self, split_message: List[str]) -> None:", "of a player. Responsible for communicating with showdown servers. Also", "split_message[1] == \"challstr\": # Confirms connection to the server: we", "contains information sent by the server. This information is necessary", "\"\", message_2: Optional[str] = None ) -> None: \"\"\"Sends a", "message) coroutines.append(ensure_future(self._handle_message(message))) except websockets.exceptions.ConnectionClosedOK: self.logger.warning( \"Websocket connection with %s closed\",", "asctime and the player's username before messages. 
:param log_level: The", "abc import ABC from abc import abstractmethod from asyncio import", "to the server: we can login await self._log_in(split_message) elif split_message[1]", "split_message[2], ) elif \"updatechallenges\" in split_message[1]: # Contain information about", "import Logger # pyre-ignore from poke_env.exceptions import ShowdownException from poke_env.player_configuration", "str = \"\", message_2: Optional[str] = None ) -> None:", "if avatar_id is not None: await self._send_message(f\"/avatar {avatar_id}\") def _create_player_logger(self,", "not None: self._listening_coroutine.cancel() await self._websocket.close() @abstractmethod async def _handle_battle_message(self, message:", "-> None: \"\"\"Abstract method. Implementation should keep track of current", "\"\"\" return self._username @property def websocket_url(self) -> str: \"\"\"The websocket", "websocket async for message in websocket: self.logger.info(\"<<< %s\", message) coroutines.append(ensure_future(self._handle_message(message)))", "else: to_send = \"|\".join([room, message]) await self._websocket.send(to_send) self.logger.info(\">>> %s\", to_send)", "if self._team is not None: await self._send_message(\"/utm %s\" % self._team.yield_team())", "player. :return: The logger. :rtype: Logger \"\"\" return self._logger @property", "e: self.logger.critical(\"CancelledError intercepted. %s\", e) except Exception as exception: self.logger.exception(", "start listening to the server. Defaults to True. :type start_listening:", "5 ) -> None: start = perf_counter() while perf_counter() -", "-> Logger: # pyre-ignore \"\"\"Logger associated with the player. :return:", "abstractmethod from asyncio import CancelledError from asyncio import ensure_future from", "username. 
:rtype: str \"\"\" return self._username @property def websocket_url(self) ->", ") self.logger.info(\"Sending authentication request\") assertion = json.loads(log_in_request.text[1:])[\"assertion\"] else: self.logger.info(\"Bypassing authentication", "format_): await self._set_team() await self._send_message(f\"/search {format_}\") async def _send_message( self,", "with user login. :return: The logged-in event :rtype: Event \"\"\"", "Exception as e: self.logger.exception(e) finally: for coroutine in coroutines: coroutine.cancel()", "websocket messages are pipe-separated sequences split_message = message.split(\"|\") assert len(split_message)", "Event() self._sending_lock = Lock() self._websocket: websockets.client.WebSocketClientProtocol # pyre-ignore self._logger: Logger", "split_message[2] == \" \" + self._username: # Confirms successful login", "entry if split_message[1] == \"challstr\": # Confirms connection to the", "request\") assertion = json.loads(log_in_request.text[1:])[\"assertion\"] else: self.logger.info(\"Bypassing authentication request\") assertion =", "websockets # pyre-ignore from abc import ABC from abc import", "as exception: self.logger.exception( \"Unhandled exception raised while handling message:\\n%s\", message", "Event: \"\"\"Event object associated with user login. :return: The logged-in", "str) -> None: \"\"\"Abstract method. Implementation should redirect messages to", "necessary to log in. :param split_message: Message received from the", "avatar_id is not None: await self._send_message(f\"/avatar {avatar_id}\") def _create_player_logger(self, log_level:", "type of message is determined by the first entry in", "can login await self._log_in(split_message) elif split_message[1] == \"updateuser\": if split_message[2]", "to send. 
:type message: str :param room: The room to", "def _create_player_logger(self, log_level: Optional[int]) -> Logger: # pyre-ignore \"\"\"Creates a", "self._log_in(split_message) elif split_message[1] == \"updateuser\": if split_message[2] == \" \"", "message) raise NotImplementedError(\"Unhandled message: %s\" % message) except CancelledError as", "messages to be handled.\"\"\" self.logger.info(\"Starting listening to showdown websocket\") coroutines", "async def _handle_battle_message(self, message: str) -> None: \"\"\"Abstract method. Implementation", ":rtype: Logger \"\"\" return self._logger @property def username(self) -> str:", "avatar and low-level message handling. \"\"\" def __init__( self, player_configuration:", "self.logged_in.is_set() await self._set_team() await self._send_message(\"/accept %s\" % username) async def", "Optional[int]) -> Logger: # pyre-ignore \"\"\"Creates a logger for the", "battles, this is the zero-th entry # Otherwise it is", "= logging.StreamHandler() if log_level is not None: logger.setLevel(log_level) formatter =", "Lock from asyncio import sleep from time import perf_counter from", "\"|\".join([room, message]) await self._websocket.send(to_send) self.logger.info(\">>> %s\", to_send) async def _set_team(self):", ":rtype: str \"\"\" return self._username @property def websocket_url(self) -> str:", "class PlayerNetwork(ABC): \"\"\" Network interface of a player. Responsible for", "user login. 
:return: The logged-in event :rtype: Event \"\"\" return", "to handle: %s\", message) # Showdown websocket messages are pipe-separated", "str): assert self.logged_in.is_set() await self._set_team() await self._send_message(f\"/challenge {username}, {format_}\") async", "from asyncio import Lock from asyncio import sleep from time", "level methods for basic tasks, such as changing avatar and", "Logger # pyre-ignore from poke_env.exceptions import ShowdownException from poke_env.player_configuration import", "to login as %s, showdown returned %s \"\"\" \"\"\"- this", "Split message contains information sent by the server. This information", "= perf_counter() while perf_counter() - start < wait_for: await sleep(checking_interval)", "except (CancelledError, RuntimeError) as e: self.logger.critical(\"Listen interrupted by %s\", e)", "the player. Returns a Logger displaying asctime and the player's", "module defines a base class for communicating with showdown servers.", "sent. Optional. :type message_2: str, optional \"\"\" if message_2: to_send", "id. If None, nothing happens. :type avatar_id: int \"\"\" await", "\"\"\" return self._logged_in @property def logger(self) -> Logger: # pyre-ignore", "might solve this problem.\"\"\", self.username, split_message[2], ) elif \"updatechallenges\" in", "\"\"\" import json import logging import requests import websockets #", "length 2. :param message: The message to send. 
:type message:", "The type of message is determined by the first entry", "= logging.getLogger(self._username) stream_handler = logging.StreamHandler() if log_level is not None:", "message is determined by the first entry in the message", "abc import abstractmethod from asyncio import CancelledError from asyncio import", "pyre-ignore self._logger: Logger = self._create_player_logger(log_level) # pyre-ignore if start_listening: self._listening_coroutine", "await self._set_team() await self._send_message(f\"/search {format_}\") async def _send_message( self, message:", "The logged-in event :rtype: Event \"\"\" return self._logged_in @property def", "perf_counter from typing import List from typing import Optional from", "handling message:\\n%s\", message ) raise exception async def _log_in(self, split_message:", "except Exception as exception: self.logger.exception( \"Unhandled exception raised while handling", "interrupted by %s\", e) except Exception as e: self.logger.exception(e) finally:", "messages are pipe-separated sequences split_message = message.split(\"|\") assert len(split_message) >", "logger.addHandler(stream_handler) return logger async def _handle_message(self, message: str) -> None:", "used to send a sequence of length 2. :param message:", "+ \"%7C\" + split_message[3], }, ) self.logger.info(\"Sending authentication request\") assertion", "%s\", e) except Exception as e: self.logger.exception(e) finally: for coroutine", "method. Implementation should keep track of current challenges. \"\"\" @property", "logging.getLogger(self._username) stream_handler = logging.StreamHandler() if log_level is not None: logger.setLevel(log_level)", "Message received from the server that triggers logging in. :type", "Confirms successful login self.logged_in.set() elif not split_message[2].startswith(\" Guest \"): self.logger.warning(", "\"\"\" @property def logged_in(self) -> Event: \"\"\"Event object associated with", "-> str: \"\"\"The websocket url. 
It is derived from the", "format_: str): assert self.logged_in.is_set() await self._set_team() await self._send_message(f\"/challenge {username}, {format_}\")", "logging import requests import websockets # pyre-ignore from abc import", "websockets.connect( self.websocket_url, max_queue=None ) as websocket: self._websocket = websocket async", "player_configuration: Player configuration. :type player_configuration: PlayerConfiguration :param avatar: Player avatar", "self._listening_coroutine = ensure_future(self.listen()) async def _accept_challenge(self, username: str) -> None:", "intercepted. %s\", e) except Exception as exception: self.logger.exception( \"Unhandled exception", "= server_configuration.authentication_url self._avatar = avatar self._password = player_configuration.password self._username =", "import json import logging import requests import websockets # pyre-ignore", "message_2: Optional[str] = None ) -> None: \"\"\"Sends a message", "message, message_2]) else: to_send = \"|\".join([room, message]) await self._websocket.send(to_send) self.logger.info(\">>>", "% message) except CancelledError as e: self.logger.critical(\"CancelledError intercepted. %s\", e)", "- start < wait_for: await sleep(checking_interval) if self.logged_in: return assert", "\"\"\" await self._wait_for_login() if avatar_id is not None: await self._send_message(f\"/avatar", "Contain information about current challenge await self._update_challenges(split_message) elif split_message[0].startswith(\">battle\"): #", "as %s, showdown returned %s \"\"\" \"\"\"- this might prevent", "return assert self.logged_in async def listen(self) -> None: \"\"\"Listen to", "except Exception as e: self.logger.exception(e) finally: for coroutine in coroutines:", "from asyncio import sleep from time import perf_counter from typing", "that triggers logging in. 
:type split_message: List[str] \"\"\" if self._password:", "wait_for: await sleep(checking_interval) if self.logged_in: return assert self.logged_in async def", "self._listening_coroutine is not None: self._listening_coroutine.cancel() await self._websocket.close() @abstractmethod async def", "username might solve this problem.\"\"\", self.username, split_message[2], ) elif \"updatechallenges\"", "requests import websockets # pyre-ignore from abc import ABC from", "Logger = self._create_player_logger(log_level) # pyre-ignore if start_listening: self._listening_coroutine = ensure_future(self.listen())", "pyre-ignore \"\"\"Logger associated with the player. :return: The logger. :rtype:", "from aiologger import Logger # pyre-ignore from poke_env.exceptions import ShowdownException", "int, optional :param log_level: The player's logger level. :type log_level:", "= ensure_future(self.listen()) async def _accept_challenge(self, username: str) -> None: assert", "%s\", message) elif split_message[1] in [\"nametaken\"]: self.logger.critical(\"Error message received: %s\",", "Event from asyncio import Lock from asyncio import sleep from", "authentication request\") assertion = json.loads(log_in_request.text[1:])[\"assertion\"] else: self.logger.info(\"Bypassing authentication request\") assertion", "information is necessary to log in. :param split_message: Message received", "to a showdown websocket and dispatch messages to be handled.\"\"\"", "websocket\") coroutines = [] try: async with websockets.connect( self.websocket_url, max_queue=None", "as e: self.logger.exception(e) finally: for coroutine in coroutines: coroutine.cancel() async", "split_message: List[str]) -> None: \"\"\"Abstract method. Implementation should keep track", "Logger: # pyre-ignore \"\"\"Logger associated with the player. :return: The", "bool \"\"\" self._authentication_url = server_configuration.authentication_url self._avatar = avatar self._password =", "logger level. :type log_level: int. 
Defaults to logging's default level.", "-> None: assert self.logged_in.is_set() await self._set_team() await self._send_message(\"/accept %s\" %", "server url. :return: The websocket url. :rtype: str \"\"\" return", "self._logger @property def username(self) -> str: \"\"\"The player's username. :return:", "ABC from abc import abstractmethod from asyncio import CancelledError from", "player_configuration: PlayerConfiguration, *, avatar: Optional[int] = None, log_level: Optional[int] =", "Optional[int] = None, log_level: Optional[int] = None, server_configuration: ServerConfiguration, start_listening:", ":type message: str :param room: The room to which the", "message) pass elif split_message[1] == \"popup\": self.logger.warning(\"Popup message received: %s\",", "_search_ladder_game(self, format_): await self._set_team() await self._send_message(f\"/search {format_}\") async def _send_message(", "List[str]) -> None: \"\"\"Abstract method. Implementation should keep track of", "log_level: The logger's level. :type log_level: int :return: The logger.", "stream_handler.setFormatter(formatter) logger.addHandler(stream_handler) return logger async def _handle_message(self, message: str) ->", "server_configuration.server_url self._logged_in: Event = Event() self._sending_lock = Lock() self._websocket: websockets.client.WebSocketClientProtocol", "# Contain information about current challenge await self._update_challenges(split_message) elif split_message[0].startswith(\">battle\"):", "Logger displaying asctime and the player's username before messages. :param", "asyncio import CancelledError from asyncio import ensure_future from asyncio import", ":param player_configuration: Player configuration. :type player_configuration: PlayerConfiguration :param avatar: Player", "elif \"updatechallenges\" in split_message[1]: # Contain information about current challenge", "Optional[str] = None ) -> None: \"\"\"Sends a message to", "happens. 
:type avatar_id: int \"\"\" await self._wait_for_login() if avatar_id is", "received: %s\", message) elif split_message[1] == \"pm\": self.logger.info(\"Received pm: %s\",", "self._password: log_in_request = requests.post( self._authentication_url, data={ \"act\": \"login\", \"name\": self._username,", "`message_2` can be used to send a sequence of length", "# -*- coding: utf-8 -*- \"\"\"This module defines a base", "message) raise ShowdownException(\"Error message received: %s\", message) elif split_message[1] ==", "to the specified room. `message_2` can be used to send", "a sequence of length 2. :param message: The message to", "self.logged_in.is_set() await self._set_team() await self._send_message(f\"/challenge {username}, {format_}\") async def _change_avatar(self,", "except CancelledError as e: self.logger.critical(\"CancelledError intercepted. %s\", e) except Exception", "import requests import websockets # pyre-ignore from abc import ABC", "List from typing import Optional from aiologger import Logger #", "_change_avatar(self, avatar_id: Optional[int]) -> None: \"\"\"Changes the player's avatar. :param", "self.websocket_url, max_queue=None ) as websocket: self._websocket = websocket async for", "nothing happens. :type avatar_id: int \"\"\" await self._wait_for_login() if avatar_id", "(CancelledError, RuntimeError) as e: self.logger.critical(\"Listen interrupted by %s\", e) except", "await self._log_in(split_message) elif split_message[1] == \"updateuser\": if split_message[2] == \"", "\"\"\"Creates a logger for the player. Returns a Logger displaying", "{avatar_id}\") def _create_player_logger(self, log_level: Optional[int]) -> Logger: # pyre-ignore \"\"\"Creates", "\"\"\" try: self.logger.debug(\"Received message to handle: %s\", message) # Showdown", "await self._handle_battle_message(message) elif split_message[1] == \"updatesearch\": self.logger.debug(\"Ignored message: %s\", message)", "with showdown servers. 
Also implements some higher level methods for", "async def _accept_challenge(self, username: str) -> None: assert self.logged_in.is_set() await", "to showdown websocket\") coroutines = [] try: async with websockets.connect(", "as websocket: self._websocket = websocket async for message in websocket:", "split_message[1] == \"updateuser\": if split_message[2] == \" \" + self._username:", "this is the zero-th entry # Otherwise it is the", "None: \"\"\"Listen to a showdown websocket and dispatch messages to", ":type split_message: List[str] \"\"\" if self._password: log_in_request = requests.post( self._authentication_url,", "as e: self.logger.critical(\"CancelledError intercepted. %s\", e) except Exception as exception:", "The message to parse. :type message: str \"\"\" try: self.logger.debug(\"Received", "if self._password: log_in_request = requests.post( self._authentication_url, data={ \"act\": \"login\", \"name\":", "self._logger: Logger = self._create_player_logger(log_level) # pyre-ignore if start_listening: self._listening_coroutine =", "event :rtype: Event \"\"\" return self._logged_in @property def logger(self) ->", "in [\"nametaken\"]: self.logger.critical(\"Error message received: %s\", message) raise ShowdownException(\"Error message", "send a sequence of length 2. :param message: The message", "await self._websocket.close() @abstractmethod async def _handle_battle_message(self, message: str) -> None:", "split_message[1] == \"updatesearch\": self.logger.debug(\"Ignored message: %s\", message) pass elif split_message[1]", "such as changing avatar and low-level message handling. \"\"\" def", "poke_env.player_configuration import PlayerConfiguration from poke_env.server_configuration import ServerConfiguration class PlayerNetwork(ABC): \"\"\"", "None: \"\"\"Abstract method. 
Implementation should keep track of current challenges.", "None: logger.setLevel(log_level) formatter = logging.Formatter( \"%(asctime)s - %(name)s - %(levelname)s", "async def _update_challenges(self, split_message: List[str]) -> None: \"\"\"Abstract method. Implementation", "typing import Optional from aiologger import Logger # pyre-ignore from", "logging.StreamHandler() if log_level is not None: logger.setLevel(log_level) formatter = logging.Formatter(", "entry in the message # For battles, this is the", "if split_message[1] == \"challstr\": # Confirms connection to the server:", "with websockets.connect( self.websocket_url, max_queue=None ) as websocket: self._websocket = websocket", "import logging import requests import websockets # pyre-ignore from abc", "split_message[1] == \"pm\": self.logger.info(\"Received pm: %s\", split_message) else: self.logger.critical(\"Unhandled message:", "self.logger.info(\"Received pm: %s\", split_message) else: self.logger.critical(\"Unhandled message: %s\", message) raise", "for the player. Returns a Logger displaying asctime and the", "# The type of message is determined by the first", "# pyre-ignore \"\"\"Creates a logger for the player. Returns a", "len(split_message) > 1 # The type of message is determined", "if start_listening: self._listening_coroutine = ensure_future(self.listen()) async def _accept_challenge(self, username: str)", "= Event() self._sending_lock = Lock() self._websocket: websockets.client.WebSocketClientProtocol # pyre-ignore self._logger:", "_accept_challenge(self, username: str) -> None: assert self.logged_in.is_set() await self._set_team() await", "the server that triggers logging in. :type split_message: List[str] \"\"\"", "pyre-ignore \"\"\"Creates a logger for the player. Returns a Logger", "websocket url. It is derived from the server url. :return:", "associated with the player. :return: The logger. :rtype: Logger \"\"\"", "the message should be sent. 
:type room: str :param message_2:", "a player. Responsible for communicating with showdown servers. Also implements", "str) -> None: assert self.logged_in.is_set() await self._set_team() await self._send_message(\"/accept %s\"", "elif split_message[1] == \"popup\": self.logger.warning(\"Popup message received: %s\", message) elif", "interface of a player. Responsible for communicating with showdown servers.", "async def _search_ladder_game(self, format_): await self._set_team() await self._send_message(f\"/search {format_}\") async", "= 0.001, wait_for: int = 5 ) -> None: start", "logger for the player. Returns a Logger displaying asctime and", "sent. :type room: str :param message_2: Second element of the", "self, message: str, room: str = \"\", message_2: Optional[str] =", "login self.logged_in.set() elif not split_message[2].startswith(\" Guest \"): self.logger.warning( \"\"\"Trying to", "self._handle_battle_message(message) elif split_message[1] == \"updatesearch\": self.logger.debug(\"Ignored message: %s\", message) pass", "the server: we can login await self._log_in(split_message) elif split_message[1] ==", "wait_for: int = 5 ) -> None: start = perf_counter()", "%s \"\"\" \"\"\"- this might prevent future actions from this", "room to which the message should be sent. :type room:", "in. :param split_message: Message received from the server that triggers", "split_message[1] in [\"nametaken\"]: self.logger.critical(\"Error message received: %s\", message) raise ShowdownException(\"Error", "split_message[2] + \"%7C\" + split_message[3], }, ) self.logger.info(\"Sending authentication request\")", "# pyre-ignore \"\"\"Logger associated with the player. :return: The logger.", "this agent. \"\"\" \"\"\"Changing the agent's username might solve this", "except websockets.exceptions.ConnectionClosedOK: self.logger.warning( \"Websocket connection with %s closed\", self.websocket_url )", "url. :return: The websocket url. 
:rtype: str \"\"\" return f\"ws://{self._server_url}/showdown/websocket\"", "websockets.client.WebSocketClientProtocol # pyre-ignore self._logger: Logger = self._create_player_logger(log_level) # pyre-ignore if", "%s\", e) except Exception as exception: self.logger.exception( \"Unhandled exception raised", "None, log_level: Optional[int] = None, server_configuration: ServerConfiguration, start_listening: bool =" ]
[ "import defaultdict import cPickle as pickle import re stopwords_custom =", "a) in enumerate(authors.iteritems()): if ':' not in a['fullname']: lname =", "if len(a['fullname_joined']) > 1: bins[a['fullname_joined'][:-1]].add(id) if (i+1) % 10000 ==", "import csv import argparse from unidecode import unidecode from nameparser", "pg = ' '.join(p) if len(pg) > len(p)*2-1: bins[pg].add(id) if", "'scheduling', 'september', 'special', 'student', 'programs', 'capacitated', 'balancing', 'assembly', 'aspect', 'model',", "not in a['fullname']: lname = a['fullname'] ngrams = zip(*[lname[j:] for", "len(bins[b]) > max_bin_size: del bins[b] return bins def bin_iFfL(authors): bins", "in a['fullname'] and len(a['name_first']) >= 2 and a['name_last']: bins[a['name_first'][0:2] +", "'thesis', 'test', 'tool', 'structure', 'statistical', 'laboratory', 'ltd', 'objects', 'process', 'scheduling',", "'statistical', 'laboratory', 'ltd', 'objects', 'process', 'scheduling', 'september', 'special', 'student', 'programs',", "'test', 'tool', 'structure', 'statistical', 'laboratory', 'ltd', 'objects', 'process', 'scheduling', 'september',", "return bins def bin_offbylastone(authors): bins = defaultdict(set) for i, (id,", "j in range(nw)]) for p in ngrams: pg = '", "bins[pg].add(id) if (i+1) % 10000 == 0: print_err(i+1) bk =", "binlabel, binv in bins: print binlabel + ';' + ','.join(map(str,", "a['name_last'][:-1]].add(id) if (i+1) % 10000 == 0: print_err(i+1) bk =", "= sorted([(len(bv), blabel, bv) for blabel, bv in bins.iteritems()], reverse=True)", "return bins def bin_fFfL(authors): bins = defaultdict(set) for i, (id,", "a['metaphone_fullname']: bins[a['metaphone_fullname']].add(id) if (i+1) % 10000 == 0: print_err(i+1) #", "0: print_err(i+1) return bins def bin_fF3L(authors, max_bin_size=20): bins = defaultdict(set)", "% 10000 == 0: print_err(i+1) return bins def bin_fFfL(authors): bins", "10000 == 0: print_err(i+1) return bins def bin_fullparsedname(authors): bins =", "return bins def 
main(): parser = argparse.ArgumentParser() parser.add_argument('authorprefeat', nargs='?', default='generated/Author_prefeat.pickle')", "'propulsion', 'classification', 'recommendation']) stopwords = stopwords_custom | npc.TITLES | npc.PREFIXES", "bins[a['name_first'][0] + a['name_last'][:-1]].add(id) if (i+1) % 10000 == 0: print_err(i+1)", "a['fFiL'] and len(a['name_last']) >= 3 and len(a['fFiL']) > 2: bins[a['fFiL']", "tokens = re.sub(\"[^\\w]\", \" \", a['name']).split() tokens = [v for", "10000 == 0: print_err(i+1) return bins def bin_fFfL(authors): bins =", "del bins[b] return bins def bin_ngrams(authors, n=15, max_bin_size=30): bins =", "> max_bin_size: del bins[b] return bins def bin_ngrams(authors, n=15, max_bin_size=30):", "return bins def bin_fFiL(authors, max_bin_size=20): bins = defaultdict(set) for i,", "bins[b] return bins def bin_ngrams(authors, n=15, max_bin_size=30): bins = defaultdict(set)", "'propulsion', 'corp', 'workshop', 'xml', 'world', 'work', 'thesis', 'test', 'tool', 'structure',", "in enumerate(authors.iteritems()): if ':' not in a['fullname_joined']: bins[a['fullname_joined']].add(id) if len(a['fullname_joined'])", "bins[a['metaphone_fullname']].add(id) if (i+1) % 10000 == 0: print_err(i+1) # bk", "'workshop', 'xml', 'world', 'work', 'thesis', 'test', 'tool', 'structure', 'statistical', 'laboratory',", "in enumerate(authors.iteritems()): bins[a['fullname_joined']].add(id) if (i+1) % 10000 == 0: print_err(i+1)", "':' not in a['name']: tokens = re.sub(\"[^\\w]\", \" \", a['name']).split()", "len(a['name_last']) >= 3 and len(a['fFiL']) > 2: bins[a['fFiL'] + a['name_last'][1:3]].add(id)", "'based', 'research', 'language', 'technology', 'project', 'design', 'computer', 'control', 'object', 'internet',", "bins[a['iFfL']].add(id) if (i+1) % 10000 == 0: print_err(i+1) return bins", "for j in range(nw)]) for p in ngrams: pg =", "(id, a) in enumerate(authors.iteritems()): if ':' not in a['fFiL'] and", "0: print_err(i+1) return bins def 
bin_iFoffbyoneL(authors, max_bin_size=30): bins = defaultdict(set)", "% 10000 == 0: print_err(i+1) # bk = bins.keys() #", "2: bins[a['fFiL'] + a['name_last'][1:3]].add(id) if (i+1) % 10000 == 0:", "def bin_iFoffbyoneL(authors, max_bin_size=30): bins = defaultdict(set) for i, (id, a)", "b in bk: # if len(bins[b]) > max_bin_size: # del", "bins[b] return bins def bin_metaphone(authors): bins = defaultdict(set) for i,", "bin_ngrams(authors, n=15, max_bin_size=30): bins = defaultdict(set) for i, (id, a)", "'technology', 'project', 'design', 'computer', 'control', 'object', 'internet', 'propulsion', 'corp', 'workshop',", "i, (id, a) in enumerate(authors.iteritems()): if ':' not in a['fullname_joined']:", "in a['fullname']: lname = a['fullname'] ngrams = zip(*[lname[j:] for j", "== 0: print_err(i+1) bk = bins.keys() for b in bk:", "parser.parse_args() print_err(\"Loading pickled author pre-features\") authors = pickle.load(open(args.authorprefeat, 'rb')) bins", "bins[b] return bins def bin_fFiL(authors, max_bin_size=20): bins = defaultdict(set) for", "bins def bin_metaphone(authors): bins = defaultdict(set) for i, (id, a)", "defaultdict(set) for i, (id, a) in enumerate(authors.iteritems()): bins[a['fullname_joined']].add(id) if (i+1)", "def main(): parser = argparse.ArgumentParser() parser.add_argument('authorprefeat', nargs='?', default='generated/Author_prefeat.pickle') parser.add_argument('type', nargs='?',", "'corp', 'workshop', 'xml', 'world', 'work', 'thesis', 'test', 'tool', 'structure', 'statistical',", "bins def bin_2FoffbyoneL(authors, max_bin_size=30): bins = defaultdict(set) for i, (id,", "'applications', 'jet', 'propulsion', 'classification', 'recommendation']) stopwords = stopwords_custom | npc.TITLES", "enumerate(authors.iteritems()): bins[a['iFfL']].add(id) if (i+1) % 10000 == 0: print_err(i+1) return", "in stopwords] ngrams = zip(*[tokens[j:] for j in range(nw)]) for", "enumerate(authors.iteritems()): if ':' not in a['name']: tokens = re.sub(\"[^\\w]\", 
\"", "return bins def bin_metaphone(authors): bins = defaultdict(set) for i, (id,", "bins def main(): parser = argparse.ArgumentParser() parser.add_argument('authorprefeat', nargs='?', default='generated/Author_prefeat.pickle') parser.add_argument('type',", "i, (id, a) in enumerate(authors.iteritems()): if ':' not in a['fullname']", "tokens = [v for v in tokens if len(v) >", "a) in enumerate(authors.iteritems()): if ':' not in a['name']: tokens =", "bin_token(authors, nw=2, max_bin_size=100): bins = defaultdict(set) for i, (id, a)", "npc from collections import defaultdict import cPickle as pickle import", "(i+1) % 10000 == 0: print_err(i+1) return bins def bin_fF3L(authors,", "(i+1) % 10000 == 0: print_err(i+1) return bins def bin_token(authors,", "max_bin_size=100): bins = defaultdict(set) for i, (id, a) in enumerate(authors.iteritems()):", "and a['name_last']: bins[a['name_first'][0] + a['name_last']].add(id) if len(a['name_last']) > 1: bins[a['name_first'][0]", "len(pg) > len(p)*2-1: bins[pg].add(id) if (i+1) % 10000 == 0:", "bins[a['name_first'][0] + a['name_last']].add(id) if len(a['name_last']) > 1: bins[a['name_first'][0] + a['name_last'][:-1]].add(id)", "defaultdict(set) for i, (id, a) in enumerate(authors.iteritems()): bins[a['fullname']].add(id) if (i+1)", "(i+1) % 10000 == 0: print_err(i+1) return bins def bin_fFfL(authors):", "bins = defaultdict(set) for i, (id, a) in enumerate(authors.iteritems()): if", "'structure', 'statistical', 'laboratory', 'ltd', 'objects', 'process', 'scheduling', 'september', 'special', 'student',", "bins = defaultdict(set) for i, (id, a) in enumerate(authors.iteritems()): bins[a['fullname']].add(id)", "0: print_err(i+1) return bins def bin_fullparsedname(authors): bins = defaultdict(set) for", "bins[''.join(p)].add(id) if (i+1) % 10000 == 0: print_err(i+1) bk =", "if ':' not in a['fullname']: lname = a['fullname'] ngrams =", "def bin_iFfL(authors): bins = defaultdict(set) for i, (id, a) in", "unidecode import unidecode from 
nameparser import constants as npc from", "import cPickle as pickle import re stopwords_custom = set(['document', 'preparation',", "\" \", a['name']).split() tokens = [v for v in tokens", "a['fullname'] ngrams = zip(*[lname[j:] for j in range(n)]) for p", "bins def bin_fFiL(authors, max_bin_size=20): bins = defaultdict(set) for i, (id,", "bins = sorted([(len(bv), blabel, bv) for blabel, bv in bins.iteritems()],", "+ ';' + ','.join(map(str, sorted(binv))) if __name__ == \"__main__\": main()", "len(bins[b]) > max_bin_size: del bins[b] return bins def bin_metaphone(authors): bins", "if ':' not in a['fFiL'] and len(a['name_last']) >= 3 and", "defaultdict(set) for i, (id, a) in enumerate(authors.iteritems()): bins[a['fFfL']].add(id) if (i+1)", "= bins.keys() # for b in bk: # if len(bins[b])", "s in stopwords_custom)): bins[''.join(p)].add(id) if (i+1) % 10000 == 0:", "not in stopwords] ngrams = zip(*[tokens[j:] for j in range(nw)])", "bin_iFfL(authors): bins = defaultdict(set) for i, (id, a) in enumerate(authors.iteritems()):", "(i+1) % 10000 == 0: print_err(i+1) return bins def bin_fullparsedname(authors):", "zip(*[lname[j:] for j in range(n)]) for p in ngrams: if", "print_err(i+1) return bins def bin_fFfL(authors): bins = defaultdict(set) for i,", "bins[b] return bins def bin_2FoffbyoneL(authors, max_bin_size=30): bins = defaultdict(set) for", "bins.keys() # for b in bk: # if len(bins[b]) >", "tokens if len(v) > 2 and v not in stopwords]", "range(nw)]) for p in ngrams: pg = ' '.join(p) if", "len(bins[b]) > max_bin_size: # del bins[b] return bins def bin_offbylastone(authors):", "[v for v in tokens if len(v) > 2 and", "bins = defaultdict(set) for i, (id, a) in enumerate(authors.iteritems()): bins[a['fFfL']].add(id)", "(id, a) in enumerate(authors.iteritems()): bins[a['fullname_parsed']].add(id) if (i+1) % 10000 ==", "2 and v not in stopwords] ngrams = zip(*[tokens[j:] for", "(id, a) in enumerate(authors.iteritems()): bins[a['fFfL']].add(id) if (i+1) % 10000 ==", 
"'rb')) bins = globals()[\"bin_\"+args.type](authors) bins = sorted([(len(bv), blabel, bv) for", "'world', 'work', 'thesis', 'test', 'tool', 'structure', 'statistical', 'laboratory', 'ltd', 'objects',", "'research', 'language', 'technology', 'project', 'design', 'computer', 'control', 'object', 'internet', 'propulsion',", "default='iFfL') args = parser.parse_args() print_err(\"Loading pickled author pre-features\") authors =", "ngrams = zip(*[tokens[j:] for j in range(nw)]) for p in", "bins[a['fFfL']].add(id) if (i+1) % 10000 == 0: print_err(i+1) return bins", "% 10000 == 0: print_err(i+1) return bins def bin_fF3L(authors, max_bin_size=20):", "if len(a['name_last']) > 1: bins[a['name_first'][0] + a['name_last'][:-1]].add(id) if (i+1) %", "del bins[b] return bins def main(): parser = argparse.ArgumentParser() parser.add_argument('authorprefeat',", "max_bin_size=30): bins = defaultdict(set) for i, (id, a) in enumerate(authors.iteritems()):", "in stopwords_custom)): bins[''.join(p)].add(id) if (i+1) % 10000 == 0: print_err(i+1)", "a['name_last'][1:3]].add(id) if (i+1) % 10000 == 0: print_err(i+1) bk =", "i, (id, a) in enumerate(authors.iteritems()): bins[a['fullname_parsed']].add(id) if (i+1) % 10000", "import argparse from unidecode import unidecode from nameparser import constants", "(id, a) in enumerate(authors.iteritems()): if ':' not in a['fullname'] and", "in ngrams: if not any(((s in p) for s in", "npc.TITLES | npc.PREFIXES | npc.SUFFIXES | npc.CONJUNCTIONS def bin_exactsamename(authors): bins", "in a['fFiL'] and len(a['name_last']) >= 3 and len(a['fFiL']) > 2:", "len(a['fullname_joined']) > 1: bins[a['fullname_joined'][:-1]].add(id) if (i+1) % 10000 == 0:", "common import * import csv import argparse from unidecode import", "i, (id, a) in enumerate(authors.iteritems()): if a['metaphone_fullname']: bins[a['metaphone_fullname']].add(id) if (i+1)", "return bins def bin_fF3L(authors, max_bin_size=20): bins = defaultdict(set) for i,", "= defaultdict(set) for i, 
(id, a) in enumerate(authors.iteritems()): bins[a['fullname_parsed']].add(id) if", "'committee', 'international', 'artificial', 'network', 'distributed', 'based', 'research', 'language', 'technology', 'project',", "for i, (id, a) in enumerate(authors.iteritems()): bins[a['fFfL']].add(id) if (i+1) %", "'objects', 'process', 'scheduling', 'september', 'special', 'student', 'programs', 'capacitated', 'balancing', 'assembly',", "len(bins[b]) > max_bin_size: del bins[b] return bins def bin_ngrams(authors, n=15,", "defaultdict(set) for i, (id, a) in enumerate(authors.iteritems()): if ':' not", "i, (id, a) in enumerate(authors.iteritems()): if ':' not in a['name']:", "# bk = bins.keys() # for b in bk: #", "len(bins[b]) > max_bin_size: del bins[b] return bins def bin_2FoffbyoneL(authors, max_bin_size=30):", "cPickle as pickle import re stopwords_custom = set(['document', 'preparation', 'system',", "> 1: bins[a['name_first'][0] + a['name_last'][:-1]].add(id) if (i+1) % 10000 ==", "as pickle import re stopwords_custom = set(['document', 'preparation', 'system', 'consortium',", "(i+1) % 10000 == 0: print_err(i+1) return bins def bin_iFoffbyoneL(authors,", "pickle.load(open(args.authorprefeat, 'rb')) bins = globals()[\"bin_\"+args.type](authors) bins = sorted([(len(bv), blabel, bv)", "len(a['fFiL']) > 2: bins[a['fFiL'] + a['name_last'][1:3]].add(id) if (i+1) % 10000", "| npc.PREFIXES | npc.SUFFIXES | npc.CONJUNCTIONS def bin_exactsamename(authors): bins =", "'design', 'computer', 'control', 'object', 'internet', 'propulsion', 'corp', 'workshop', 'xml', 'world',", "bins[a['fFiL'] + a['name_last'][1:3]].add(id) if (i+1) % 10000 == 0: print_err(i+1)", "'distributed', 'based', 'research', 'language', 'technology', 'project', 'design', 'computer', 'control', 'object',", "# if len(bins[b]) > max_bin_size: # del bins[b] return bins", "a) in enumerate(authors.iteritems()): if ':' not in a['fullname'] and len(a['name_first'])", "enumerate(authors.iteritems()): 
bins[a['fullname']].add(id) if (i+1) % 10000 == 0: print_err(i+1) return", "return bins def bin_ngrams(authors, n=15, max_bin_size=30): bins = defaultdict(set) for", "enumerate(authors.iteritems()): bins[a['fullname_parsed']].add(id) if (i+1) % 10000 == 0: print_err(i+1) return", "0: print_err(i+1) # bk = bins.keys() # for b in", "== 0: print_err(i+1) return bins def bin_token(authors, nw=2, max_bin_size=100): bins", "n=15, max_bin_size=30): bins = defaultdict(set) for i, (id, a) in", "print_err(i+1) return bins def bin_fullparsedname(authors): bins = defaultdict(set) for i,", "in a['fullname_joined']: bins[a['fullname_joined']].add(id) if len(a['fullname_joined']) > 1: bins[a['fullname_joined'][:-1]].add(id) if (i+1)", "= defaultdict(set) for i, (id, a) in enumerate(authors.iteritems()): bins[a['fullname']].add(id) if", "nargs='?', default='iFfL') args = parser.parse_args() print_err(\"Loading pickled author pre-features\") authors", "bins.keys() for b in bk: if len(bins[b]) > max_bin_size: del", "and v not in stopwords] ngrams = zip(*[tokens[j:] for j", "enumerate(authors.iteritems()): bins[a['fFfL']].add(id) if (i+1) % 10000 == 0: print_err(i+1) return", "in bk: # if len(bins[b]) > max_bin_size: # del bins[b]", "if len(bins[b]) > max_bin_size: del bins[b] return bins def bin_fFiL(authors,", "'special', 'student', 'programs', 'capacitated', 'balancing', 'assembly', 'aspect', 'model', 'inc', 'psychological',", "> 2: bins[a['fFiL'] + a['name_last'][1:3]].add(id) if (i+1) % 10000 ==", "if (i+1) % 10000 == 0: print_err(i+1) return bins def", "not in a['fullname'] and a['name_first'] and a['name_last']: bins[a['name_first'][0] + a['name_last']].add(id)", "a['fullname']: lname = a['fullname'] ngrams = zip(*[lname[j:] for j in", "def bin_offbylastone(authors): bins = defaultdict(set) for i, (id, a) in", "max_bin_size: del bins[b] return bins def bin_iFfL(authors): bins = defaultdict(set)", "0: print_err(i+1) return bins def bin_token(authors, nw=2, max_bin_size=100): 
bins =", "= [v for v in tokens if len(v) > 2", "bk = bins.keys() # for b in bk: # if", "print_err(i+1) return bins def bin_token(authors, nw=2, max_bin_size=100): bins = defaultdict(set)", "== 0: print_err(i+1) return bins def bin_fF3L(authors, max_bin_size=20): bins =", "(i+1) % 10000 == 0: print_err(i+1) bk = bins.keys() for", "10000 == 0: print_err(i+1) return bins def bin_token(authors, nw=2, max_bin_size=100):", "sorted([(len(bv), blabel, bv) for blabel, bv in bins.iteritems()], reverse=True) for", "a['name_first'] and a['name_last']: bins[a['name_first'][0] + a['name_last']].add(id) if len(a['name_last']) > 1:", "if ':' not in a['fullname'] and len(a['name_first']) >= 2 and", "binlabel + ';' + ','.join(map(str, sorted(binv))) if __name__ == \"__main__\":", "defaultdict(set) for i, (id, a) in enumerate(authors.iteritems()): if len(a['fFiL']) >", "i, (id, a) in enumerate(authors.iteritems()): if ':' not in a['fullname']:", "bins def bin_offbylastone(authors): bins = defaultdict(set) for i, (id, a)", "stopwords_custom | npc.TITLES | npc.PREFIXES | npc.SUFFIXES | npc.CONJUNCTIONS def", "return bins def bin_iFoffbyoneL(authors, max_bin_size=30): bins = defaultdict(set) for i,", "'inc', 'psychological', 'psychology', 'mohammed', 'computing', 'software', 'programming', 'new', 'applications', 'jet',", "range(n)]) for p in ngrams: if not any(((s in p)", "'internet', 'propulsion', 'corp', 'workshop', 'xml', 'world', 'work', 'thesis', 'test', 'tool',", "def bin_fFiL(authors, max_bin_size=20): bins = defaultdict(set) for i, (id, a)", "nargs='?', default='generated/Author_prefeat.pickle') parser.add_argument('type', nargs='?', default='iFfL') args = parser.parse_args() print_err(\"Loading pickled", "'classification', 'recommendation']) stopwords = stopwords_custom | npc.TITLES | npc.PREFIXES |", "in enumerate(authors.iteritems()): if len(a['fFiL']) > 2: bins[a['fFiL']].add(id) if (i+1) %", "2: bins[a['fFiL']].add(id) if (i+1) % 10000 == 0: print_err(i+1) bk", "'programs', 
'capacitated', 'balancing', 'assembly', 'aspect', 'model', 'inc', 'psychological', 'psychology', 'mohammed',", "== 0: print_err(i+1) return bins def bin_fFfL(authors): bins = defaultdict(set)", "= defaultdict(set) for i, (id, a) in enumerate(authors.iteritems()): bins[a['fFfL']].add(id) if", "npc.CONJUNCTIONS def bin_exactsamename(authors): bins = defaultdict(set) for i, (id, a)", "'control', 'object', 'internet', 'propulsion', 'corp', 'workshop', 'xml', 'world', 'work', 'thesis',", "print_err(i+1) return bins def bin_fF3L(authors, max_bin_size=20): bins = defaultdict(set) for", "re.sub(\"[^\\w]\", \" \", a['name']).split() tokens = [v for v in", "def bin_exactsamename(authors): bins = defaultdict(set) for i, (id, a) in", "in enumerate(authors.iteritems()): bins[a['fullname']].add(id) if (i+1) % 10000 == 0: print_err(i+1)", "def bin_metaphone(authors): bins = defaultdict(set) for i, (id, a) in", "\", a['name']).split() tokens = [v for v in tokens if", "bins def bin_token(authors, nw=2, max_bin_size=100): bins = defaultdict(set) for i,", "+ a['name_last']].add(id) if len(a['name_last']) > 1: bins[a['name_first'][0:2] + a['name_last'][:-1]].add(id) if", "# del bins[b] return bins def bin_offbylastone(authors): bins = defaultdict(set)", "'software', 'programming', 'new', 'applications', 'jet', 'propulsion', 'classification', 'recommendation']) stopwords =", "1: bins[a['fullname_joined'][:-1]].add(id) if (i+1) % 10000 == 0: print_err(i+1) return", "default='generated/Author_prefeat.pickle') parser.add_argument('type', nargs='?', default='iFfL') args = parser.parse_args() print_err(\"Loading pickled author", "collections import defaultdict import cPickle as pickle import re stopwords_custom", "zip(*[tokens[j:] for j in range(nw)]) for p in ngrams: pg", "= defaultdict(set) for i, (id, a) in enumerate(authors.iteritems()): if ':'", "return bins def bin_2FoffbyoneL(authors, max_bin_size=30): bins = defaultdict(set) for i,", "bin_iFoffbyoneL(authors, max_bin_size=30): bins 
= defaultdict(set) for i, (id, a) in", "not in a['fullname'] and len(a['name_first']) >= 2 and a['name_last']: bins[a['name_first'][0:2]", "bin_exactsamename(authors): bins = defaultdict(set) for i, (id, a) in enumerate(authors.iteritems()):", "> max_bin_size: del bins[b] return bins def bin_metaphone(authors): bins =", "len(v) > 2 and v not in stopwords] ngrams =", "stopwords_custom)): bins[''.join(p)].add(id) if (i+1) % 10000 == 0: print_err(i+1) bk", "enumerate(authors.iteritems()): if len(a['fFiL']) > 2: bins[a['fFiL']].add(id) if (i+1) % 10000", "bv) for blabel, bv in bins.iteritems()], reverse=True) for _, binlabel,", "in enumerate(authors.iteritems()): bins[a['iFfL']].add(id) if (i+1) % 10000 == 0: print_err(i+1)", "return bins def bin_iFfL(authors): bins = defaultdict(set) for i, (id,", "ngrams = zip(*[lname[j:] for j in range(n)]) for p in", "'computing', 'software', 'programming', 'new', 'applications', 'jet', 'propulsion', 'classification', 'recommendation']) stopwords", "not in a['fullname_joined']: bins[a['fullname_joined']].add(id) if len(a['fullname_joined']) > 1: bins[a['fullname_joined'][:-1]].add(id) if", "= set(['document', 'preparation', 'system', 'consortium', 'committee', 'international', 'artificial', 'network', 'distributed',", "'consortium', 'committee', 'international', 'artificial', 'network', 'distributed', 'based', 'research', 'language', 'technology',", "'computer', 'control', 'object', 'internet', 'propulsion', 'corp', 'workshop', 'xml', 'world', 'work',", "import re stopwords_custom = set(['document', 'preparation', 'system', 'consortium', 'committee', 'international',", "del bins[b] return bins def bin_fFiL(authors, max_bin_size=20): bins = defaultdict(set)", "(id, a) in enumerate(authors.iteritems()): if a['metaphone_fullname']: bins[a['metaphone_fullname']].add(id) if (i+1) %", "bins[a['fullname_joined'][:-1]].add(id) if (i+1) % 10000 == 0: print_err(i+1) return bins", "= parser.parse_args() print_err(\"Loading pickled author 
pre-features\") authors = pickle.load(open(args.authorprefeat, 'rb'))", "'object', 'internet', 'propulsion', 'corp', 'workshop', 'xml', 'world', 'work', 'thesis', 'test',", "in bins.iteritems()], reverse=True) for _, binlabel, binv in bins: print", "== 0: print_err(i+1) return bins def bin_iFoffbyoneL(authors, max_bin_size=30): bins =", "for i, (id, a) in enumerate(authors.iteritems()): if len(a['fFiL']) > 2:", "for i, (id, a) in enumerate(authors.iteritems()): bins[a['iFfL']].add(id) if (i+1) %", "in ngrams: pg = ' '.join(p) if len(pg) > len(p)*2-1:", "'psychological', 'psychology', 'mohammed', 'computing', 'software', 'programming', 'new', 'applications', 'jet', 'propulsion',", "for b in bk: if len(bins[b]) > max_bin_size: del bins[b]", "max_bin_size: del bins[b] return bins def bin_2FoffbyoneL(authors, max_bin_size=30): bins =", "enumerate(authors.iteritems()): bins[a['fullname_joined']].add(id) if (i+1) % 10000 == 0: print_err(i+1) return", "bins def bin_fullparsedname(authors): bins = defaultdict(set) for i, (id, a)", "'model', 'inc', 'psychological', 'psychology', 'mohammed', 'computing', 'software', 'programming', 'new', 'applications',", "bins[a['fullname_parsed']].add(id) if (i+1) % 10000 == 0: print_err(i+1) return bins", "in enumerate(authors.iteritems()): if ':' not in a['fullname'] and a['name_first'] and", "in tokens if len(v) > 2 and v not in", "def bin_token(authors, nw=2, max_bin_size=100): bins = defaultdict(set) for i, (id,", "if (i+1) % 10000 == 0: print_err(i+1) bk = bins.keys()", "from nameparser import constants as npc from collections import defaultdict", "> 2: bins[a['fFiL']].add(id) if (i+1) % 10000 == 0: print_err(i+1)", "'balancing', 'assembly', 'aspect', 'model', 'inc', 'psychological', 'psychology', 'mohammed', 'computing', 'software',", "a['name']).split() tokens = [v for v in tokens if len(v)", "a) in enumerate(authors.iteritems()): if ':' not in a['fullname_joined']: bins[a['fullname_joined']].add(id) if", "blabel, bv) for blabel, 
bv in bins.iteritems()], reverse=True) for _,", "% 10000 == 0: print_err(i+1) return bins def bin_samename(authors): bins", "| npc.SUFFIXES | npc.CONJUNCTIONS def bin_exactsamename(authors): bins = defaultdict(set) for", ">= 3 and len(a['fFiL']) > 2: bins[a['fFiL'] + a['name_last'][1:3]].add(id) if", "a) in enumerate(authors.iteritems()): bins[a['iFfL']].add(id) if (i+1) % 10000 == 0:", "in bk: if len(bins[b]) > max_bin_size: del bins[b] return bins", "'programming', 'new', 'applications', 'jet', 'propulsion', 'classification', 'recommendation']) stopwords = stopwords_custom", "10000 == 0: print_err(i+1) bk = bins.keys() for b in", "# for b in bk: # if len(bins[b]) > max_bin_size:", "import unidecode from nameparser import constants as npc from collections", "max_bin_size: del bins[b] return bins def bin_fFiL(authors, max_bin_size=20): bins =", "b in bk: if len(bins[b]) > max_bin_size: del bins[b] return", "in enumerate(authors.iteritems()): if ':' not in a['fFiL'] and len(a['name_last']) >=", "npc.SUFFIXES | npc.CONJUNCTIONS def bin_exactsamename(authors): bins = defaultdict(set) for i,", "= defaultdict(set) for i, (id, a) in enumerate(authors.iteritems()): bins[a['iFfL']].add(id) if", "(id, a) in enumerate(authors.iteritems()): bins[a['fullname']].add(id) if (i+1) % 10000 ==", "= defaultdict(set) for i, (id, a) in enumerate(authors.iteritems()): if len(a['fFiL'])", "blabel, bv in bins.iteritems()], reverse=True) for _, binlabel, binv in", "for blabel, bv in bins.iteritems()], reverse=True) for _, binlabel, binv", "a['name_last']].add(id) if len(a['name_last']) > 1: bins[a['name_first'][0:2] + a['name_last'][:-1]].add(id) if (i+1)", "'system', 'consortium', 'committee', 'international', 'artificial', 'network', 'distributed', 'based', 'research', 'language',", ">= 2 and a['name_last']: bins[a['name_first'][0:2] + a['name_last']].add(id) if len(a['name_last']) >", "max_bin_size: del bins[b] return bins def bin_metaphone(authors): bins = defaultdict(set)", "p) for s 
in stopwords_custom)): bins[''.join(p)].add(id) if (i+1) % 10000", "= argparse.ArgumentParser() parser.add_argument('authorprefeat', nargs='?', default='generated/Author_prefeat.pickle') parser.add_argument('type', nargs='?', default='iFfL') args =", "bk: if len(bins[b]) > max_bin_size: del bins[b] return bins def", "argparse.ArgumentParser() parser.add_argument('authorprefeat', nargs='?', default='generated/Author_prefeat.pickle') parser.add_argument('type', nargs='?', default='iFfL') args = parser.parse_args()", "for i, (id, a) in enumerate(authors.iteritems()): bins[a['fullname_parsed']].add(id) if (i+1) %", "bins def bin_iFfL(authors): bins = defaultdict(set) for i, (id, a)", "bins.iteritems()], reverse=True) for _, binlabel, binv in bins: print binlabel", "len(bins[b]) > max_bin_size: del bins[b] return bins def main(): parser", "a) in enumerate(authors.iteritems()): bins[a['fullname']].add(id) if (i+1) % 10000 == 0:", "max_bin_size: # del bins[b] return bins def bin_offbylastone(authors): bins =", "% 10000 == 0: print_err(i+1) bk = bins.keys() for b", "and len(a['name_first']) >= 2 and a['name_last']: bins[a['name_first'][0:2] + a['name_last']].add(id) if", "(id, a) in enumerate(authors.iteritems()): if ':' not in a['fullname']: lname", "len(bins[b]) > max_bin_size: del bins[b] return bins def bin_fFiL(authors, max_bin_size=20):", "re stopwords_custom = set(['document', 'preparation', 'system', 'consortium', 'committee', 'international', 'artificial',", "'psychology', 'mohammed', 'computing', 'software', 'programming', 'new', 'applications', 'jet', 'propulsion', 'classification',", "defaultdict(set) for i, (id, a) in enumerate(authors.iteritems()): bins[a['iFfL']].add(id) if (i+1)", "defaultdict(set) for i, (id, a) in enumerate(authors.iteritems()): bins[a['fullname_parsed']].add(id) if (i+1)", "from unidecode import unidecode from nameparser import constants as npc", "a) in enumerate(authors.iteritems()): if a['metaphone_fullname']: 
bins[a['metaphone_fullname']].add(id) if (i+1) % 10000", "argparse from unidecode import unidecode from nameparser import constants as", "if ':' not in a['fullname'] and a['name_first'] and a['name_last']: bins[a['name_first'][0]", "def bin_2FoffbyoneL(authors, max_bin_size=30): bins = defaultdict(set) for i, (id, a)", "j in range(n)]) for p in ngrams: if not any(((s", "(id, a) in enumerate(authors.iteritems()): if ':' not in a['name']: tokens", "not in a['name']: tokens = re.sub(\"[^\\w]\", \" \", a['name']).split() tokens", "if len(a['fFiL']) > 2: bins[a['fFiL']].add(id) if (i+1) % 10000 ==", "if a['metaphone_fullname']: bins[a['metaphone_fullname']].add(id) if (i+1) % 10000 == 0: print_err(i+1)", "+ a['name_last']].add(id) if len(a['name_last']) > 1: bins[a['name_first'][0] + a['name_last'][:-1]].add(id) if", "(id, a) in enumerate(authors.iteritems()): bins[a['fullname_joined']].add(id) if (i+1) % 10000 ==", "i, (id, a) in enumerate(authors.iteritems()): bins[a['fullname']].add(id) if (i+1) % 10000", "def bin_ngrams(authors, n=15, max_bin_size=30): bins = defaultdict(set) for i, (id,", "':' not in a['fullname_joined']: bins[a['fullname_joined']].add(id) if len(a['fullname_joined']) > 1: bins[a['fullname_joined'][:-1]].add(id)", "#!/usr/bin/env python from common import * import csv import argparse", "enumerate(authors.iteritems()): if ':' not in a['fullname_joined']: bins[a['fullname_joined']].add(id) if len(a['fullname_joined']) >", "max_bin_size: del bins[b] return bins def bin_ngrams(authors, n=15, max_bin_size=30): bins", "if len(a['name_last']) > 1: bins[a['name_first'][0:2] + a['name_last'][:-1]].add(id) if (i+1) %", "def bin_samename(authors): bins = defaultdict(set) for i, (id, a) in", "in enumerate(authors.iteritems()): bins[a['fFfL']].add(id) if (i+1) % 10000 == 0: print_err(i+1)", "in enumerate(authors.iteritems()): bins[a['fullname_parsed']].add(id) if (i+1) % 10000 == 0: print_err(i+1)", "(i+1) % 10000 == 0: print_err(i+1) return bins def 
bin_samename(authors):", "'student', 'programs', 'capacitated', 'balancing', 'assembly', 'aspect', 'model', 'inc', 'psychological', 'psychology',", "del bins[b] return bins def bin_offbylastone(authors): bins = defaultdict(set) for", "del bins[b] return bins def bin_iFfL(authors): bins = defaultdict(set) for", "= defaultdict(set) for i, (id, a) in enumerate(authors.iteritems()): if a['metaphone_fullname']:", "bin_offbylastone(authors): bins = defaultdict(set) for i, (id, a) in enumerate(authors.iteritems()):", "bins[a['name_first'][0:2] + a['name_last']].add(id) if len(a['name_last']) > 1: bins[a['name_first'][0:2] + a['name_last'][:-1]].add(id)", "if len(bins[b]) > max_bin_size: # del bins[b] return bins def", "bins[b] return bins def main(): parser = argparse.ArgumentParser() parser.add_argument('authorprefeat', nargs='?',", "ngrams: pg = ' '.join(p) if len(pg) > len(p)*2-1: bins[pg].add(id)", "bins def bin_iFoffbyoneL(authors, max_bin_size=30): bins = defaultdict(set) for i, (id,", "for s in stopwords_custom)): bins[''.join(p)].add(id) if (i+1) % 10000 ==", "'recommendation']) stopwords = stopwords_custom | npc.TITLES | npc.PREFIXES | npc.SUFFIXES", "for i, (id, a) in enumerate(authors.iteritems()): bins[a['fullname_joined']].add(id) if (i+1) %", "pickled author pre-features\") authors = pickle.load(open(args.authorprefeat, 'rb')) bins = globals()[\"bin_\"+args.type](authors)", "from collections import defaultdict import cPickle as pickle import re", "* import csv import argparse from unidecode import unidecode from", "= re.sub(\"[^\\w]\", \" \", a['name']).split() tokens = [v for v", "len(a['name_first']) >= 2 and a['name_last']: bins[a['name_first'][0:2] + a['name_last']].add(id) if len(a['name_last'])", "'ltd', 'objects', 'process', 'scheduling', 'september', 'special', 'student', 'programs', 'capacitated', 'balancing',", "max_bin_size=20): bins = defaultdict(set) for i, (id, a) in enumerate(authors.iteritems()):", "for i, (id, a) in 
enumerate(authors.iteritems()): if ':' not in", "stopwords_custom = set(['document', 'preparation', 'system', 'consortium', 'committee', 'international', 'artificial', 'network',", "if len(bins[b]) > max_bin_size: del bins[b] return bins def bin_2FoffbyoneL(authors,", "bin_fFfL(authors): bins = defaultdict(set) for i, (id, a) in enumerate(authors.iteritems()):", "any(((s in p) for s in stopwords_custom)): bins[''.join(p)].add(id) if (i+1)", "':' not in a['fullname'] and len(a['name_first']) >= 2 and a['name_last']:", "in p) for s in stopwords_custom)): bins[''.join(p)].add(id) if (i+1) %", "python from common import * import csv import argparse from", "and len(a['name_last']) >= 3 and len(a['fFiL']) > 2: bins[a['fFiL'] +", "bins[a['fFiL']].add(id) if (i+1) % 10000 == 0: print_err(i+1) bk =", "'process', 'scheduling', 'september', 'special', 'student', 'programs', 'capacitated', 'balancing', 'assembly', 'aspect',", "set(['document', 'preparation', 'system', 'consortium', 'committee', 'international', 'artificial', 'network', 'distributed', 'based',", "a) in enumerate(authors.iteritems()): bins[a['fullname_parsed']].add(id) if (i+1) % 10000 == 0:", "0: print_err(i+1) return bins def bin_fFfL(authors): bins = defaultdict(set) for", "defaultdict(set) for i, (id, a) in enumerate(authors.iteritems()): if a['metaphone_fullname']: bins[a['metaphone_fullname']].add(id)", "if len(bins[b]) > max_bin_size: del bins[b] return bins def main():", "> max_bin_size: del bins[b] return bins def bin_fFiL(authors, max_bin_size=20): bins", "':' not in a['fullname']: lname = a['fullname'] ngrams = zip(*[lname[j:]", "% 10000 == 0: print_err(i+1) return bins def bin_token(authors, nw=2,", "> 1: bins[a['fullname_joined'][:-1]].add(id) if (i+1) % 10000 == 0: print_err(i+1)", "bins[a['fullname_joined']].add(id) if len(a['fullname_joined']) > 1: bins[a['fullname_joined'][:-1]].add(id) if (i+1) % 10000", "bins def bin_ngrams(authors, n=15, max_bin_size=30): bins = defaultdict(set) for i,", 
"print_err(i+1) return bins def bin_iFoffbyoneL(authors, max_bin_size=30): bins = defaultdict(set) for", "if len(bins[b]) > max_bin_size: del bins[b] return bins def bin_metaphone(authors):", "authors = pickle.load(open(args.authorprefeat, 'rb')) bins = globals()[\"bin_\"+args.type](authors) bins = sorted([(len(bv),", "a['fullname'] and a['name_first'] and a['name_last']: bins[a['name_first'][0] + a['name_last']].add(id) if len(a['name_last'])", "bins[a['name_first'][0:2] + a['name_last'][:-1]].add(id) if (i+1) % 10000 == 0: print_err(i+1)", "> max_bin_size: # del bins[b] return bins def bin_offbylastone(authors): bins", "= ' '.join(p) if len(pg) > len(p)*2-1: bins[pg].add(id) if (i+1)", "defaultdict import cPickle as pickle import re stopwords_custom = set(['document',", "% 10000 == 0: print_err(i+1) return bins def bin_iFoffbyoneL(authors, max_bin_size=30):", "and a['name_first'] and a['name_last']: bins[a['name_first'][0] + a['name_last']].add(id) if len(a['name_last']) >", "'tool', 'structure', 'statistical', 'laboratory', 'ltd', 'objects', 'process', 'scheduling', 'september', 'special',", "csv import argparse from unidecode import unidecode from nameparser import", "nw=2, max_bin_size=100): bins = defaultdict(set) for i, (id, a) in", "args = parser.parse_args() print_err(\"Loading pickled author pre-features\") authors = pickle.load(open(args.authorprefeat,", "== 0: print_err(i+1) return bins def bin_fullparsedname(authors): bins = defaultdict(set)", "bin_fFiL(authors, max_bin_size=20): bins = defaultdict(set) for i, (id, a) in", "= a['fullname'] ngrams = zip(*[lname[j:] for j in range(n)]) for", "len(a['fFiL']) > 2: bins[a['fFiL']].add(id) if (i+1) % 10000 == 0:", "for p in ngrams: if not any(((s in p) for", "in range(n)]) for p in ngrams: if not any(((s in", "def bin_fullparsedname(authors): bins = defaultdict(set) for i, (id, a) in", "del bins[b] return bins def bin_2FoffbyoneL(authors, max_bin_size=30): bins = defaultdict(set)", 
"enumerate(authors.iteritems()): if ':' not in a['fullname'] and len(a['name_first']) >= 2", "> 1: bins[a['name_first'][0:2] + a['name_last'][:-1]].add(id) if (i+1) % 10000 ==", "pickle import re stopwords_custom = set(['document', 'preparation', 'system', 'consortium', 'committee',", "from common import * import csv import argparse from unidecode", "if len(bins[b]) > max_bin_size: del bins[b] return bins def bin_ngrams(authors,", "| npc.CONJUNCTIONS def bin_exactsamename(authors): bins = defaultdict(set) for i, (id,", "stopwords = stopwords_custom | npc.TITLES | npc.PREFIXES | npc.SUFFIXES |", "i, (id, a) in enumerate(authors.iteritems()): bins[a['iFfL']].add(id) if (i+1) % 10000", "'mohammed', 'computing', 'software', 'programming', 'new', 'applications', 'jet', 'propulsion', 'classification', 'recommendation'])", "if ':' not in a['name']: tokens = re.sub(\"[^\\w]\", \" \",", "'.join(p) if len(pg) > len(p)*2-1: bins[pg].add(id) if (i+1) % 10000", "= globals()[\"bin_\"+args.type](authors) bins = sorted([(len(bv), blabel, bv) for blabel, bv", "a['name_last']: bins[a['name_first'][0] + a['name_last']].add(id) if len(a['name_last']) > 1: bins[a['name_first'][0] +", "1: bins[a['name_first'][0] + a['name_last'][:-1]].add(id) if (i+1) % 10000 == 0:", "bk: # if len(bins[b]) > max_bin_size: # del bins[b] return", "return bins def bin_samename(authors): bins = defaultdict(set) for i, (id,", "v in tokens if len(v) > 2 and v not", "for i, (id, a) in enumerate(authors.iteritems()): bins[a['fullname']].add(id) if (i+1) %", "if ':' not in a['fullname_joined']: bins[a['fullname_joined']].add(id) if len(a['fullname_joined']) > 1:", "bins[b] return bins def bin_iFfL(authors): bins = defaultdict(set) for i,", "'project', 'design', 'computer', 'control', 'object', 'internet', 'propulsion', 'corp', 'workshop', 'xml',", "def bin_fFfL(authors): bins = defaultdict(set) for i, (id, a) in", "'artificial', 'network', 'distributed', 'based', 'research', 'language', 'technology', 'project', 
'design', 'computer',", "bins = defaultdict(set) for i, (id, a) in enumerate(authors.iteritems()): bins[a['fullname_parsed']].add(id)", "bins[a['fullname']].add(id) if (i+1) % 10000 == 0: print_err(i+1) return bins", "% 10000 == 0: print_err(i+1) return bins def bin_fullparsedname(authors): bins", "bin_fullparsedname(authors): bins = defaultdict(set) for i, (id, a) in enumerate(authors.iteritems()):", "0: print_err(i+1) bk = bins.keys() for b in bk: if", "for v in tokens if len(v) > 2 and v", "_, binlabel, binv in bins: print binlabel + ';' +", "bins = defaultdict(set) for i, (id, a) in enumerate(authors.iteritems()): bins[a['iFfL']].add(id)", "= bins.keys() for b in bk: if len(bins[b]) > max_bin_size:", "in range(nw)]) for p in ngrams: pg = ' '.join(p)", "print_err(i+1) # bk = bins.keys() # for b in bk:", "> 2 and v not in stopwords] ngrams = zip(*[tokens[j:]", "lname = a['fullname'] ngrams = zip(*[lname[j:] for j in range(n)])", "ngrams: if not any(((s in p) for s in stopwords_custom)):", "bin_2FoffbyoneL(authors, max_bin_size=30): bins = defaultdict(set) for i, (id, a) in", "= defaultdict(set) for i, (id, a) in enumerate(authors.iteritems()): bins[a['fullname_joined']].add(id) if", "bk = bins.keys() for b in bk: if len(bins[b]) >", "not any(((s in p) for s in stopwords_custom)): bins[''.join(p)].add(id) if", "print_err(\"Loading pickled author pre-features\") authors = pickle.load(open(args.authorprefeat, 'rb')) bins =", "'capacitated', 'balancing', 'assembly', 'aspect', 'model', 'inc', 'psychological', 'psychology', 'mohammed', 'computing',", "parser.add_argument('authorprefeat', nargs='?', default='generated/Author_prefeat.pickle') parser.add_argument('type', nargs='?', default='iFfL') args = parser.parse_args() print_err(\"Loading", "npc.PREFIXES | npc.SUFFIXES | npc.CONJUNCTIONS def bin_exactsamename(authors): bins = defaultdict(set)", "bins def bin_fF3L(authors, max_bin_size=20): bins = defaultdict(set) for i, (id,", "a) in enumerate(authors.iteritems()): 
if ':' not in a['fFiL'] and len(a['name_last'])", "v not in stopwords] ngrams = zip(*[tokens[j:] for j in", "a) in enumerate(authors.iteritems()): if ':' not in a['fullname'] and a['name_first']", "nameparser import constants as npc from collections import defaultdict import", "a['fullname'] and len(a['name_first']) >= 2 and a['name_last']: bins[a['name_first'][0:2] + a['name_last']].add(id)", "= zip(*[lname[j:] for j in range(n)]) for p in ngrams:", "a['name']: tokens = re.sub(\"[^\\w]\", \" \", a['name']).split() tokens = [v", "len(a['name_last']) > 1: bins[a['name_first'][0] + a['name_last'][:-1]].add(id) if (i+1) % 10000", "= zip(*[tokens[j:] for j in range(nw)]) for p in ngrams:", "10000 == 0: print_err(i+1) return bins def bin_iFoffbyoneL(authors, max_bin_size=30): bins", "and len(a['fFiL']) > 2: bins[a['fFiL'] + a['name_last'][1:3]].add(id) if (i+1) %", "return bins def bin_fullparsedname(authors): bins = defaultdict(set) for i, (id,", "author pre-features\") authors = pickle.load(open(args.authorprefeat, 'rb')) bins = globals()[\"bin_\"+args.type](authors) bins", "and a['name_last']: bins[a['name_first'][0:2] + a['name_last']].add(id) if len(a['name_last']) > 1: bins[a['name_first'][0:2]", "+ a['name_last'][1:3]].add(id) if (i+1) % 10000 == 0: print_err(i+1) bk", "stopwords] ngrams = zip(*[tokens[j:] for j in range(nw)]) for p", "'preparation', 'system', 'consortium', 'committee', 'international', 'artificial', 'network', 'distributed', 'based', 'research',", "enumerate(authors.iteritems()): if ':' not in a['fullname'] and a['name_first'] and a['name_last']:", "binv in bins: print binlabel + ';' + ','.join(map(str, sorted(binv)))", "1: bins[a['name_first'][0:2] + a['name_last'][:-1]].add(id) if (i+1) % 10000 == 0:", "'international', 'artificial', 'network', 'distributed', 'based', 'research', 'language', 'technology', 'project', 'design',", "enumerate(authors.iteritems()): if ':' not in a['fullname']: lname = a['fullname'] ngrams", "import * import csv 
import argparse from unidecode import unidecode", "(id, a) in enumerate(authors.iteritems()): if ':' not in a['fullname_joined']: bins[a['fullname_joined']].add(id)", "for j in range(n)]) for p in ngrams: if not", "in enumerate(authors.iteritems()): if ':' not in a['fullname']: lname = a['fullname']", "2 and a['name_last']: bins[a['name_first'][0:2] + a['name_last']].add(id) if len(a['name_last']) > 1:", "if len(pg) > len(p)*2-1: bins[pg].add(id) if (i+1) % 10000 ==", "a) in enumerate(authors.iteritems()): if len(a['fFiL']) > 2: bins[a['fFiL']].add(id) if (i+1)", "'assembly', 'aspect', 'model', 'inc', 'psychological', 'psychology', 'mohammed', 'computing', 'software', 'programming',", "max_bin_size: del bins[b] return bins def main(): parser = argparse.ArgumentParser()", "parser.add_argument('type', nargs='?', default='iFfL') args = parser.parse_args() print_err(\"Loading pickled author pre-features\")", "len(a['name_last']) > 1: bins[a['name_first'][0:2] + a['name_last'][:-1]].add(id) if (i+1) % 10000", "0: print_err(i+1) return bins def bin_samename(authors): bins = defaultdict(set) for", "== 0: print_err(i+1) # bk = bins.keys() # for b", "+ a['name_last'][:-1]].add(id) if (i+1) % 10000 == 0: print_err(i+1) bk", "'new', 'applications', 'jet', 'propulsion', 'classification', 'recommendation']) stopwords = stopwords_custom |", "if len(bins[b]) > max_bin_size: del bins[b] return bins def bin_iFfL(authors):", "3 and len(a['fFiL']) > 2: bins[a['fFiL'] + a['name_last'][1:3]].add(id) if (i+1)", "constants as npc from collections import defaultdict import cPickle as", "parser = argparse.ArgumentParser() parser.add_argument('authorprefeat', nargs='?', default='generated/Author_prefeat.pickle') parser.add_argument('type', nargs='?', default='iFfL') args", "'aspect', 'model', 'inc', 'psychological', 'psychology', 'mohammed', 'computing', 'software', 'programming', 'new',", "10000 == 0: print_err(i+1) return bins def bin_fF3L(authors, max_bin_size=20): bins", "def 
bin_fF3L(authors, max_bin_size=20): bins = defaultdict(set) for i, (id, a)", "> max_bin_size: del bins[b] return bins def bin_2FoffbyoneL(authors, max_bin_size=30): bins", "in enumerate(authors.iteritems()): if ':' not in a['fullname'] and len(a['name_first']) >=", "if (i+1) % 10000 == 0: print_err(i+1) # bk =", "p in ngrams: pg = ' '.join(p) if len(pg) >", "for p in ngrams: pg = ' '.join(p) if len(pg)", "(id, a) in enumerate(authors.iteritems()): if len(a['fFiL']) > 2: bins[a['fFiL']].add(id) if", "not in a['fFiL'] and len(a['name_last']) >= 3 and len(a['fFiL']) >", "' '.join(p) if len(pg) > len(p)*2-1: bins[pg].add(id) if (i+1) %", "if len(v) > 2 and v not in stopwords] ngrams", "'laboratory', 'ltd', 'objects', 'process', 'scheduling', 'september', 'special', 'student', 'programs', 'capacitated',", "bins def bin_samename(authors): bins = defaultdict(set) for i, (id, a)", "bins[b] return bins def bin_offbylastone(authors): bins = defaultdict(set) for i,", "enumerate(authors.iteritems()): if a['metaphone_fullname']: bins[a['metaphone_fullname']].add(id) if (i+1) % 10000 == 0:", "i, (id, a) in enumerate(authors.iteritems()): bins[a['fullname_joined']].add(id) if (i+1) % 10000", "bins: print binlabel + ';' + ','.join(map(str, sorted(binv))) if __name__", "bins = defaultdict(set) for i, (id, a) in enumerate(authors.iteritems()): bins[a['fullname_joined']].add(id)", "a) in enumerate(authors.iteritems()): bins[a['fFfL']].add(id) if (i+1) % 10000 == 0:", "print_err(i+1) bk = bins.keys() for b in bk: if len(bins[b])", "in a['fullname'] and a['name_first'] and a['name_last']: bins[a['name_first'][0] + a['name_last']].add(id) if", "for i, (id, a) in enumerate(authors.iteritems()): if a['metaphone_fullname']: bins[a['metaphone_fullname']].add(id) if", "main(): parser = argparse.ArgumentParser() parser.add_argument('authorprefeat', nargs='?', default='generated/Author_prefeat.pickle') parser.add_argument('type', nargs='?', default='iFfL')", "p in ngrams: if not any(((s in p) 
for s", "bin_fF3L(authors, max_bin_size=20): bins = defaultdict(set) for i, (id, a) in", "'network', 'distributed', 'based', 'research', 'language', 'technology', 'project', 'design', 'computer', 'control',", "for b in bk: # if len(bins[b]) > max_bin_size: #", "len(p)*2-1: bins[pg].add(id) if (i+1) % 10000 == 0: print_err(i+1) bk", "bin_metaphone(authors): bins = defaultdict(set) for i, (id, a) in enumerate(authors.iteritems()):", "globals()[\"bin_\"+args.type](authors) bins = sorted([(len(bv), blabel, bv) for blabel, bv in", "bv in bins.iteritems()], reverse=True) for _, binlabel, binv in bins:", "(id, a) in enumerate(authors.iteritems()): bins[a['iFfL']].add(id) if (i+1) % 10000 ==", "in a['name']: tokens = re.sub(\"[^\\w]\", \" \", a['name']).split() tokens =", "i, (id, a) in enumerate(authors.iteritems()): if len(a['fFiL']) > 2: bins[a['fFiL']].add(id)", "bins = globals()[\"bin_\"+args.type](authors) bins = sorted([(len(bv), blabel, bv) for blabel,", "print_err(i+1) return bins def bin_samename(authors): bins = defaultdict(set) for i,", "if not any(((s in p) for s in stopwords_custom)): bins[''.join(p)].add(id)", "import constants as npc from collections import defaultdict import cPickle", "= stopwords_custom | npc.TITLES | npc.PREFIXES | npc.SUFFIXES | npc.CONJUNCTIONS", "a['name_last']: bins[a['name_first'][0:2] + a['name_last']].add(id) if len(a['name_last']) > 1: bins[a['name_first'][0:2] +", "'language', 'technology', 'project', 'design', 'computer', 'control', 'object', 'internet', 'propulsion', 'corp',", "print binlabel + ';' + ','.join(map(str, sorted(binv))) if __name__ ==", "del bins[b] return bins def bin_metaphone(authors): bins = defaultdict(set) for", "bins def bin_fFfL(authors): bins = defaultdict(set) for i, (id, a)", "bin_samename(authors): bins = defaultdict(set) for i, (id, a) in enumerate(authors.iteritems()):", "return bins def bin_token(authors, nw=2, max_bin_size=100): bins = defaultdict(set) for", "> max_bin_size: del bins[b] return bins 
def bin_iFfL(authors): bins =", "'work', 'thesis', 'test', 'tool', 'structure', 'statistical', 'laboratory', 'ltd', 'objects', 'process',", "10000 == 0: print_err(i+1) # bk = bins.keys() # for", "reverse=True) for _, binlabel, binv in bins: print binlabel +", "'jet', 'propulsion', 'classification', 'recommendation']) stopwords = stopwords_custom | npc.TITLES |", "== 0: print_err(i+1) return bins def bin_samename(authors): bins = defaultdict(set)", "in enumerate(authors.iteritems()): if a['metaphone_fullname']: bins[a['metaphone_fullname']].add(id) if (i+1) % 10000 ==", "a) in enumerate(authors.iteritems()): bins[a['fullname_joined']].add(id) if (i+1) % 10000 == 0:", "| npc.TITLES | npc.PREFIXES | npc.SUFFIXES | npc.CONJUNCTIONS def bin_exactsamename(authors):", "> max_bin_size: del bins[b] return bins def main(): parser =", "'september', 'special', 'student', 'programs', 'capacitated', 'balancing', 'assembly', 'aspect', 'model', 'inc',", "'xml', 'world', 'work', 'thesis', 'test', 'tool', 'structure', 'statistical', 'laboratory', 'ltd',", "':' not in a['fFiL'] and len(a['name_last']) >= 3 and len(a['fFiL'])", "i, (id, a) in enumerate(authors.iteritems()): if ':' not in a['fFiL']", "(i+1) % 10000 == 0: print_err(i+1) # bk = bins.keys()", "':' not in a['fullname'] and a['name_first'] and a['name_last']: bins[a['name_first'][0] +", "10000 == 0: print_err(i+1) return bins def bin_samename(authors): bins =", "> len(p)*2-1: bins[pg].add(id) if (i+1) % 10000 == 0: print_err(i+1)", "in enumerate(authors.iteritems()): if ':' not in a['name']: tokens = re.sub(\"[^\\w]\",", "a['name_last']].add(id) if len(a['name_last']) > 1: bins[a['name_first'][0] + a['name_last'][:-1]].add(id) if (i+1)", "a['fullname_joined']: bins[a['fullname_joined']].add(id) if len(a['fullname_joined']) > 1: bins[a['fullname_joined'][:-1]].add(id) if (i+1) %", "for _, binlabel, binv in bins: print binlabel + ';'", "unidecode from nameparser import constants as npc from collections import", 
"enumerate(authors.iteritems()): if ':' not in a['fFiL'] and len(a['name_last']) >= 3", "in bins: print binlabel + ';' + ','.join(map(str, sorted(binv))) if", "i, (id, a) in enumerate(authors.iteritems()): bins[a['fFfL']].add(id) if (i+1) % 10000", "pre-features\") authors = pickle.load(open(args.authorprefeat, 'rb')) bins = globals()[\"bin_\"+args.type](authors) bins =", "as npc from collections import defaultdict import cPickle as pickle", "= pickle.load(open(args.authorprefeat, 'rb')) bins = globals()[\"bin_\"+args.type](authors) bins = sorted([(len(bv), blabel,", "bins[a['fullname_joined']].add(id) if (i+1) % 10000 == 0: print_err(i+1) return bins" ]
[ "def filters(self, *args, **kwargs): # real signature unknown pass def", "of __new__ def __new__(S, *more): # real signature unknown; restored", "*args, **kwargs): # real signature unknown pass def path(self, *args,", "signature unknown pass def currentFileName(self, *args, **kwargs): # real signature", "def nameFilters(self, *args, **kwargs): # real signature unknown pass def", "@staticmethod # known case of __new__ def __new__(S, *more): #", "**kwargs): # real signature unknown pass def currentFileName(self, *args, **kwargs):", "currentFileName(self, *args, **kwargs): # real signature unknown pass def currentFilePath(self,", "unknown pass def next(self, *args, **kwargs): # real signature unknown", "# no doc # imports import Shiboken as __Shiboken class", "# from C:\\Python27\\lib\\site-packages\\PySide\\QtCore.pyd # by generator 1.147 # no doc", "pass def filters(self, *args, **kwargs): # real signature unknown pass", "no doc # imports import Shiboken as __Shiboken class QAbstractFileEngineIterator(__Shiboken.Object):", "__init__(self, *args, **kwargs): # real signature unknown pass @staticmethod #", "**kwargs): # real signature unknown pass def __init__(self, *args, **kwargs):", "# real signature unknown pass def nameFilters(self, *args, **kwargs): #", "<reponame>basepipe/developer_onboarding<gh_stars>1-10 # encoding: utf-8 # module PySide.QtCore # from C:\\Python27\\lib\\site-packages\\PySide\\QtCore.pyd", "real signature unknown; restored from __doc__ \"\"\" T.__new__(S, ...) 
->", "**kwargs): # real signature unknown pass @staticmethod # known case", "*more): # real signature unknown; restored from __doc__ \"\"\" T.__new__(S,", "def __new__(S, *more): # real signature unknown; restored from __doc__", "PySide.QtCore # from C:\\Python27\\lib\\site-packages\\PySide\\QtCore.pyd # by generator 1.147 # no", "imports import Shiboken as __Shiboken class QAbstractFileEngineIterator(__Shiboken.Object): # no doc", "**kwargs): # real signature unknown pass def nameFilters(self, *args, **kwargs):", "*args, **kwargs): # real signature unknown pass def filters(self, *args,", "currentFileInfo(self, *args, **kwargs): # real signature unknown pass def currentFileName(self,", "def currentFileName(self, *args, **kwargs): # real signature unknown pass def", "1.147 # no doc # imports import Shiboken as __Shiboken", "**kwargs): # real signature unknown pass def filters(self, *args, **kwargs):", "signature unknown pass def filters(self, *args, **kwargs): # real signature", "filters(self, *args, **kwargs): # real signature unknown pass def hasNext(self,", "__new__ def __new__(S, *more): # real signature unknown; restored from", "# module PySide.QtCore # from C:\\Python27\\lib\\site-packages\\PySide\\QtCore.pyd # by generator 1.147", "# real signature unknown pass def __init__(self, *args, **kwargs): #", "real signature unknown pass def path(self, *args, **kwargs): # real", "real signature unknown pass def filters(self, *args, **kwargs): # real", "doc # imports import Shiboken as __Shiboken class QAbstractFileEngineIterator(__Shiboken.Object): #", "# by generator 1.147 # no doc # imports import", "unknown pass def currentFilePath(self, *args, **kwargs): # real signature unknown", "unknown pass def __init__(self, *args, **kwargs): # real signature unknown", "real signature unknown pass def hasNext(self, *args, **kwargs): # real", "pass def nameFilters(self, *args, **kwargs): # real signature unknown pass", "**kwargs): # real signature unknown pass def 
currentFilePath(self, *args, **kwargs):", "# real signature unknown pass def filters(self, *args, **kwargs): #", "pass def path(self, *args, **kwargs): # real signature unknown pass", "unknown; restored from __doc__ \"\"\" T.__new__(S, ...) -> a new", "# no doc def currentFileInfo(self, *args, **kwargs): # real signature", "as __Shiboken class QAbstractFileEngineIterator(__Shiboken.Object): # no doc def currentFileInfo(self, *args,", "__doc__ \"\"\" T.__new__(S, ...) -> a new object with type", "pass def currentFilePath(self, *args, **kwargs): # real signature unknown pass", "# imports import Shiboken as __Shiboken class QAbstractFileEngineIterator(__Shiboken.Object): # no", "real signature unknown pass def currentFilePath(self, *args, **kwargs): # real", "real signature unknown pass def next(self, *args, **kwargs): # real", "def currentFileInfo(self, *args, **kwargs): # real signature unknown pass def", "*args, **kwargs): # real signature unknown pass def currentFilePath(self, *args,", "doc def currentFileInfo(self, *args, **kwargs): # real signature unknown pass", "by generator 1.147 # no doc # imports import Shiboken", "signature unknown pass def currentFilePath(self, *args, **kwargs): # real signature", "restored from __doc__ \"\"\" T.__new__(S, ...) -> a new object", "# real signature unknown pass def path(self, *args, **kwargs): #", "__Shiboken class QAbstractFileEngineIterator(__Shiboken.Object): # no doc def currentFileInfo(self, *args, **kwargs):", "*args, **kwargs): # real signature unknown pass def next(self, *args,", "real signature unknown pass def __init__(self, *args, **kwargs): # real", "a new object with type S, a subtype of T", "object with type S, a subtype of T \"\"\" pass", "...) 
-> a new object with type S, a subtype", "__new__(S, *more): # real signature unknown; restored from __doc__ \"\"\"", "unknown pass def currentFileName(self, *args, **kwargs): # real signature unknown", "*args, **kwargs): # real signature unknown pass def hasNext(self, *args,", "signature unknown pass def __init__(self, *args, **kwargs): # real signature", "Shiboken as __Shiboken class QAbstractFileEngineIterator(__Shiboken.Object): # no doc def currentFileInfo(self,", "generator 1.147 # no doc # imports import Shiboken as", "# real signature unknown pass @staticmethod # known case of", "# encoding: utf-8 # module PySide.QtCore # from C:\\Python27\\lib\\site-packages\\PySide\\QtCore.pyd #", "import Shiboken as __Shiboken class QAbstractFileEngineIterator(__Shiboken.Object): # no doc def", "QAbstractFileEngineIterator(__Shiboken.Object): # no doc def currentFileInfo(self, *args, **kwargs): # real", "def path(self, *args, **kwargs): # real signature unknown pass def", "no doc def currentFileInfo(self, *args, **kwargs): # real signature unknown", "pass def hasNext(self, *args, **kwargs): # real signature unknown pass", "known case of __new__ def __new__(S, *more): # real signature", "unknown pass def path(self, *args, **kwargs): # real signature unknown", "from __doc__ \"\"\" T.__new__(S, ...) 
-> a new object with", "signature unknown pass def path(self, *args, **kwargs): # real signature", "# real signature unknown; restored from __doc__ \"\"\" T.__new__(S, ...)", "def next(self, *args, **kwargs): # real signature unknown pass def", "**kwargs): # real signature unknown pass def hasNext(self, *args, **kwargs):", "unknown pass def nameFilters(self, *args, **kwargs): # real signature unknown", "# real signature unknown pass def hasNext(self, *args, **kwargs): #", "next(self, *args, **kwargs): # real signature unknown pass def path(self,", "pass def __init__(self, *args, **kwargs): # real signature unknown pass", "currentFilePath(self, *args, **kwargs): # real signature unknown pass def filters(self,", "path(self, *args, **kwargs): # real signature unknown pass def __init__(self,", "encoding: utf-8 # module PySide.QtCore # from C:\\Python27\\lib\\site-packages\\PySide\\QtCore.pyd # by", "pass def next(self, *args, **kwargs): # real signature unknown pass", "case of __new__ def __new__(S, *more): # real signature unknown;", "signature unknown; restored from __doc__ \"\"\" T.__new__(S, ...) -> a", "**kwargs): # real signature unknown pass def next(self, *args, **kwargs):", "C:\\Python27\\lib\\site-packages\\PySide\\QtCore.pyd # by generator 1.147 # no doc # imports", "class QAbstractFileEngineIterator(__Shiboken.Object): # no doc def currentFileInfo(self, *args, **kwargs): #", "*args, **kwargs): # real signature unknown pass def nameFilters(self, *args,", "signature unknown pass def nameFilters(self, *args, **kwargs): # real signature", "pass def currentFileName(self, *args, **kwargs): # real signature unknown pass", "*args, **kwargs): # real signature unknown pass def __init__(self, *args,", "T.__new__(S, ...) 
-> a new object with type S, a", "unknown pass def hasNext(self, *args, **kwargs): # real signature unknown", "real signature unknown pass def currentFileName(self, *args, **kwargs): # real", "module PySide.QtCore # from C:\\Python27\\lib\\site-packages\\PySide\\QtCore.pyd # by generator 1.147 #", "unknown pass def filters(self, *args, **kwargs): # real signature unknown", "from C:\\Python27\\lib\\site-packages\\PySide\\QtCore.pyd # by generator 1.147 # no doc #", "**kwargs): # real signature unknown pass def path(self, *args, **kwargs):", "signature unknown pass @staticmethod # known case of __new__ def", "def hasNext(self, *args, **kwargs): # real signature unknown pass def", "pass @staticmethod # known case of __new__ def __new__(S, *more):", "# real signature unknown pass def currentFilePath(self, *args, **kwargs): #", "utf-8 # module PySide.QtCore # from C:\\Python27\\lib\\site-packages\\PySide\\QtCore.pyd # by generator", "nameFilters(self, *args, **kwargs): # real signature unknown pass def next(self,", "real signature unknown pass def nameFilters(self, *args, **kwargs): # real", "*args, **kwargs): # real signature unknown pass @staticmethod # known", "def currentFilePath(self, *args, **kwargs): # real signature unknown pass def", "def __init__(self, *args, **kwargs): # real signature unknown pass @staticmethod", "real signature unknown pass @staticmethod # known case of __new__", "\"\"\" T.__new__(S, ...) 
-> a new object with type S,", "unknown pass @staticmethod # known case of __new__ def __new__(S,", "hasNext(self, *args, **kwargs): # real signature unknown pass def nameFilters(self,", "# real signature unknown pass def next(self, *args, **kwargs): #", "# real signature unknown pass def currentFileName(self, *args, **kwargs): #", "*args, **kwargs): # real signature unknown pass def currentFileName(self, *args,", "new object with type S, a subtype of T \"\"\"", "signature unknown pass def next(self, *args, **kwargs): # real signature", "-> a new object with type S, a subtype of", "signature unknown pass def hasNext(self, *args, **kwargs): # real signature", "# known case of __new__ def __new__(S, *more): # real" ]
[ ".00001524 00000-0 30197-4 0 9997 2 25544 51.6421 236.2139 0003381", "\"\"\"Declare the skip_if_no_mpl marker in pytest's '--markers' helper option This", "called for each test case. It looks if the test", "skip from unittest.mock import patch from pathlib import Path from", "return True else: return False def pytest_configure(config): \"\"\"Declare the skip_if_no_mpl", "case has the skip_if_no_mpl decorator. If so, skip the test", "installed\" ) def pytest_runtest_setup(item): \"\"\"This function is called for each", "containing basic data \"\"\" config.update({ \"eop\": { \"missing_policy\": \"pass\", }", "{ \"missing_policy\": \"pass\", } }) @fixture def common_env(): with patch('beyond.dates.date.EopDb.get')", "is not a dependency of the library, but merely a", "9999 2 24960 62.6812 182.7824 6470982 294.8616 12.8538 3.18684355160009\"\"\") @fixture(params=[\"tle\",", "start = Date(2018, 4, 5, 16, 50) stop = timedelta(hours=15)", "as m: m.return_value = Eop( x=-0.00951054166666622, y=0.31093590624999734, dpsi=-94.19544791666682, deps=-10.295645833333051, dy=-0.10067361111115315,", "tai_utc=36.0 ) yield @fixture def station(common_env): return create_station('Toulouse', (43.604482, 1.443962,", "= timedelta(minutes=1) return orb.ephem(start=start, stop=stop, step=step) @fixture def jplfiles(): config['env']", "51.6421 236.2139 0003381 47.8509 47.6767 15.54198229111731\"\"\") @fixture def molniya_tle(common_env): return", "decorator. 
If so, skip the test case \"\"\" if _skip_if_no_mpl()", "molniya_tle(common_env): return Tle(\"\"\"MOLNIYA 1-90 1 24960U 97054A 18123.22759647 .00000163 00000-0", "patch from pathlib import Path from beyond.config import config from", "from beyond.io.tle import Tle from beyond.propagators.keplernum import KeplerNum from beyond.dates", ".00000163 00000-0 24467-3 0 9999 2 24960 62.6812 182.7824 6470982", "5, 16, 50) stop = timedelta(hours=6) step = timedelta(seconds=15) return", "import numpy as np from pytest import fixture, mark, skip", "pytest_runtest_setup(item): \"\"\"This function is called for each test case. It", "config dict containing basic data \"\"\" config.update({ \"eop\": { \"missing_policy\":", "elif request.param == \"ephem\": start = Date(2018, 4, 5, 16,", "= Date(2018, 4, 5, 16, 50) stop = timedelta(hours=6) step", "00000-0 30197-4 0 9997 2 25544 51.6421 236.2139 0003381 47.8509", "numpy as np from pytest import fixture, mark, skip from", "stop=stop, step=step) @fixture def jplfiles(): config['env'] = { 'jpl': [", "return create_station('Toulouse', (43.604482, 1.443962, 172.)) @fixture def iss_tle(common_env): return Tle(\"\"\"ISS", "request.param == \"tle\": return orb elif request.param == \"ephem\": start", "orb elif request.param == \"ephem\": start = Date(2018, 4, 5,", "else: return False def pytest_configure(config): \"\"\"Declare the skip_if_no_mpl marker in", ") yield @fixture def station(common_env): return create_station('Toulouse', (43.604482, 1.443962, 172.))", "\"jpl\" / \"pck00010.tpc\"), str(Path(__file__).parent / \"data\" / \"jpl\" / \"gm_de431.tpc\"),", "/ \"pck00010.tpc\"), str(Path(__file__).parent / \"data\" / \"jpl\" / \"gm_de431.tpc\"), ]", "np from pytest import fixture, mark, skip from unittest.mock import", "from unittest.mock import patch from pathlib import Path from beyond.config", "import get_body np.set_printoptions(linewidth=200) @fixture(autouse=True, scope=\"session\") def config_override(): \"\"\"Create a dummy", 
"timedelta(seconds=15) return orb.ephem(start=start, stop=stop, step=step) elif request.param == \"kepler\": orb.propagator", "30197-4 0 9997 2 25544 51.6421 236.2139 0003381 47.8509 47.6767", "ut1_utc=0.01756018472222477, tai_utc=36.0 ) yield @fixture def station(common_env): return create_station('Toulouse', (43.604482,", "16, 50) stop = timedelta(hours=6) step = timedelta(seconds=15) return orb.ephem(start=start,", "@fixture def jplfiles(): config['env'] = { 'jpl': [ str(Path(__file__).parent /", "lod=1.6242802083331438, ut1_utc=0.01756018472222477, tai_utc=36.0 ) yield @fixture def station(common_env): return create_station('Toulouse',", "of the library, but merely a convenience \"\"\" try: import", "1 24960U 97054A 18123.22759647 .00000163 00000-0 24467-3 0 9999 2", "16, 50) stop = timedelta(hours=15) step = timedelta(minutes=1) return orb.ephem(start=start,", "matplotlib is not present as it is not a dependency", "dpsi=-94.19544791666682, deps=-10.295645833333051, dy=-0.10067361111115315, dx=-0.06829513888889051, lod=1.6242802083331438, ut1_utc=0.01756018472222477, tai_utc=36.0 ) yield @fixture", "from beyond.config import config from beyond.dates.eop import Eop from beyond.frames.stations", "the skip_if_no_mpl marker in pytest's '--markers' helper option This has", "This has no actual effect on the tests \"\"\" config.addinivalue_line(", "scope=\"session\") def config_override(): \"\"\"Create a dummy config dict containing basic", "is called for each test case. 
It looks if the", "= KeplerNum( timedelta(seconds=60), get_body('Earth') ) return orb @fixture(params=[\"tle\", \"ephem\"]) def", "dummy config dict containing basic data \"\"\" config.update({ \"eop\": {", "try: import matplotlib.pyplot as plt except ImportError: return True else:", "effect on the tests \"\"\" config.addinivalue_line( \"markers\", \"skip_if_no_mpl: skip if", "/ \"jpl\" / \"pck00010.tpc\"), str(Path(__file__).parent / \"data\" / \"jpl\" /", "import Path from beyond.config import config from beyond.dates.eop import Eop", "create_station('Toulouse', (43.604482, 1.443962, 172.)) @fixture def iss_tle(common_env): return Tle(\"\"\"ISS (ZARYA)", "47.8509 47.6767 15.54198229111731\"\"\") @fixture def molniya_tle(common_env): return Tle(\"\"\"MOLNIYA 1-90 1", "orb.propagator = KeplerNum( timedelta(seconds=60), get_body('Earth') ) return orb @fixture(params=[\"tle\", \"ephem\"])", "return orb elif request.param == \"ephem\": start = Date(2018, 4,", "with patch('beyond.dates.date.EopDb.get') as m: m.return_value = Eop( x=-0.00951054166666622, y=0.31093590624999734, dpsi=-94.19544791666682,", "yield @fixture def station(common_env): return create_station('Toulouse', (43.604482, 1.443962, 172.)) @fixture", "x=-0.00951054166666622, y=0.31093590624999734, dpsi=-94.19544791666682, deps=-10.295645833333051, dy=-0.10067361111115315, dx=-0.06829513888889051, lod=1.6242802083331438, ut1_utc=0.01756018472222477, tai_utc=36.0 )", "00000-0 24467-3 0 9999 2 24960 62.6812 182.7824 6470982 294.8616", "/ \"gm_de431.tpc\"), ] } def _skip_if_no_mpl(): \"\"\"Specific for dynamically skipping", "/ \"de403_2000-2020.bsp\"), str(Path(__file__).parent / \"data\" / \"jpl\" / \"pck00010.tpc\"), str(Path(__file__).parent", "def config_override(): \"\"\"Create a dummy config dict containing basic data", "data \"\"\" config.update({ \"eop\": { \"missing_policy\": \"pass\", } }) @fixture", "@fixture def common_env(): with patch('beyond.dates.date.EopDb.get') as m: m.return_value = 
Eop(", "the library, but merely a convenience \"\"\" try: import matplotlib.pyplot", "beyond.env.solarsystem import get_body np.set_printoptions(linewidth=200) @fixture(autouse=True, scope=\"session\") def config_override(): \"\"\"Create a", "not installed\" ) def pytest_runtest_setup(item): \"\"\"This function is called for", "/ \"jpl\" / \"gm_de431.tpc\"), ] } def _skip_if_no_mpl(): \"\"\"Specific for", "config.update({ \"eop\": { \"missing_policy\": \"pass\", } }) @fixture def common_env():", "dependency of the library, but merely a convenience \"\"\" try:", "If so, skip the test case \"\"\" if _skip_if_no_mpl() and", "case. It looks if the test case has the skip_if_no_mpl", "beyond.dates import Date, timedelta from beyond.env.solarsystem import get_body np.set_printoptions(linewidth=200) @fixture(autouse=True,", "looks if the test case has the skip_if_no_mpl decorator. If", "library, but merely a convenience \"\"\" try: import matplotlib.pyplot as", "\"ephem\"]) def molniya(request, molniya_tle): orb = molniya_tle.orbit() if request.param ==", "\"\"\" try: import matplotlib.pyplot as plt except ImportError: return True", "\"tle\": return orb elif request.param == \"ephem\": start = Date(2018,", "False def pytest_configure(config): \"\"\"Declare the skip_if_no_mpl marker in pytest's '--markers'", "except ImportError: return True else: return False def pytest_configure(config): \"\"\"Declare", "for dynamically skipping the test if matplotlib is not present", "jplfiles(): config['env'] = { 'jpl': [ str(Path(__file__).parent / \"data\" /", "convenience \"\"\" try: import matplotlib.pyplot as plt except ImportError: return", ") def pytest_runtest_setup(item): \"\"\"This function is called for each test", "from beyond.dates.eop import Eop from beyond.frames.stations import create_station from beyond.io.tle", "deps=-10.295645833333051, dy=-0.10067361111115315, dx=-0.06829513888889051, lod=1.6242802083331438, ut1_utc=0.01756018472222477, tai_utc=36.0 ) yield @fixture 
def", "config from beyond.dates.eop import Eop from beyond.frames.stations import create_station from", "def jplfiles(): config['env'] = { 'jpl': [ str(Path(__file__).parent / \"data\"", "= timedelta(hours=15) step = timedelta(minutes=1) return orb.ephem(start=start, stop=stop, step=step) @fixture", "import KeplerNum from beyond.dates import Date, timedelta from beyond.env.solarsystem import", "iss_tle(common_env): return Tle(\"\"\"ISS (ZARYA) 1 25544U 98067A 18124.55610684 .00001524 00000-0", "\"eop\": { \"missing_policy\": \"pass\", } }) @fixture def common_env(): with", "stop = timedelta(hours=15) step = timedelta(minutes=1) return orb.ephem(start=start, stop=stop, step=step)", "def _skip_if_no_mpl(): \"\"\"Specific for dynamically skipping the test if matplotlib", "@fixture(params=[\"tle\", \"ephem\"]) def orbit(request, iss_tle): orb = iss_tle.orbit() if request.param", "beyond.propagators.keplernum import KeplerNum from beyond.dates import Date, timedelta from beyond.env.solarsystem", "present as it is not a dependency of the library,", "KeplerNum from beyond.dates import Date, timedelta from beyond.env.solarsystem import get_body", "test case \"\"\" if _skip_if_no_mpl() and list(item.iter_markers(name=\"skip_if_no_mpl\")): skip(\"matplotlib not installed\")", "skip if matplotlib is not installed\" ) def pytest_runtest_setup(item): \"\"\"This", "True else: return False def pytest_configure(config): \"\"\"Declare the skip_if_no_mpl marker", "from beyond.dates import Date, timedelta from beyond.env.solarsystem import get_body np.set_printoptions(linewidth=200)", "def molniya(request, molniya_tle): orb = molniya_tle.orbit() if request.param == \"tle\":", "import create_station from beyond.io.tle import Tle from beyond.propagators.keplernum import KeplerNum", "on the tests \"\"\" config.addinivalue_line( \"markers\", \"skip_if_no_mpl: skip if matplotlib", "the test case has the skip_if_no_mpl decorator. 
If so, skip", "step=step) elif request.param == \"kepler\": orb.propagator = KeplerNum( timedelta(seconds=60), get_body('Earth')", "elif request.param == \"kepler\": orb.propagator = KeplerNum( timedelta(seconds=60), get_body('Earth') )", "y=0.31093590624999734, dpsi=-94.19544791666682, deps=-10.295645833333051, dy=-0.10067361111115315, dx=-0.06829513888889051, lod=1.6242802083331438, ut1_utc=0.01756018472222477, tai_utc=36.0 ) yield", "the test if matplotlib is not present as it is", "step = timedelta(minutes=1) return orb.ephem(start=start, stop=stop, step=step) @fixture def jplfiles():", "236.2139 0003381 47.8509 47.6767 15.54198229111731\"\"\") @fixture def molniya_tle(common_env): return Tle(\"\"\"MOLNIYA", "beyond.frames.stations import create_station from beyond.io.tle import Tle from beyond.propagators.keplernum import", "is not present as it is not a dependency of", "18123.22759647 .00000163 00000-0 24467-3 0 9999 2 24960 62.6812 182.7824", "in pytest's '--markers' helper option This has no actual effect", "a convenience \"\"\" try: import matplotlib.pyplot as plt except ImportError:", "KeplerNum( timedelta(seconds=60), get_body('Earth') ) return orb @fixture(params=[\"tle\", \"ephem\"]) def molniya(request,", "timedelta(hours=15) step = timedelta(minutes=1) return orb.ephem(start=start, stop=stop, step=step) @fixture def", "it is not a dependency of the library, but merely", "import matplotlib.pyplot as plt except ImportError: return True else: return", "\"pck00010.tpc\"), str(Path(__file__).parent / \"data\" / \"jpl\" / \"gm_de431.tpc\"), ] }", "dx=-0.06829513888889051, lod=1.6242802083331438, ut1_utc=0.01756018472222477, tai_utc=36.0 ) yield @fixture def station(common_env): return", "9997 2 25544 51.6421 236.2139 0003381 47.8509 47.6767 15.54198229111731\"\"\") @fixture", "\"ephem\"]) def orbit(request, iss_tle): orb = iss_tle.orbit() if request.param ==", "/ \"data\" / \"jpl\" / \"pck00010.tpc\"), str(Path(__file__).parent / \"data\" /", "return 
orb.ephem(start=start, stop=stop, step=step) elif request.param == \"kepler\": orb.propagator =", "str(Path(__file__).parent / \"data\" / \"jpl\" / \"pck00010.tpc\"), str(Path(__file__).parent / \"data\"", "Date(2018, 4, 5, 16, 50) stop = timedelta(hours=6) step =", "timedelta(seconds=60), get_body('Earth') ) return orb @fixture(params=[\"tle\", \"ephem\"]) def molniya(request, molniya_tle):", "182.7824 6470982 294.8616 12.8538 3.18684355160009\"\"\") @fixture(params=[\"tle\", \"ephem\"]) def orbit(request, iss_tle):", "the tests \"\"\" config.addinivalue_line( \"markers\", \"skip_if_no_mpl: skip if matplotlib is", "common_env(): with patch('beyond.dates.date.EopDb.get') as m: m.return_value = Eop( x=-0.00951054166666622, y=0.31093590624999734,", "0003381 47.8509 47.6767 15.54198229111731\"\"\") @fixture def molniya_tle(common_env): return Tle(\"\"\"MOLNIYA 1-90", "Date(2018, 4, 5, 16, 50) stop = timedelta(hours=15) step =", "not a dependency of the library, but merely a convenience", "m: m.return_value = Eop( x=-0.00951054166666622, y=0.31093590624999734, dpsi=-94.19544791666682, deps=-10.295645833333051, dy=-0.10067361111115315, dx=-0.06829513888889051,", "6470982 294.8616 12.8538 3.18684355160009\"\"\") @fixture(params=[\"tle\", \"ephem\"]) def orbit(request, iss_tle): orb", "'--markers' helper option This has no actual effect on the", "config.addinivalue_line( \"markers\", \"skip_if_no_mpl: skip if matplotlib is not installed\" )", "but merely a convenience \"\"\" try: import matplotlib.pyplot as plt", "12.8538 3.18684355160009\"\"\") @fixture(params=[\"tle\", \"ephem\"]) def orbit(request, iss_tle): orb = iss_tle.orbit()", "(ZARYA) 1 25544U 98067A 18124.55610684 .00001524 00000-0 30197-4 0 9997", "dynamically skipping the test if matplotlib is not present as", "\"de403_2000-2020.bsp\"), str(Path(__file__).parent / \"data\" / \"jpl\" / \"pck00010.tpc\"), str(Path(__file__).parent /", "step=step) @fixture def jplfiles(): config['env'] = { 'jpl': [ 
str(Path(__file__).parent", "patch('beyond.dates.date.EopDb.get') as m: m.return_value = Eop( x=-0.00951054166666622, y=0.31093590624999734, dpsi=-94.19544791666682, deps=-10.295645833333051,", "marker in pytest's '--markers' helper option This has no actual", "Tle(\"\"\"MOLNIYA 1-90 1 24960U 97054A 18123.22759647 .00000163 00000-0 24467-3 0", "24467-3 0 9999 2 24960 62.6812 182.7824 6470982 294.8616 12.8538", "unittest.mock import patch from pathlib import Path from beyond.config import", "_skip_if_no_mpl(): \"\"\"Specific for dynamically skipping the test if matplotlib is", "orb.ephem(start=start, stop=stop, step=step) elif request.param == \"kepler\": orb.propagator = KeplerNum(", "skip the test case \"\"\" if _skip_if_no_mpl() and list(item.iter_markers(name=\"skip_if_no_mpl\")): skip(\"matplotlib", "helper option This has no actual effect on the tests", "option This has no actual effect on the tests \"\"\"", "= molniya_tle.orbit() if request.param == \"tle\": return orb elif request.param", "5, 16, 50) stop = timedelta(hours=15) step = timedelta(minutes=1) return", "from beyond.env.solarsystem import get_body np.set_printoptions(linewidth=200) @fixture(autouse=True, scope=\"session\") def config_override(): \"\"\"Create", "test case has the skip_if_no_mpl decorator. 
If so, skip the", "from pathlib import Path from beyond.config import config from beyond.dates.eop", "dict containing basic data \"\"\" config.update({ \"eop\": { \"missing_policy\": \"pass\",", "start = Date(2018, 4, 5, 16, 50) stop = timedelta(hours=6)", "\"\"\"Specific for dynamically skipping the test if matplotlib is not", "Tle(\"\"\"ISS (ZARYA) 1 25544U 98067A 18124.55610684 .00001524 00000-0 30197-4 0", "18124.55610684 .00001524 00000-0 30197-4 0 9997 2 25544 51.6421 236.2139", "@fixture def molniya_tle(common_env): return Tle(\"\"\"MOLNIYA 1-90 1 24960U 97054A 18123.22759647", "25544 51.6421 236.2139 0003381 47.8509 47.6767 15.54198229111731\"\"\") @fixture def molniya_tle(common_env):", "def orbit(request, iss_tle): orb = iss_tle.orbit() if request.param == \"tle\":", "stop = timedelta(hours=6) step = timedelta(seconds=15) return orb.ephem(start=start, stop=stop, step=step)", "if matplotlib is not present as it is not a", "\"pass\", } }) @fixture def common_env(): with patch('beyond.dates.date.EopDb.get') as m:", "\"jpl\" / \"de403_2000-2020.bsp\"), str(Path(__file__).parent / \"data\" / \"jpl\" / \"pck00010.tpc\"),", "molniya(request, molniya_tle): orb = molniya_tle.orbit() if request.param == \"tle\": return", "as it is not a dependency of the library, but", "station(common_env): return create_station('Toulouse', (43.604482, 1.443962, 172.)) @fixture def iss_tle(common_env): return", "ImportError: return True else: return False def pytest_configure(config): \"\"\"Declare the", "\"ephem\": start = Date(2018, 4, 5, 16, 50) stop =", "orb = molniya_tle.orbit() if request.param == \"tle\": return orb elif", "import patch from pathlib import Path from beyond.config import config", "== \"kepler\": orb.propagator = KeplerNum( timedelta(seconds=60), get_body('Earth') ) return orb", "np.set_printoptions(linewidth=200) @fixture(autouse=True, scope=\"session\") def config_override(): \"\"\"Create a dummy config dict", "/ \"jpl\" / \"de403_2000-2020.bsp\"), 
str(Path(__file__).parent / \"data\" / \"jpl\" /", "test if matplotlib is not present as it is not", "\"\"\" config.addinivalue_line( \"markers\", \"skip_if_no_mpl: skip if matplotlib is not installed\"", "timedelta from beyond.env.solarsystem import get_body np.set_printoptions(linewidth=200) @fixture(autouse=True, scope=\"session\") def config_override():", "orb = iss_tle.orbit() if request.param == \"tle\": return orb elif", "[ str(Path(__file__).parent / \"data\" / \"jpl\" / \"de403_2000-2020.bsp\"), str(Path(__file__).parent /", "plt except ImportError: return True else: return False def pytest_configure(config):", "\"missing_policy\": \"pass\", } }) @fixture def common_env(): with patch('beyond.dates.date.EopDb.get') as", "62.6812 182.7824 6470982 294.8616 12.8538 3.18684355160009\"\"\") @fixture(params=[\"tle\", \"ephem\"]) def orbit(request,", "Eop( x=-0.00951054166666622, y=0.31093590624999734, dpsi=-94.19544791666682, deps=-10.295645833333051, dy=-0.10067361111115315, dx=-0.06829513888889051, lod=1.6242802083331438, ut1_utc=0.01756018472222477, tai_utc=36.0", "test case. It looks if the test case has the", "molniya_tle): orb = molniya_tle.orbit() if request.param == \"tle\": return orb", "50) stop = timedelta(hours=6) step = timedelta(seconds=15) return orb.ephem(start=start, stop=stop,", "\"markers\", \"skip_if_no_mpl: skip if matplotlib is not installed\" ) def", "3.18684355160009\"\"\") @fixture(params=[\"tle\", \"ephem\"]) def orbit(request, iss_tle): orb = iss_tle.orbit() if", "the skip_if_no_mpl decorator. If so, skip the test case \"\"\"", "beyond.dates.eop import Eop from beyond.frames.stations import create_station from beyond.io.tle import", "function is called for each test case. 
It looks if", "= Date(2018, 4, 5, 16, 50) stop = timedelta(hours=15) step", "return Tle(\"\"\"MOLNIYA 1-90 1 24960U 97054A 18123.22759647 .00000163 00000-0 24467-3", "return orb.ephem(start=start, stop=stop, step=step) @fixture def jplfiles(): config['env'] = {", "{ 'jpl': [ str(Path(__file__).parent / \"data\" / \"jpl\" / \"de403_2000-2020.bsp\"),", "\"data\" / \"jpl\" / \"pck00010.tpc\"), str(Path(__file__).parent / \"data\" / \"jpl\"", "\"data\" / \"jpl\" / \"de403_2000-2020.bsp\"), str(Path(__file__).parent / \"data\" / \"jpl\"", "25544U 98067A 18124.55610684 .00001524 00000-0 30197-4 0 9997 2 25544", "basic data \"\"\" config.update({ \"eop\": { \"missing_policy\": \"pass\", } })", "if the test case has the skip_if_no_mpl decorator. If so,", "24960U 97054A 18123.22759647 .00000163 00000-0 24467-3 0 9999 2 24960", "beyond.io.tle import Tle from beyond.propagators.keplernum import KeplerNum from beyond.dates import", "== \"ephem\": start = Date(2018, 4, 5, 16, 50) stop", "def pytest_configure(config): \"\"\"Declare the skip_if_no_mpl marker in pytest's '--markers' helper", "stop=stop, step=step) elif request.param == \"kepler\": orb.propagator = KeplerNum( timedelta(seconds=60),", "is not installed\" ) def pytest_runtest_setup(item): \"\"\"This function is called", "each test case. 
It looks if the test case has", "has no actual effect on the tests \"\"\" config.addinivalue_line( \"markers\",", "\"kepler\": orb.propagator = KeplerNum( timedelta(seconds=60), get_body('Earth') ) return orb @fixture(params=[\"tle\",", "config_override(): \"\"\"Create a dummy config dict containing basic data \"\"\"", "str(Path(__file__).parent / \"data\" / \"jpl\" / \"de403_2000-2020.bsp\"), str(Path(__file__).parent / \"data\"", "97054A 18123.22759647 .00000163 00000-0 24467-3 0 9999 2 24960 62.6812", "matplotlib.pyplot as plt except ImportError: return True else: return False", "iss_tle.orbit() if request.param == \"tle\": return orb elif request.param ==", "1.443962, 172.)) @fixture def iss_tle(common_env): return Tle(\"\"\"ISS (ZARYA) 1 25544U", "}) @fixture def common_env(): with patch('beyond.dates.date.EopDb.get') as m: m.return_value =", "pytest_configure(config): \"\"\"Declare the skip_if_no_mpl marker in pytest's '--markers' helper option", "= Eop( x=-0.00951054166666622, y=0.31093590624999734, dpsi=-94.19544791666682, deps=-10.295645833333051, dy=-0.10067361111115315, dx=-0.06829513888889051, lod=1.6242802083331438, ut1_utc=0.01756018472222477,", "a dependency of the library, but merely a convenience \"\"\"", "Date, timedelta from beyond.env.solarsystem import get_body np.set_printoptions(linewidth=200) @fixture(autouse=True, scope=\"session\") def", "\"\"\"Create a dummy config dict containing basic data \"\"\" config.update({", "2 25544 51.6421 236.2139 0003381 47.8509 47.6767 15.54198229111731\"\"\") @fixture def", "beyond.config import config from beyond.dates.eop import Eop from beyond.frames.stations import", "47.6767 15.54198229111731\"\"\") @fixture def molniya_tle(common_env): return Tle(\"\"\"MOLNIYA 1-90 1 24960U", "mark, skip from unittest.mock import patch from pathlib import Path", "1-90 1 24960U 97054A 18123.22759647 .00000163 00000-0 24467-3 0 9999", "request.param == \"ephem\": start = Date(2018, 4, 5, 16, 50)", "(43.604482, 1.443962, 
172.)) @fixture def iss_tle(common_env): return Tle(\"\"\"ISS (ZARYA) 1", "@fixture(params=[\"tle\", \"ephem\"]) def molniya(request, molniya_tle): orb = molniya_tle.orbit() if request.param", "import Date, timedelta from beyond.env.solarsystem import get_body np.set_printoptions(linewidth=200) @fixture(autouse=True, scope=\"session\")", "step = timedelta(seconds=15) return orb.ephem(start=start, stop=stop, step=step) elif request.param ==", "from beyond.propagators.keplernum import KeplerNum from beyond.dates import Date, timedelta from", "fixture, mark, skip from unittest.mock import patch from pathlib import", "not present as it is not a dependency of the", "15.54198229111731\"\"\") @fixture def molniya_tle(common_env): return Tle(\"\"\"MOLNIYA 1-90 1 24960U 97054A", "dy=-0.10067361111115315, dx=-0.06829513888889051, lod=1.6242802083331438, ut1_utc=0.01756018472222477, tai_utc=36.0 ) yield @fixture def station(common_env):", "pathlib import Path from beyond.config import config from beyond.dates.eop import", "str(Path(__file__).parent / \"data\" / \"jpl\" / \"gm_de431.tpc\"), ] } def", "Path from beyond.config import config from beyond.dates.eop import Eop from", "as np from pytest import fixture, mark, skip from unittest.mock", "It looks if the test case has the skip_if_no_mpl decorator.", "= iss_tle.orbit() if request.param == \"tle\": return orb elif request.param", "\"gm_de431.tpc\"), ] } def _skip_if_no_mpl(): \"\"\"Specific for dynamically skipping the", "] } def _skip_if_no_mpl(): \"\"\"Specific for dynamically skipping the test", "from beyond.frames.stations import create_station from beyond.io.tle import Tle from beyond.propagators.keplernum", "iss_tle): orb = iss_tle.orbit() if request.param == \"tle\": return orb", "pytest's '--markers' helper option This has no actual effect on", "molniya_tle.orbit() if request.param == \"tle\": return orb elif request.param ==", "so, skip the test case \"\"\" if _skip_if_no_mpl() and 
list(item.iter_markers(name=\"skip_if_no_mpl\")):", "\"jpl\" / \"gm_de431.tpc\"), ] } def _skip_if_no_mpl(): \"\"\"Specific for dynamically", "@fixture def station(common_env): return create_station('Toulouse', (43.604482, 1.443962, 172.)) @fixture def", "orbit(request, iss_tle): orb = iss_tle.orbit() if request.param == \"tle\": return", "/ \"data\" / \"jpl\" / \"de403_2000-2020.bsp\"), str(Path(__file__).parent / \"data\" /", "as plt except ImportError: return True else: return False def", "def station(common_env): return create_station('Toulouse', (43.604482, 1.443962, 172.)) @fixture def iss_tle(common_env):", "/ \"data\" / \"jpl\" / \"gm_de431.tpc\"), ] } def _skip_if_no_mpl():", "actual effect on the tests \"\"\" config.addinivalue_line( \"markers\", \"skip_if_no_mpl: skip", ") return orb @fixture(params=[\"tle\", \"ephem\"]) def molniya(request, molniya_tle): orb =", "294.8616 12.8538 3.18684355160009\"\"\") @fixture(params=[\"tle\", \"ephem\"]) def orbit(request, iss_tle): orb =", "50) stop = timedelta(hours=15) step = timedelta(minutes=1) return orb.ephem(start=start, stop=stop,", "return orb @fixture(params=[\"tle\", \"ephem\"]) def molniya(request, molniya_tle): orb = molniya_tle.orbit()", "for each test case. 
It looks if the test case", "timedelta(hours=6) step = timedelta(seconds=15) return orb.ephem(start=start, stop=stop, step=step) elif request.param", "def molniya_tle(common_env): return Tle(\"\"\"MOLNIYA 1-90 1 24960U 97054A 18123.22759647 .00000163", "get_body np.set_printoptions(linewidth=200) @fixture(autouse=True, scope=\"session\") def config_override(): \"\"\"Create a dummy config", "a dummy config dict containing basic data \"\"\" config.update({ \"eop\":", "2 24960 62.6812 182.7824 6470982 294.8616 12.8538 3.18684355160009\"\"\") @fixture(params=[\"tle\", \"ephem\"])", "m.return_value = Eop( x=-0.00951054166666622, y=0.31093590624999734, dpsi=-94.19544791666682, deps=-10.295645833333051, dy=-0.10067361111115315, dx=-0.06829513888889051, lod=1.6242802083331438,", "timedelta(minutes=1) return orb.ephem(start=start, stop=stop, step=step) @fixture def jplfiles(): config['env'] =", "0 9997 2 25544 51.6421 236.2139 0003381 47.8509 47.6767 15.54198229111731\"\"\")", "Tle from beyond.propagators.keplernum import KeplerNum from beyond.dates import Date, timedelta", "== \"tle\": return orb elif request.param == \"ephem\": start =", "@fixture def iss_tle(common_env): return Tle(\"\"\"ISS (ZARYA) 1 25544U 98067A 18124.55610684", "<gh_stars>0 import numpy as np from pytest import fixture, mark,", "the test case \"\"\" if _skip_if_no_mpl() and list(item.iter_markers(name=\"skip_if_no_mpl\")): skip(\"matplotlib not", "def iss_tle(common_env): return Tle(\"\"\"ISS (ZARYA) 1 25544U 98067A 18124.55610684 .00001524", "return Tle(\"\"\"ISS (ZARYA) 1 25544U 98067A 18124.55610684 .00001524 00000-0 30197-4", "1 25544U 98067A 18124.55610684 .00001524 00000-0 30197-4 0 9997 2", "config['env'] = { 'jpl': [ str(Path(__file__).parent / \"data\" / \"jpl\"", "Eop from beyond.frames.stations import create_station from beyond.io.tle import Tle from", "4, 5, 16, 50) stop = timedelta(hours=15) step = timedelta(minutes=1)", "return False def pytest_configure(config): \"\"\"Declare the 
skip_if_no_mpl marker in pytest's", "} }) @fixture def common_env(): with patch('beyond.dates.date.EopDb.get') as m: m.return_value", "pytest import fixture, mark, skip from unittest.mock import patch from", "if request.param == \"tle\": return orb elif request.param == \"ephem\":", "tests \"\"\" config.addinivalue_line( \"markers\", \"skip_if_no_mpl: skip if matplotlib is not", "import config from beyond.dates.eop import Eop from beyond.frames.stations import create_station", "4, 5, 16, 50) stop = timedelta(hours=6) step = timedelta(seconds=15)", "'jpl': [ str(Path(__file__).parent / \"data\" / \"jpl\" / \"de403_2000-2020.bsp\"), str(Path(__file__).parent", "no actual effect on the tests \"\"\" config.addinivalue_line( \"markers\", \"skip_if_no_mpl:", "def common_env(): with patch('beyond.dates.date.EopDb.get') as m: m.return_value = Eop( x=-0.00951054166666622,", "172.)) @fixture def iss_tle(common_env): return Tle(\"\"\"ISS (ZARYA) 1 25544U 98067A", "def pytest_runtest_setup(item): \"\"\"This function is called for each test case.", "get_body('Earth') ) return orb @fixture(params=[\"tle\", \"ephem\"]) def molniya(request, molniya_tle): orb", "orb.ephem(start=start, stop=stop, step=step) @fixture def jplfiles(): config['env'] = { 'jpl':", "= timedelta(seconds=15) return orb.ephem(start=start, stop=stop, step=step) elif request.param == \"kepler\":", "has the skip_if_no_mpl decorator. 
If so, skip the test case", "\"data\" / \"jpl\" / \"gm_de431.tpc\"), ] } def _skip_if_no_mpl(): \"\"\"Specific", "request.param == \"kepler\": orb.propagator = KeplerNum( timedelta(seconds=60), get_body('Earth') ) return", "matplotlib is not installed\" ) def pytest_runtest_setup(item): \"\"\"This function is", "\"skip_if_no_mpl: skip if matplotlib is not installed\" ) def pytest_runtest_setup(item):", "import Eop from beyond.frames.stations import create_station from beyond.io.tle import Tle", "skipping the test if matplotlib is not present as it", "if matplotlib is not installed\" ) def pytest_runtest_setup(item): \"\"\"This function", "from pytest import fixture, mark, skip from unittest.mock import patch", "merely a convenience \"\"\" try: import matplotlib.pyplot as plt except", "\"\"\"This function is called for each test case. It looks", "skip_if_no_mpl marker in pytest's '--markers' helper option This has no", "\"\"\" config.update({ \"eop\": { \"missing_policy\": \"pass\", } }) @fixture def", "= timedelta(hours=6) step = timedelta(seconds=15) return orb.ephem(start=start, stop=stop, step=step) elif", "= { 'jpl': [ str(Path(__file__).parent / \"data\" / \"jpl\" /", "98067A 18124.55610684 .00001524 00000-0 30197-4 0 9997 2 25544 51.6421", "} def _skip_if_no_mpl(): \"\"\"Specific for dynamically skipping the test if", "skip_if_no_mpl decorator. 
If so, skip the test case \"\"\" if", "orb @fixture(params=[\"tle\", \"ephem\"]) def molniya(request, molniya_tle): orb = molniya_tle.orbit() if", "import Tle from beyond.propagators.keplernum import KeplerNum from beyond.dates import Date,", "import fixture, mark, skip from unittest.mock import patch from pathlib", "24960 62.6812 182.7824 6470982 294.8616 12.8538 3.18684355160009\"\"\") @fixture(params=[\"tle\", \"ephem\"]) def", "create_station from beyond.io.tle import Tle from beyond.propagators.keplernum import KeplerNum from", "@fixture(autouse=True, scope=\"session\") def config_override(): \"\"\"Create a dummy config dict containing", "0 9999 2 24960 62.6812 182.7824 6470982 294.8616 12.8538 3.18684355160009\"\"\")" ]
[ "x) == log(x + sin(y)) # Up to a constant,", "is this still correct? assert heurisch(5*x**5/( 2*x**6 - 5), x)", "== log(x + sqrt(2)) assert simplify(diff(heurisch(log(x + y + z),", "the rest of PMINT tests: # Airy functions # f", "/ 7 assert heurisch(1/pi/4 * x**2*cos(x), x) == 1/pi/4*(x**2*sin(x) -", "x) - g) == 0 @pytest.mark.slow def test_pmint_WrightOmega(): def omega(x):", "test_heurisch_exp(): assert heurisch(exp(x), x) == exp(x) assert heurisch(exp(-x), x) ==", "/ 2 assert heurisch(exp(-x**2), x) is None assert heurisch(2**x, x)", "log(x + sin(y)) # Up to a constant, where C", "when run via setup.py and cos(x) when run via py.test", "x**2/2 assert heurisch(x**17, x) == x**18/18 def test_heurisch_fractions(): assert heurisch(1/x,", "- 7*x**2), x, hints=[]) == sqrt(7)*asin(sqrt(7)*x)/7) assert (heurisch(exp(-7*x**2), x, hints=[])", "8*x - 8)/(x**8 + 6*x**6 + 12*x**4 + 8*x**2) g", "log(x) + exp(x))**2/x g = log(x**2 + 2*x*exp(x) + 2*x*log(x)", "5), x) in [5*log(2*x**6 - 5) / 12, 5*log(-2*x**6 +", "heurisch_wrapper(sin(y*sqrt(x)), x) == 2/y**2*sin(y*sqrt(x)) - \\ 2*sqrt(x)*cos(y*sqrt(x))/y def test_heurisch_special(): assert", "GHz def test_pmint_erf(): f = exp(-x**2)*erf(x)/(erf(x)**3 - erf(x)**2 - erf(x)", "for arg in expr.args if arg.has(x) ]) else: return expr", "+ sin(y)) # Up to a constant, where C =", "/ 12, 5*log(-2*x**6 + 5) / 12] assert heurisch(5*x**5/(2*x**6 +", "(y, 1, 2), (z, 2, 3)).function, x) == (x*x**z*y)/(z+1) assert", "/ 17 assert heurisch(x*exp(x), x) == x*exp(x) - exp(x) assert", "y), y)) == log(x + y + z) def test_heurisch_symbolic_coeffs_1130():", "x) == -1/x assert heurisch(-1/x**5, x) == 1/(4*x**4) def test_heurisch_log():", "1/(x + log(x) + exp(x)) + log(x + log(x) +", "heurisch(Integral(x**z*y, (y, 1, 2), (z, 2, 3)).function, x) == (x*x**z*y)/(z+1)", "5) / 12] assert heurisch(5*x**5/(2*x**6 + 5), x) == 5*log(2*x**6", "== g @pytest.mark.slow # 8 seconds on 3.4 GHz def", "f = Function('f') def test_components(): assert components(x*y, 
x) == {x}", "assert heurisch(1/(x + sin(y)), x) == log(x + sin(y)) #", "+ AiryAi(x)) + Rational(1,2)*ln(x - AiryAi(x)) # f = x**2", "result in the first case. The difference is because diofant", "test_heurisch_polynomials(): assert heurisch(1, x) == x assert heurisch(x, x) ==", "y) assert heurisch(1/(x + sqrt(2)), x) == log(x + sqrt(2))", "{x} assert components(sin(x), x) == {sin(x), x} assert components(sin(x)*sqrt(log(x)), x)", "x) == g def test_pmint_trig(): f = (x - tan(x))", "f = besselj(nu + 1, x)/besselj(nu, x) g = nu*log(x)", "+ 4*x**2), x, hints=[]) == asinh(2*x/3)/2 assert heurisch(li(x), x, hints=[])", "y = Symbol('y', positive=True) assert heurisch_wrapper(sin(y*sqrt(x)), x) == 2/y**2*sin(y*sqrt(x)) -", "+ omega(x))))/(1 + omega(x))/(x + omega(x)) g = log(x +", "2), (z, 2, 3)).function, x) == (x*x**z*y)/(z+1) assert heurisch(Sum(x**z, (z,", "x) in [sin(x)**2 / 2, -cos(x)**2 / 2] assert heurisch(cos(x)/sin(x),", "x) == exp(17*x) / 17 assert heurisch(x*exp(x), x) == x*exp(x)", "+ y + z) def test_heurisch_symbolic_coeffs_1130(): y = Symbol('y') assert", "ring is RR. See # issue sympy/sympy#8685. 
assert heurisch(sqrt(1 +", "exp(-x**2)*erf(x)/(erf(x)**3 - erf(x)**2 - erf(x) + 1) g = sqrt(pi)*log(erf(x)", "x) == -cos(x) assert heurisch(pi*sin(x) + 1, x) == x", "/ 2] assert heurisch(cos(x)/sin(x), x) == log(sin(x)) assert heurisch(x*sin(7*x), x)", "2*x**6 - 5), x) in [5*log(2*x**6 - 5) / 12,", "GHz def test_pmint_logexp(): f = (1 + x + x*exp(x))*(x", "f = sqrt(x**2/((y - x)*(y + x))) assert heurisch_wrapper(f, x)", "== x*sqrt(1 - 7*x**2)/2 + sqrt(7)*asin(sqrt(7)*x)/14) assert (heurisch(1/sqrt(1 + 7*x**2),", "Piecewise( (0, Eq(y, 0)), (-2*sqrt(x)*cos(sqrt(x)*y)/y + 2*sin(sqrt(x)*y)/y**2, True)) y =", "- 2**x*log(2)**(-2) assert heurisch(Integral(x**z*y, (y, 1, 2), (z, 2, 3)).function,", "x)} assert components(f(x)*diff(f(x), x), x) == \\ {x, f(x), Derivative(f(x),", "assert heurisch(f(x), x) is None def test_heurisch_wrapper(): f = 1/(y", "heurisch(log(x**2), x) in [x*log(x**2) - 2*x, 2*x*log(x) - 2*x] def", "sqrt(-y))/2, I*log(x + I*sqrt(y)) / (2*sqrt(y)) - I*log(x - I*sqrt(y))/(2*sqrt(y))]", "# issue sympy/sympy#6926 f = sqrt(x**2/((y - x)*(y + x)))", "1/(x + exp(x) + log(x)) # TODO: Optimal solution is", "heurisch(pi*sin(x) + 1, x) == x - pi*cos(x) assert heurisch(cos(x),", "x) / (WhittakerW(mu, nu, x) * x) # g =", "+ 8*x - 8)/(x**8 + 6*x**6 + 12*x**4 + 8*x**2)", "components(f(x)*diff(f(x), x), x) == \\ {x, f(x), Derivative(f(x), x), Derivative(f(x),", "assert heurisch_wrapper(sin(y*sqrt(x)), x) == Piecewise( (0, Eq(y, 0)), (-2*sqrt(x)*cos(sqrt(x)*y)/y +", "in [5*log(2*x**6 - 5) / 12, 5*log(-2*x**6 + 5) /", "seconds on 3.4 GHz def test_pmint_logexp(): f = (1 +", "== sin(x) assert heurisch(tan(x), x) in [ log(1 + tan(x)**2)/2,", "x) == \\ {log(x), sin(x), sqrt(log(x)), x} assert components(x*sin(exp(x)*y), x)", "== {x} assert components(sin(x), x) == {sin(x), x} assert components(sin(x)*sqrt(log(x)),", "+ exp(x) - 1)/(x + log(x) + exp(x))**2/x g =", "heurisch(1/sqrt(9 + 4*x**2), x, hints=[]) == asinh(2*x/3)/2 assert heurisch(li(x), x,", "g def test_RR(): # 
Make sure the algorithm does the", "x + x/LambertW(x) assert heurisch(f, x) == g @pytest.mark.xfail def", "assert heurisch(x*exp(x**2), x) == exp(x**2) / 2 assert heurisch(exp(-x**2), x)", "* AiryAi(x) # g = -AiryAi(x) + AiryAi(1, x)*x #", "x, hints=[]) == asinh(2*x/3)/2 assert heurisch(li(x), x, hints=[]) == x*li(x)", "== sinh(x) assert heurisch(x*sinh(x), x) == x*cosh(x) - sinh(x) assert", "def test_heurisch_function(): assert heurisch(f(x), x) is None def test_heurisch_wrapper(): f", "- Ei(2*log(x)) def test_heurisch_function(): assert heurisch(f(x), x) is None def", "I*sqrt(y))/(2*sqrt(y)), True)) y = Symbol('y', positive=True) assert heurisch_wrapper(1/(x**2 + y),", "# http://www-sop.inria.fr/cafe/Manuel.Bronstein/pmint/examples/ def test_pmint_rat(): # TODO: heurisch() is off by", "[ log(1 + tan(x)**2)/2, log(tan(x) + I) + I*x, log(tan(x)", "2*sqrt(x) assert heurisch(1/sqrt(x)**3, x) == -2/sqrt(x) assert heurisch(sqrt(x)**3, x) ==", "0)), (-2*sqrt(x)*cos(sqrt(x)*y)/y + 2*sin(sqrt(x)*y)/y**2, True)) y = Symbol('y', positive=True) assert", "/ 2, -cos(x)**2 / 2] assert heurisch(cos(x)/sin(x), x) == log(sin(x))", "# 8 seconds on 3.4 GHz def test_pmint_logexp(): f =", "assert heurisch(1/sqrt(9 + 4*x**2), x, hints=[]) == asinh(2*x/3)/2 assert heurisch(li(x),", "== 5*log(2*x**6 + 5) / 12 assert heurisch(1/x**2, x) ==", "algorithm does the right thing if the ring is RR.", "heurisch(sin(x)*exp(x), x) == exp(x)*sin(x)/2 - exp(x)*cos(x)/2 def test_heurisch_radicals(): assert heurisch(1/sqrt(x),", "components, heurisch, heurisch_wrapper __all__ = () x, y, z, nu", "]) else: return expr f = (x**7 - 24*x**4 -", "x), x) == \\ {x, f(x), Derivative(f(x), x), Derivative(f(x), x)}", "- 4*x**2), x, hints=[]) == asin(2*x/3)/2 assert heurisch(1/sqrt(9 + 4*x**2),", "/ 12 assert heurisch(1/x**2, x) == -1/x assert heurisch(-1/x**5, x)", "+ I*x, log(tan(x) - I) - I*x, ] assert heurisch(sin(x)*sin(y),", "x) in [I/sqrt(y)*log(x + sqrt(-y))/2 - I/sqrt(y)*log(x - sqrt(-y))/2, I*log(x", "f 
= (1 + omega(x) * (2 + cos(omega(x)) *", "-cos(x)*sin(y) assert heurisch(sin(x)*sin(y), y) == -cos(y)*sin(x) # gives sin(x) in", "log(x) assert drop_const(ratsimp(heurisch(f, x)), x) == g def test_pmint_trig(): f", "assert heurisch(Integral(x**z*y, (y, 1, 2), (z, 2, 3)).function, x) ==", "Piecewise, Rational, Sum, Symbol, acos, asin, asinh, besselj, cos, cosh,", "via py.test assert heurisch(sin(x)*cos(x), x) in [sin(x)**2 / 2, -cos(x)**2", "2*sqrt(x)*cos(y*sqrt(x))/y assert heurisch_wrapper(sin(y*sqrt(x)), x) == Piecewise( (0, Eq(y, 0)), (-2*sqrt(x)*cos(sqrt(x)*y)/y", "assert heurisch(log(x**2), x) in [x*log(x**2) - 2*x, 2*x*log(x) - 2*x]", "== 2*sqrt(x)**5/5 assert heurisch(sin(x)*sqrt(cos(x)), x) == -2*sqrt(cos(x))**3/3 y = Symbol('y')", "+ 1, x) == x - pi*cos(x) assert heurisch(cos(x), x)", "assert heurisch_wrapper(f, x) == -log(x - y) f = 1/((y", "(4 + 8*x**2 + 6*x + 3*x**3)/(x**5 + 4*x**3 +", "difference is because diofant changes # signs of expressions without", "LambertW, Piecewise, Rational, Sum, Symbol, acos, asin, asinh, besselj, cos,", "1)/8 - sqrt(pi)*log(erf(x) + 1)/8 - sqrt(pi)/(4*erf(x) - 4) assert", "== -cos(x) assert heurisch(pi*sin(x) + 1, x) == x -", "2*exp(x)*log(x) + log(x)**2)/2 + 1/(x + exp(x) + log(x)) #", "+ x*exp(x))*(x + log(x) + exp(x) - 1)/(x + log(x)", "hints=[]) == x*sqrt(1 + 7*x**2)/2 + sqrt(7)*asinh(sqrt(7)*x)/14) assert (heurisch(sqrt(1 -", "heurisch(1, x) == x assert heurisch(x, x) == x**2/2 assert", "/ (WhittakerW(mu, nu, x) * x) # g = x/2", "+ sin(LambertW(exp(x))) assert heurisch(f, x) == g def test_RR(): #", "issue sympy/sympy#8685. assert heurisch(sqrt(1 + 0.25*x**2), x, hints=[]) == \\", "I*log(log(x) + I)/2 - \\ I*log(log(x) - I)/2 # These", "asin(x/4), x) == 2*x - (sqrt(16 - x**2))*asin(x/4) \\ +", "# XXX ^ ^ ^ is this still correct? 
assert", "- AiryAi(x)**2) # g = Rational(1,2)*ln(x + AiryAi(x)) + Rational(1,2)*ln(x", "assert components(x**Rational(17, 54)/sqrt(sin(x)), x) == \\ {sin(x), root(x, 54), sqrt(sin(x)),", "+ log(x) + exp(x) - 1)/(x + log(x) + exp(x))**2/x", "gives None. Wrong besselj() derivative? f = besselj(nu + 1,", "test_heurisch_symbolic_coeffs(): assert heurisch(1/(x + y), x) == log(x + y)", "sqrt(pi)*log(erf(x) - 1)/8 - sqrt(pi)*log(erf(x) + 1)/8 - sqrt(pi)/(4*erf(x) -", "heurisch(sqrt(1 + 0.25*x**2), x, hints=[]) == \\ 0.5*x*sqrt(0.25*x**2 + 1)", "+ y), x) == {x} assert components(sin(x), x) == {sin(x),", "== asinh(2*x/3)/2 assert heurisch(li(x), x, hints=[]) == x*li(x) - Ei(2*log(x))", "heurisch(x*exp(x), x) == x*exp(x) - exp(x) assert heurisch(x*exp(x**2), x) ==", "is None def test_heurisch_wrapper(): f = 1/(y + x) assert", "heurisch(x*sin(7*x), x) == sin(7*x) / 49 - x*cos(7*x) / 7", "- g) == 0 @pytest.mark.slow def test_pmint_WrightOmega(): def omega(x): return", "5) / 12, 5*log(-2*x**6 + 5) / 12] assert heurisch(5*x**5/(2*x**6", "y = Symbol('y', positive=True) assert heurisch_wrapper(1/(x**2 + y), x) in", "[I/sqrt(y)*log(x + sqrt(-y))/2 - I/sqrt(y)*log(x - sqrt(-y))/2, I*log(x + I*sqrt(y))", "7*x**2), x, hints=[]) == x*sqrt(1 + 7*x**2)/2 + sqrt(7)*asinh(sqrt(7)*x)/14) assert", "assert heurisch(sqrt(x)**3, x) == 2*sqrt(x)**5/5 assert heurisch(sin(x)*sqrt(cos(x)), x) == -2*sqrt(cos(x))**3/3", "g) == 0 f = (nu*besselj(nu, x) - x*besselj(nu +", "* (2 + cos(omega(x)) * (x + omega(x))))/(1 + omega(x))/(x", "= 1/(y + x) assert heurisch_wrapper(f, x) == log(x +", "Piecewise( (-1/x, Eq(y, 0)), (-I*log(x - I*sqrt(y))/(2*sqrt(y)) + I*log(x +", "+ z) def test_heurisch_symbolic_coeffs_1130(): y = Symbol('y') assert heurisch_wrapper(1/(x**2 +", "erf(x)**2 - erf(x) + 1) g = sqrt(pi)*log(erf(x) - 1)/8", "assert heurisch(exp(-x**2)*erf(x), x) == sqrt(pi)*erf(x)**2 / 4 def test_heurisch_symbolic_coeffs(): assert", "log(1 + tan(x)**2)/2, log(tan(x) + I) + I*x, log(tan(x) -", "Symbol, acos, 
asin, asinh, besselj, cos, cosh, diff, erf, exp,", "Ei, Eq, Function, I, Integral, LambertW, Piecewise, Rational, Sum, Symbol,", "x*cos(7*x) / 7 assert heurisch(1/pi/4 * x**2*cos(x), x) == 1/pi/4*(x**2*sin(x)", "\\ Piecewise((1/x, Eq(y, 0)), (log(x + y)/2/y - log(x -", "x, hints=[]) == sqrt(7)*asin(sqrt(7)*x)/7) assert (heurisch(exp(-7*x**2), x, hints=[]) == sqrt(7*pi)*erf(sqrt(7)*x)/14)", "constant: -3/4. Possibly different permutation # would give the optimal", "sinh(x) assert heurisch(x*cosh(x), x) == x*sinh(x) - cosh(x) assert heurisch(", "I*sqrt(y))/(2*sqrt(y)) + I*log(x + I*sqrt(y))/(2*sqrt(y)), True)) y = Symbol('y', positive=True)", "x) == sin(x) assert heurisch(tan(x), x) in [ log(1 +", "log(x + 2) assert heurisch(1/(x + sin(y)), x) == log(x", "sympy/sympy#6926 f = sqrt(x**2/((y - x)*(y + x))) assert heurisch_wrapper(f,", "== 0 @pytest.mark.slow def test_pmint_WrightOmega(): def omega(x): return LambertW(exp(x)) f", "^ ^ ^ is this still correct? assert heurisch(5*x**5/( 2*x**6", "sqrt(7)*asin(sqrt(7)*x)/14) assert (heurisch(1/sqrt(1 + 7*x**2), x, hints=[]) == sqrt(7)*asinh(sqrt(7)*x)/7) assert", "+ exp(2*x) + 2*exp(x)*log(x) + log(x)**2)/2 + 1/(x + exp(x)", "-cos(x)**2 / 2] assert heurisch(cos(x)/sin(x), x) == log(sin(x)) assert heurisch(x*sin(7*x),", "besselj(nu + 1, x)/besselj(nu, x) g = nu*log(x) - log(besselj(nu,", "+ I*sqrt(y)) / (2*sqrt(y)) - I*log(x - I*sqrt(y))/(2*sqrt(y))] def test_heurisch_hacking():", "- log(besselj(nu, x)) assert simplify(heurisch(f, x) - g) == 0", "* asin(x/4), x) == 2*x - (sqrt(16 - x**2))*asin(x/4) \\", "of PMINT tests: # Airy functions # f = (x", "x, hints=[]) == x*li(x) - Ei(2*log(x)) def test_heurisch_function(): assert heurisch(f(x),", "# Up to a constant, where C = 5*pi*I/12, Mathematica", "C = 5*pi*I/12, Mathematica gives identical # result in the", "assert simplify(heurisch(f, x) - g) == 0 @pytest.mark.slow def test_pmint_WrightOmega():", "- x)*(y + x))) assert heurisch_wrapper(f, x) == x*sqrt(x**2)*sqrt(1/(-x**2 +", 
"= -x**2/2 - x/tan(x) + log(tan(x)**2 + 1)/2 assert heurisch(f,", "convert the rest of PMINT tests: # Airy functions #", "assert heurisch(sin(x), x) == -cos(x) assert heurisch(pi*sin(x) + 1, x)", "- 4*x**2 + 8*x - 8)/(x**8 + 6*x**6 + 12*x**4", "- 1)/8 - sqrt(pi)*log(erf(x) + 1)/8 - sqrt(pi)/(4*erf(x) - 4)", "\\ {log(x), sin(x), sqrt(log(x)), x} assert components(x*sin(exp(x)*y), x) == \\", "cosh, diff, erf, exp, li, log, pi, ratsimp, root, simplify,", "<filename>diofant/tests/integrals/test_heurisch.py import pytest from diofant import (Add, Derivative, Ei, Eq,", "+ y + z), y), y)) == log(x + y", "x*erf(x) + exp(-x**2)/sqrt(pi) assert heurisch(exp(-x**2)*erf(x), x) == sqrt(pi)*erf(x)**2 / 4", "def test_components(): assert components(x*y, x) == {x} assert components(1/(x +", "= (x - AiryAi(x)*AiryAi(1, x)) / (x**2 - AiryAi(x)**2) #", "z) def test_heurisch_symbolic_coeffs_1130(): y = Symbol('y') assert heurisch_wrapper(1/(x**2 + y),", "def test_heurisch_hacking(): assert (heurisch(sqrt(1 + 7*x**2), x, hints=[]) == x*sqrt(1", "assert heurisch(sinh(x), x) == cosh(x) assert heurisch(cosh(x), x) == sinh(x)", "= (1 + x + x*exp(x))*(x + log(x) + exp(x)", "exp(x) + log(x)) # TODO: Optimal solution is g =", "assert heurisch(log(x), x) == x*log(x) - x assert heurisch(log(3*x), x)", "x) == {x} assert components(sin(x), x) == {sin(x), x} assert", "assert heurisch(-1/x**5, x) == 1/(4*x**4) def test_heurisch_log(): assert heurisch(log(x), x)", "care. 
# XXX ^ ^ ^ is this still correct?", "assert heurisch(1/sqrt(x)**3, x) == -2/sqrt(x) assert heurisch(sqrt(x)**3, x) == 2*sqrt(x)**5/5", "li, log, pi, ratsimp, root, simplify, sin, sinh, sqrt, symbols,", "49 - x*cos(7*x) / 7 assert heurisch(1/pi/4 * x**2*cos(x), x)", "- AiryAi(x)*AiryAi(1, x)) / (x**2 - AiryAi(x)**2) # g =", "1/(y + x) assert heurisch_wrapper(f, x) == log(x + y)", "log(x)**2)), x) == I*log(log(x) + I)/2 - \\ I*log(log(x) -", "log(sin(x)) assert heurisch(x*sin(7*x), x) == sin(7*x) / 49 - x*cos(7*x)", "+ 2*x*exp(x) + 2*x*log(x) + exp(2*x) + 2*exp(x)*log(x) + log(x)**2)/2", "24*x**4 - 4*x**2 + 8*x - 8)/(x**8 + 6*x**6 +", "in the first case. The difference is because diofant changes", "1)/(x + log(x) + exp(x))**2/x g = log(x**2 + 2*x*exp(x)", "{x, f(x), Derivative(f(x), x), Derivative(f(x), x)} def test_heurisch_polynomials(): assert heurisch(1,", "x) == x*2**x/log(2) - 2**x*log(2)**(-2) assert heurisch(Integral(x**z*y, (y, 1, 2),", "g = log(x + LambertW(exp(x))) + sin(LambertW(exp(x))) assert heurisch(f, x)", "2*x] def test_heurisch_exp(): assert heurisch(exp(x), x) == exp(x) assert heurisch(exp(-x),", "tests: # Airy functions # f = (x - AiryAi(x)*AiryAi(1,", "def omega(x): return LambertW(exp(x)) f = (1 + omega(x) *", "test_heurisch_symbolic_coeffs_1130(): y = Symbol('y') assert heurisch_wrapper(1/(x**2 + y), x) ==", "x} assert components(x*sin(exp(x)*y), x) == \\ {sin(y*exp(x)), x, exp(x)} assert", "== \\ Piecewise((1/x, Eq(y, 0)), (log(x + y)/2/y - log(x", "@pytest.mark.xfail def test_pmint_besselj(): # TODO: in both cases heurisch() gives", "1, 2)).function, z) == x**z/log(x) def test_heurisch_trigonometric(): assert heurisch(sin(x), x)", "+ sqrt(2)) assert simplify(diff(heurisch(log(x + y + z), y), y))", "4) assert ratsimp(heurisch(f, x)) == g def test_pmint_LambertW(): f =", "-x + x*log(3) + x*log(x) assert heurisch(log(x**2), x) in [x*log(x**2)", "== log(x) assert heurisch(1/(2 + x), x) == log(x +", "+ sqrt(7)*asin(sqrt(7)*x)/14) assert 
(heurisch(1/sqrt(1 + 7*x**2), x, hints=[]) == sqrt(7)*asinh(sqrt(7)*x)/7)", "= () x, y, z, nu = symbols('x,y,z,nu') f =", "on 3.4 GHz def test_pmint_erf(): f = exp(-x**2)*erf(x)/(erf(x)**3 - erf(x)**2", "(x - AiryAi(x)*AiryAi(1, x)) / (x**2 - AiryAi(x)**2) # g", "def test_heurisch_special(): assert heurisch(erf(x), x) == x*erf(x) + exp(-x**2)/sqrt(pi) assert", "sqrt(-y))/2 - I/sqrt(y)*log(x - sqrt(-y))/2, I*log(x + I*sqrt(y)) / (2*sqrt(y))", "0 f = (nu*besselj(nu, x) - x*besselj(nu + 1, x))/x", "- I/sqrt(y)*log(x - sqrt(-y))/2, I*log(x + I*sqrt(y)) / (2*sqrt(y)) -", "would give the optimal result? def drop_const(expr, x): if expr.is_Add:", "a lot of guidance to properly simplify heurisch() output. assert", "x - pi*cos(x) assert heurisch(cos(x), x) == sin(x) assert heurisch(tan(x),", "arg.has(x) ]) else: return expr f = (x**7 - 24*x**4", "# signs of expressions without any care. # XXX ^", "- 2*sin(x) + 2*x*cos(x)) assert heurisch(acos(x/4) * asin(x/4), x) ==", "+ 1)/8 - sqrt(pi)/(4*erf(x) - 4) assert ratsimp(heurisch(f, x)) ==", "assert heurisch(erf(x), x) == x*erf(x) + exp(-x**2)/sqrt(pi) assert heurisch(exp(-x**2)*erf(x), x)", "def test_pmint_LambertW(): f = LambertW(x) g = x*LambertW(x) - x", "(WhittakerW(mu, nu, x) * x) # g = x/2 -", "+ I) + I*x, log(tan(x) - I) - I*x, ]", "(2*sqrt(y)) - I*log(x - I*sqrt(y))/(2*sqrt(y))] def test_heurisch_hacking(): assert (heurisch(sqrt(1 +", "== \\ {sin(x), root(x, 54), sqrt(sin(x)), x} assert components(f(x), x)", "(x**2 - AiryAi(x)**2) # g = Rational(1,2)*ln(x + AiryAi(x)) +", "sinh, sqrt, symbols, tan) from diofant.integrals.heurisch import components, heurisch, heurisch_wrapper", "def test_pmint_WrightOmega(): def omega(x): return LambertW(exp(x)) f = (1 +", "- \\ 2*sqrt(x)*cos(y*sqrt(x))/y assert heurisch_wrapper(sin(y*sqrt(x)), x) == Piecewise( (0, Eq(y,", "sin, sinh, sqrt, symbols, tan) from diofant.integrals.heurisch import components, heurisch,", "x) == exp(x**2) / 2 assert heurisch(exp(-x**2), x) is None", "assert 
heurisch(sqrt(1 + 0.25*x**2), x, hints=[]) == \\ 0.5*x*sqrt(0.25*x**2 +", "== cosh(x) assert heurisch(cosh(x), x) == sinh(x) assert heurisch(x*sinh(x), x)", "y = Symbol('y') assert heurisch_wrapper(1/(x**2 + y), x) == Piecewise(", "tan(x)**2)/2, log(tan(x) + I) + I*x, log(tan(x) - I) -", "== 1/(4*x**4) def test_heurisch_log(): assert heurisch(log(x), x) == x*log(x) -", "heurisch(x*cosh(x), x) == x*sinh(x) - cosh(x) assert heurisch( x*asinh(x/2), x)", "sqrt, symbols, tan) from diofant.integrals.heurisch import components, heurisch, heurisch_wrapper __all__", "f = (1 + x + x*exp(x))*(x + log(x) +", "\\ {sin(y*exp(x)), x, exp(x)} assert components(x**Rational(17, 54)/sqrt(sin(x)), x) == \\", "- \\ I*log(log(x) - I)/2 # These are examples from", "test_pmint_trig(): f = (x - tan(x)) / tan(x)**2 + tan(x)", "cos(x) when run via py.test assert heurisch(sin(x)*cos(x), x) in [sin(x)**2", "guidance to properly simplify heurisch() output. assert ratsimp(heurisch(f, x)) ==", "= (1 + omega(x) * (2 + cos(omega(x)) * (x", "2, -cos(x)**2 / 2] assert heurisch(cos(x)/sin(x), x) == log(sin(x)) assert", "+ 8*x**2 + 6*x + 3*x**3)/(x**5 + 4*x**3 + 4*x)", "x) == Piecewise( (0, Eq(y, 0)), (-2*sqrt(x)*cos(sqrt(x)*y)/y + 2*sin(sqrt(x)*y)/y**2, True))", "does the right thing if the ring is RR. 
See", "y**2)) \\ - y**2*sqrt(x**2)*sqrt(1/(-x**2 + y**2))/x def test_sympyissue_3609(): assert heurisch(1/(x", "x) == 2*x - (sqrt(16 - x**2))*asin(x/4) \\ + (sqrt(16", "diff, erf, exp, li, log, pi, ratsimp, root, simplify, sin,", "hints=[]) == x*sqrt(1 - 7*x**2)/2 + sqrt(7)*asin(sqrt(7)*x)/14) assert (heurisch(1/sqrt(1 +", "+ 8*x**2) g = (4 + 8*x**2 + 6*x +", "x**2*asinh(x/2)/2 + asinh(x/2) - x*sqrt(4 + x**2)/4 def test_heurisch_mixed(): assert", "issue sympy/sympy#6926 f = sqrt(x**2/((y - x)*(y + x))) assert", "- x**2))*acos(x/4) + x*asin(x/4)*acos(x/4) def test_heurisch_hyperbolic(): assert heurisch(sinh(x), x) ==", "+ LambertW(exp(x))) + sin(LambertW(exp(x))) assert heurisch(f, x) == g def", "LambertW(exp(x)) f = (1 + omega(x) * (2 + cos(omega(x))", "f(x)} assert components(Derivative(f(x), x), x) == \\ {x, f(x), Derivative(f(x),", "def test_heurisch_exp(): assert heurisch(exp(x), x) == exp(x) assert heurisch(exp(-x), x)", "2*sqrt(x)*cos(y*sqrt(x))/y def test_heurisch_special(): assert heurisch(erf(x), x) == x*erf(x) + exp(-x**2)/sqrt(pi)", "else: return expr f = (x**7 - 24*x**4 - 4*x**2", "assert heurisch(x*sinh(x), x) == x*cosh(x) - sinh(x) assert heurisch(x*cosh(x), x)", "assert ratsimp(heurisch(f, x)) == g def test_pmint_LambertW(): f = LambertW(x)", "Eq, Function, I, Integral, LambertW, Piecewise, Rational, Sum, Symbol, acos,", "== \\ {log(x), sin(x), sqrt(log(x)), x} assert components(x*sin(exp(x)*y), x) ==", "- y) f = 1/((y - x)*(y + x)) assert", "= Symbol('y', positive=True) assert heurisch_wrapper(1/(x**2 + y), x) in [I/sqrt(y)*log(x", "x**2))*asin(x/4) \\ + (sqrt(16 - x**2))*acos(x/4) + x*asin(x/4)*acos(x/4) def test_heurisch_hyperbolic():", "6*x**6 + 12*x**4 + 8*x**2) g = (4 + 8*x**2", "log(x + log(x) + exp(x)), # but Diofant requires a", "Eq(y, 0)), (-2*sqrt(x)*cos(sqrt(x)*y)/y + 2*sin(sqrt(x)*y)/y**2, True)) y = Symbol('y', positive=True)", "Derivative(f(x), x), Derivative(f(x), x)} def test_heurisch_polynomials(): assert heurisch(1, x) ==", 
"exp(x)} assert components(x**Rational(17, 54)/sqrt(sin(x)), x) == \\ {sin(x), root(x, 54),", "exp(x))**2/x g = log(x**2 + 2*x*exp(x) + 2*x*log(x) + exp(2*x)", "assert heurisch(exp(x), x) == exp(x) assert heurisch(exp(-x), x) == -exp(-x)", "sin(y)), x) == log(x + sin(y)) # Up to a", "exp(x**2) / 2 assert heurisch(exp(-x**2), x) is None assert heurisch(2**x,", "def test_heurisch_mixed(): assert heurisch(sin(x)*exp(x), x) == exp(x)*sin(x)/2 - exp(x)*cos(x)/2 def", "f = 1/(y + x) assert heurisch_wrapper(f, x) == log(x", "def test_pmint_erf(): f = exp(-x**2)*erf(x)/(erf(x)**3 - erf(x)**2 - erf(x) +", "(log(x + y)/2/y - log(x - y)/2/y, True)) # issue", "heurisch(1/(2 + x), x) == log(x + 2) assert heurisch(1/(x", "import components, heurisch, heurisch_wrapper __all__ = () x, y, z,", "+ x/LambertW(x) assert heurisch(f, x) == g @pytest.mark.xfail def test_pmint_besselj():", "x) assert simplify(heurisch(f, x) - g) == 0 @pytest.mark.slow def", "x**18/18 def test_heurisch_fractions(): assert heurisch(1/x, x) == log(x) assert heurisch(1/(2", "x, hints=[]) == x*sqrt(1 + 7*x**2)/2 + sqrt(7)*asinh(sqrt(7)*x)/14) assert (heurisch(sqrt(1", "components(x*y, x) == {x} assert components(1/(x + y), x) ==", "+ 12*x**4 + 8*x**2) g = (4 + 8*x**2 +", "-x**2/2 - x/tan(x) + log(tan(x)**2 + 1)/2 assert heurisch(f, x)", "(1 + x + x*exp(x))*(x + log(x) + exp(x) -", "heurisch(acos(x/4) * asin(x/4), x) == 2*x - (sqrt(16 - x**2))*asin(x/4)", "+ exp(x))**2/x g = log(x**2 + 2*x*exp(x) + 2*x*log(x) +", "AiryAi(x) # g = -AiryAi(x) + AiryAi(1, x)*x # Whittaker", "heurisch_wrapper(1/(x**2 + y), x) == Piecewise( (-1/x, Eq(y, 0)), (-I*log(x", "log, pi, ratsimp, root, simplify, sin, sinh, sqrt, symbols, tan)", "heurisch(5*x**5/( 2*x**6 - 5), x) in [5*log(2*x**6 - 5) /", "Sum, Symbol, acos, asin, asinh, besselj, cos, cosh, diff, erf,", "test_heurisch_special(): assert heurisch(erf(x), x) == x*erf(x) + exp(-x**2)/sqrt(pi) assert heurisch(exp(-x**2)*erf(x),", "Whittaker functions # f = WhittakerW(mu + 1, nu, 
x)", "2*x, 2*x*log(x) - 2*x] def test_heurisch_exp(): assert heurisch(exp(x), x) ==", "== x**2*asinh(x/2)/2 + asinh(x/2) - x*sqrt(4 + x**2)/4 def test_heurisch_mixed():", "assert heurisch(1/(x + sqrt(2)), x) == log(x + sqrt(2)) assert", "+ log(x)**2)/2 + 1/(x + exp(x) + log(x)) # TODO:", "== x*log(x) - x assert heurisch(log(3*x), x) == -x +", "a constant: -3/4. Possibly different permutation # would give the", "- 8)/(x**8 + 6*x**6 + 12*x**4 + 8*x**2) g =", "g = nu*log(x) - log(besselj(nu, x)) assert simplify(heurisch(f, x) -", "= 1/(x + log(x) + exp(x)) + log(x + log(x)", "import pytest from diofant import (Add, Derivative, Ei, Eq, Function,", "== g def test_pmint_LambertW(): f = LambertW(x) g = x*LambertW(x)", "- 5) / 12, 5*log(-2*x**6 + 5) / 12] assert", "x} assert components(sin(x)*sqrt(log(x)), x) == \\ {log(x), sin(x), sqrt(log(x)), x}", "+ log(x) assert drop_const(ratsimp(heurisch(f, x)), x) == g def test_pmint_trig():", "+ 5) / 12 assert heurisch(1/x**2, x) == -1/x assert", "+ omega(x))/(x + omega(x)) g = log(x + LambertW(exp(x))) +", "x)) == g def test_pmint_LambertW(): f = LambertW(x) g =", "return Add(*[ arg for arg in expr.args if arg.has(x) ])", "+ 7*x**2), x, hints=[]) == x*sqrt(1 + 7*x**2)/2 + sqrt(7)*asinh(sqrt(7)*x)/14)", "assert simplify(heurisch(f, x) - g) == 0 f = (nu*besselj(nu,", "2**x/log(2) assert heurisch(x*2**x, x) == x*2**x/log(2) - 2**x*log(2)**(-2) assert heurisch(Integral(x**z*y,", "by a constant: -3/4. Possibly different permutation # would give", "and cos(x) when run via py.test assert heurisch(sin(x)*cos(x), x) in", "1, nu, x) / (WhittakerW(mu, nu, x) * x) #", "+ y) assert heurisch(1/(x + sqrt(2)), x) == log(x +", "changes # signs of expressions without any care. # XXX", "f = exp(-x**2)*erf(x)/(erf(x)**3 - erf(x)**2 - erf(x) + 1) g", "- g) == 0 f = (nu*besselj(nu, x) - x*besselj(nu", "heurisch(1/x**2, x) == -1/x assert heurisch(-1/x**5, x) == 1/(4*x**4) def", "without any care. 
# XXX ^ ^ ^ is this", "- sinh(x) assert heurisch(x*cosh(x), x) == x*sinh(x) - cosh(x) assert", "x + x*exp(x))*(x + log(x) + exp(x) - 1)/(x +", "drop_const(ratsimp(heurisch(f, x)), x) == g def test_pmint_trig(): f = (x", "y**2*sqrt(x**2)*sqrt(1/(-x**2 + y**2))/x def test_sympyissue_3609(): assert heurisch(1/(x * (1 +", "+ 6*x + 3*x**3)/(x**5 + 4*x**3 + 4*x) + log(x)", "x*asinh(x/2), x) == x**2*asinh(x/2)/2 + asinh(x/2) - x*sqrt(4 + x**2)/4", "test_heurisch_hyperbolic(): assert heurisch(sinh(x), x) == cosh(x) assert heurisch(cosh(x), x) ==", "-exp(-x) assert heurisch(exp(17*x), x) == exp(17*x) / 17 assert heurisch(x*exp(x),", "ratsimp(heurisch(f, x)) == g def test_pmint_LambertW(): f = LambertW(x) g", "x) == x**2/2 assert heurisch(x**17, x) == x**18/18 def test_heurisch_fractions():", "components(x*sin(exp(x)*y), x) == \\ {sin(y*exp(x)), x, exp(x)} assert components(x**Rational(17, 54)/sqrt(sin(x)),", "x*sqrt(x**2)*sqrt(1/(-x**2 + y**2)) \\ - y**2*sqrt(x**2)*sqrt(1/(-x**2 + y**2))/x def test_sympyissue_3609():", "== -cos(x)*sin(y) assert heurisch(sin(x)*sin(y), y) == -cos(y)*sin(x) # gives sin(x)", "y + z), y), y)) == log(x + y +", "assert heurisch(1/(x * (1 + log(x)**2)), x) == I*log(log(x) +", "+ y), x) == Piecewise( (-1/x, Eq(y, 0)), (-I*log(x -", "x) == log(x + 2) assert heurisch(1/(x + sin(y)), x)", "0.5*x*sqrt(0.25*x**2 + 1) + 1.0*asinh(0.5*x) # TODO: convert the rest", "f = (nu*besselj(nu, x) - x*besselj(nu + 1, x))/x g", "heurisch(cosh(x), x) == sinh(x) assert heurisch(x*sinh(x), x) == x*cosh(x) -", "pytest from diofant import (Add, Derivative, Ei, Eq, Function, I,", "heurisch(1/sqrt(x), x) == 2*sqrt(x) assert heurisch(1/sqrt(x)**3, x) == -2/sqrt(x) assert", "assert heurisch(cos(x), x) == sin(x) assert heurisch(tan(x), x) in [", "diofant import (Add, Derivative, Ei, Eq, Function, I, Integral, LambertW,", "heurisch(1/(x + sin(y)), x) == log(x + sin(y)) # Up", "(-2*sqrt(x)*cos(sqrt(x)*y)/y + 2*sin(sqrt(x)*y)/y**2, True)) y = Symbol('y', positive=True) assert 
heurisch_wrapper(sin(y*sqrt(x)),", "I)/2 # These are examples from the Poor Man's Integrator", "None assert heurisch(2**x, x) == 2**x/log(2) assert heurisch(x*2**x, x) ==", "I*log(x - I*sqrt(y))/(2*sqrt(y))] def test_heurisch_hacking(): assert (heurisch(sqrt(1 + 7*x**2), x,", "y) f = 1/(y - x) assert heurisch_wrapper(f, x) ==", "[x*log(x**2) - 2*x, 2*x*log(x) - 2*x] def test_heurisch_exp(): assert heurisch(exp(x),", "Up to a constant, where C = 5*pi*I/12, Mathematica gives", "heurisch(1/sqrt(9 - 4*x**2), x, hints=[]) == asin(2*x/3)/2 assert heurisch(1/sqrt(9 +", "== -2/sqrt(x) assert heurisch(sqrt(x)**3, x) == 2*sqrt(x)**5/5 assert heurisch(sin(x)*sqrt(cos(x)), x)", "x) == 2**x/log(2) assert heurisch(x*2**x, x) == x*2**x/log(2) - 2**x*log(2)**(-2)", "+ y**2)) \\ - y**2*sqrt(x**2)*sqrt(1/(-x**2 + y**2))/x def test_sympyissue_3609(): assert", "heurisch(log(3*x), x) == -x + x*log(3) + x*log(x) assert heurisch(log(x**2),", "== exp(x) assert heurisch(exp(-x), x) == -exp(-x) assert heurisch(exp(17*x), x)", "assert heurisch_wrapper(f, x) == log(x + y) f = 1/(y", "y, z, nu = symbols('x,y,z,nu') f = Function('f') def test_components():", "requires a lot of guidance to properly simplify heurisch() output.", "sqrt(7)*asin(sqrt(7)*x)/7) assert (heurisch(exp(-7*x**2), x, hints=[]) == sqrt(7*pi)*erf(sqrt(7)*x)/14) assert heurisch(1/sqrt(9 -", "x) in [x*log(x**2) - 2*x, 2*x*log(x) - 2*x] def test_heurisch_exp():", "heurisch(exp(-x**2), x) is None assert heurisch(2**x, x) == 2**x/log(2) assert", "\\ {sin(x), root(x, 54), sqrt(sin(x)), x} assert components(f(x), x) ==", "Poor Man's Integrator # http://www-sop.inria.fr/cafe/Manuel.Bronstein/pmint/examples/ def test_pmint_rat(): # TODO: heurisch()", "tan) from diofant.integrals.heurisch import components, heurisch, heurisch_wrapper __all__ = ()", "x*cosh(x) - sinh(x) assert heurisch(x*cosh(x), x) == x*sinh(x) - cosh(x)", "assert heurisch(5*x**5/(2*x**6 + 5), x) == 5*log(2*x**6 + 5) /", "the ring is RR. See # issue sympy/sympy#8685. 
assert heurisch(sqrt(1", "a constant, where C = 5*pi*I/12, Mathematica gives identical #", "- x/tan(x) + log(tan(x)**2 + 1)/2 assert heurisch(f, x) ==", "sqrt(sin(x)), x} assert components(f(x), x) == \\ {x, f(x)} assert", "def test_heurisch_log(): assert heurisch(log(x), x) == x*log(x) - x assert", "I*log(log(x) - I)/2 # These are examples from the Poor", "+ 2*x*log(x) + exp(2*x) + 2*exp(x)*log(x) + log(x)**2)/2 + 1/(x", "x*besselj(nu + 1, x))/x g = besselj(nu, x) assert simplify(heurisch(f,", "(x*x**z*y)/(z+1) assert heurisch(Sum(x**z, (z, 1, 2)).function, z) == x**z/log(x) def", "== 2/y**2*sin(y*sqrt(x)) - \\ 2*sqrt(x)*cos(y*sqrt(x))/y assert heurisch_wrapper(sin(y*sqrt(x)), x) == Piecewise(", "case. The difference is because diofant changes # signs of", "-2*sqrt(cos(x))**3/3 y = Symbol('y') assert heurisch(sin(y*sqrt(x)), x) == 2/y**2*sin(y*sqrt(x)) -", "= (4 + 8*x**2 + 6*x + 3*x**3)/(x**5 + 4*x**3", "2 assert heurisch(exp(-x**2), x) is None assert heurisch(2**x, x) ==", "test_pmint_erf(): f = exp(-x**2)*erf(x)/(erf(x)**3 - erf(x)**2 - erf(x) + 1)", "this still correct? 
assert heurisch(5*x**5/( 2*x**6 - 5), x) in", "== 2/y**2*sin(y*sqrt(x)) - \\ 2*sqrt(x)*cos(y*sqrt(x))/y def test_heurisch_special(): assert heurisch(erf(x), x)", "heurisch(sin(x)*sin(y), x) == -cos(x)*sin(y) assert heurisch(sin(x)*sin(y), y) == -cos(y)*sin(x) #", "+ y), x) == log(x + y) assert heurisch(1/(x +", "root(x, 54), sqrt(sin(x)), x} assert components(f(x), x) == \\ {x,", "g = x*LambertW(x) - x + x/LambertW(x) assert heurisch(f, x)", "+ x) assert heurisch_wrapper(f, x) == log(x + y) f", "y), x) == {x} assert components(sin(x), x) == {sin(x), x}", "heurisch(sin(y*sqrt(x)), x) == 2/y**2*sin(y*sqrt(x)) - \\ 2*sqrt(x)*cos(y*sqrt(x))/y assert heurisch_wrapper(sin(y*sqrt(x)), x)", "- x) assert heurisch_wrapper(f, x) == -log(x - y) f", "The difference is because diofant changes # signs of expressions", "None def test_heurisch_wrapper(): f = 1/(y + x) assert heurisch_wrapper(f,", "= log(x + LambertW(exp(x))) + sin(LambertW(exp(x))) assert heurisch(f, x) ==", "XXX ^ ^ ^ is this still correct? assert heurisch(5*x**5/(", "+ 2*sin(sqrt(x)*y)/y**2, True)) y = Symbol('y', positive=True) assert heurisch_wrapper(sin(y*sqrt(x)), x)", "assert heurisch(sin(x)*sin(y), x) == -cos(x)*sin(y) assert heurisch(sin(x)*sin(y), y) == -cos(y)*sin(x)", "assert heurisch(sin(x)*sin(y), y) == -cos(y)*sin(x) # gives sin(x) in answer", "I*sqrt(y))/(2*sqrt(y))] def test_heurisch_hacking(): assert (heurisch(sqrt(1 + 7*x**2), x, hints=[]) ==", "See # issue sympy/sympy#8685. 
assert heurisch(sqrt(1 + 0.25*x**2), x, hints=[])", "assert (heurisch(1/sqrt(1 - 7*x**2), x, hints=[]) == sqrt(7)*asin(sqrt(7)*x)/7) assert (heurisch(exp(-7*x**2),", "+ 2*exp(x)*log(x) + log(x)**2)/2 + 1/(x + exp(x) + log(x))", "Derivative(f(x), x)} def test_heurisch_polynomials(): assert heurisch(1, x) == x assert", "= WhittakerW(mu + 1, nu, x) / (WhittakerW(mu, nu, x)", "(0, Eq(y, 0)), (-2*sqrt(x)*cos(sqrt(x)*y)/y + 2*sin(sqrt(x)*y)/y**2, True)) y = Symbol('y',", "heurisch(f(x), x) is None def test_heurisch_wrapper(): f = 1/(y +", "test_RR(): # Make sure the algorithm does the right thing", "solution is g = 1/(x + log(x) + exp(x)) +", "\\ + (sqrt(16 - x**2))*acos(x/4) + x*asin(x/4)*acos(x/4) def test_heurisch_hyperbolic(): assert", "on 3.4 GHz def test_pmint_logexp(): f = (1 + x", "assert heurisch(exp(-x**2), x) is None assert heurisch(2**x, x) == 2**x/log(2)", "components(sin(x), x) == {sin(x), x} assert components(sin(x)*sqrt(log(x)), x) == \\", "= log(x**2 + 2*x*exp(x) + 2*x*log(x) + exp(2*x) + 2*exp(x)*log(x)", "@pytest.mark.slow # 8 seconds on 3.4 GHz def test_pmint_logexp(): f", "True)) # issue sympy/sympy#6926 f = sqrt(x**2/((y - x)*(y +", "# result in the first case. The difference is because", "def test_pmint_trig(): f = (x - tan(x)) / tan(x)**2 +", "== {x} assert components(1/(x + y), x) == {x} assert", "== log(x + 2) assert heurisch(1/(x + sin(y)), x) ==", "54), sqrt(sin(x)), x} assert components(f(x), x) == \\ {x, f(x)}", "0)), (-I*log(x - I*sqrt(y))/(2*sqrt(y)) + I*log(x + I*sqrt(y))/(2*sqrt(y)), True)) y", "x) == \\ {sin(x), root(x, 54), sqrt(sin(x)), x} assert components(f(x),", "+ exp(x) + log(x)) # TODO: Optimal solution is g", "asinh(x/2) - x*sqrt(4 + x**2)/4 def test_heurisch_mixed(): assert heurisch(sin(x)*exp(x), x)", "# would give the optimal result? 
def drop_const(expr, x): if", "assert components(f(x)*diff(f(x), x), x) == \\ {x, f(x), Derivative(f(x), x),", "+ log(tan(x)**2 + 1)/2 assert heurisch(f, x) == g @pytest.mark.slow", "return LambertW(exp(x)) f = (1 + omega(x) * (2 +", "nu*log(x) - log(besselj(nu, x)) assert simplify(heurisch(f, x) - g) ==", "I*sqrt(y)) / (2*sqrt(y)) - I*log(x - I*sqrt(y))/(2*sqrt(y))] def test_heurisch_hacking(): assert", "besselj(nu, x) assert simplify(heurisch(f, x) - g) == 0 @pytest.mark.slow", "exp(x) assert heurisch(exp(-x), x) == -exp(-x) assert heurisch(exp(17*x), x) ==", "== x*li(x) - Ei(2*log(x)) def test_heurisch_function(): assert heurisch(f(x), x) is", "- 4) assert ratsimp(heurisch(f, x)) == g def test_pmint_LambertW(): f", "simplify heurisch() output. assert ratsimp(heurisch(f, x)) == g @pytest.mark.slow #", "functions # f = (x - AiryAi(x)*AiryAi(1, x)) / (x**2", "heurisch(tan(x), x) in [ log(1 + tan(x)**2)/2, log(tan(x) + I)", "assert heurisch(cos(x)/sin(x), x) == log(sin(x)) assert heurisch(x*sin(7*x), x) == sin(7*x)", "x) == sqrt(pi)*erf(x)**2 / 4 def test_heurisch_symbolic_coeffs(): assert heurisch(1/(x +", "omega(x))/(x + omega(x)) g = log(x + LambertW(exp(x))) + sin(LambertW(exp(x)))", "heurisch_wrapper(f, x) == log(x + y) f = 1/(y -", "1)/2 assert heurisch(f, x) == g @pytest.mark.slow # 8 seconds", "identical # result in the first case. 
The difference is", "8*x**2) g = (4 + 8*x**2 + 6*x + 3*x**3)/(x**5", "- I*sqrt(y))/(2*sqrt(y)) + I*log(x + I*sqrt(y))/(2*sqrt(y)), True)) y = Symbol('y',", "sqrt(2)), x) == log(x + sqrt(2)) assert simplify(diff(heurisch(log(x + y", "heurisch(1/pi/4 * x**2*cos(x), x) == 1/pi/4*(x**2*sin(x) - 2*sin(x) + 2*x*cos(x))", "12] assert heurisch(5*x**5/(2*x**6 + 5), x) == 5*log(2*x**6 + 5)", "- erf(x)**2 - erf(x) + 1) g = sqrt(pi)*log(erf(x) -", "positive=True) assert heurisch_wrapper(sin(y*sqrt(x)), x) == 2/y**2*sin(y*sqrt(x)) - \\ 2*sqrt(x)*cos(y*sqrt(x))/y def", "Function, I, Integral, LambertW, Piecewise, Rational, Sum, Symbol, acos, asin,", "x) == 5*log(2*x**6 + 5) / 12 assert heurisch(1/x**2, x)", "sinh(x) assert heurisch(x*sinh(x), x) == x*cosh(x) - sinh(x) assert heurisch(x*cosh(x),", "# These are examples from the Poor Man's Integrator #", "- I*x, ] assert heurisch(sin(x)*sin(y), x) == -cos(x)*sin(y) assert heurisch(sin(x)*sin(y),", "because diofant changes # signs of expressions without any care.", "assert heurisch(x*2**x, x) == x*2**x/log(2) - 2**x*log(2)**(-2) assert heurisch(Integral(x**z*y, (y,", "assert heurisch(exp(17*x), x) == exp(17*x) / 17 assert heurisch(x*exp(x), x)", "test_heurisch_log(): assert heurisch(log(x), x) == x*log(x) - x assert heurisch(log(3*x),", "7*x**2)/2 + sqrt(7)*asin(sqrt(7)*x)/14) assert (heurisch(1/sqrt(1 + 7*x**2), x, hints=[]) ==", "tan(x)) / tan(x)**2 + tan(x) g = -x**2/2 - x/tan(x)", "examples from the Poor Man's Integrator # http://www-sop.inria.fr/cafe/Manuel.Bronstein/pmint/examples/ def test_pmint_rat():", "besselj, cos, cosh, diff, erf, exp, li, log, pi, ratsimp,", "x) in [5*log(2*x**6 - 5) / 12, 5*log(-2*x**6 + 5)", "any care. 
# XXX ^ ^ ^ is this still", "x)*(y + x))) assert heurisch_wrapper(f, x) == x*sqrt(x**2)*sqrt(1/(-x**2 + y**2))", "\\ 2*sqrt(x)*cos(y*sqrt(x))/y assert heurisch_wrapper(sin(y*sqrt(x)), x) == Piecewise( (0, Eq(y, 0)),", "- x*besselj(nu + 1, x))/x g = besselj(nu, x) assert", "f(x), Derivative(f(x), x)} assert components(f(x)*diff(f(x), x), x) == \\ {x,", "+ 6*x**6 + 12*x**4 + 8*x**2) g = (4 +", "f = (x**7 - 24*x**4 - 4*x**2 + 8*x -", "simplify(heurisch(f, x) - g) == 0 @pytest.mark.slow def test_pmint_WrightOmega(): def", "expr.is_Add: return Add(*[ arg for arg in expr.args if arg.has(x)", "0.25*x**2), x, hints=[]) == \\ 0.5*x*sqrt(0.25*x**2 + 1) + 1.0*asinh(0.5*x)", "x, exp(x)} assert components(x**Rational(17, 54)/sqrt(sin(x)), x) == \\ {sin(x), root(x,", "+ x + x*exp(x))*(x + log(x) + exp(x) - 1)/(x", "g) == 0 @pytest.mark.slow def test_pmint_WrightOmega(): def omega(x): return LambertW(exp(x))", "(heurisch(sqrt(1 + 7*x**2), x, hints=[]) == x*sqrt(1 + 7*x**2)/2 +", "x) - x*besselj(nu + 1, x))/x g = besselj(nu, x)", "omega(x))))/(1 + omega(x))/(x + omega(x)) g = log(x + LambertW(exp(x)))", "asinh, besselj, cos, cosh, diff, erf, exp, li, log, pi,", "= (x**7 - 24*x**4 - 4*x**2 + 8*x - 8)/(x**8", "{x, f(x), Derivative(f(x), x)} assert components(f(x)*diff(f(x), x), x) == \\", "x) == {x} assert components(1/(x + y), x) == {x}", "x) == -exp(-x) assert heurisch(exp(17*x), x) == exp(17*x) / 17", "{sin(x), root(x, 54), sqrt(sin(x)), x} assert components(f(x), x) == \\", "in [I/sqrt(y)*log(x + sqrt(-y))/2 - I/sqrt(y)*log(x - sqrt(-y))/2, I*log(x +", "+ 0.25*x**2), x, hints=[]) == \\ 0.5*x*sqrt(0.25*x**2 + 1) +", "acos, asin, asinh, besselj, cos, cosh, diff, erf, exp, li,", "\\ {x, f(x)} assert components(Derivative(f(x), x), x) == \\ {x,", "54)/sqrt(sin(x)), x) == \\ {sin(x), root(x, 54), sqrt(sin(x)), x} assert", "exp(x)) + log(x + log(x) + exp(x)), # but Diofant", "y)/2/y - log(x - y)/2/y, True)) # issue sympy/sympy#6926 f", "x) in [ log(1 + tan(x)**2)/2, log(tan(x) + I) 
+", "def test_sympyissue_3609(): assert heurisch(1/(x * (1 + log(x)**2)), x) ==", "sympy/sympy#8685. assert heurisch(sqrt(1 + 0.25*x**2), x, hints=[]) == \\ 0.5*x*sqrt(0.25*x**2", "3.4 GHz def test_pmint_erf(): f = exp(-x**2)*erf(x)/(erf(x)**3 - erf(x)**2 -", "output. assert ratsimp(heurisch(f, x)) == g @pytest.mark.slow # 8 seconds", "y)) == log(x + y + z) def test_heurisch_symbolic_coeffs_1130(): y", "assert ratsimp(heurisch(f, x)) == g @pytest.mark.slow # 8 seconds on", "== log(x + sin(y)) # Up to a constant, where", "None. Wrong besselj() derivative? f = besselj(nu + 1, x)/besselj(nu,", "= symbols('x,y,z,nu') f = Function('f') def test_components(): assert components(x*y, x)", "simplify(diff(heurisch(log(x + y + z), y), y)) == log(x +", "x) == \\ {x, f(x), Derivative(f(x), x)} assert components(f(x)*diff(f(x), x),", "sqrt(7*pi)*erf(sqrt(7)*x)/14) assert heurisch(1/sqrt(9 - 4*x**2), x, hints=[]) == asin(2*x/3)/2 assert", "TODO: convert the rest of PMINT tests: # Airy functions", "test_pmint_logexp(): f = (1 + x + x*exp(x))*(x + log(x)", "sqrt(x**2/((y - x)*(y + x))) assert heurisch_wrapper(f, x) == x*sqrt(x**2)*sqrt(1/(-x**2", "= Symbol('y') assert heurisch_wrapper(1/(x**2 + y), x) == Piecewise( (-1/x,", "Ei(2*log(x)) def test_heurisch_function(): assert heurisch(f(x), x) is None def test_heurisch_wrapper():", "first case. 
The difference is because diofant changes # signs", "- exp(x) assert heurisch(x*exp(x**2), x) == exp(x**2) / 2 assert", "assert (heurisch(1/sqrt(1 + 7*x**2), x, hints=[]) == sqrt(7)*asinh(sqrt(7)*x)/7) assert (heurisch(1/sqrt(1", "Piecewise((1/x, Eq(y, 0)), (log(x + y)/2/y - log(x - y)/2/y,", "(heurisch(1/sqrt(1 - 7*x**2), x, hints=[]) == sqrt(7)*asin(sqrt(7)*x)/7) assert (heurisch(exp(-7*x**2), x,", "x) == x*exp(x) - exp(x) assert heurisch(x*exp(x**2), x) == exp(x**2)", "nu = symbols('x,y,z,nu') f = Function('f') def test_components(): assert components(x*y,", "AiryAi(x)) # f = x**2 * AiryAi(x) # g =", "4 def test_heurisch_symbolic_coeffs(): assert heurisch(1/(x + y), x) == log(x", "nu, x) / (WhittakerW(mu, nu, x) * x) # g", "x*asin(x/4)*acos(x/4) def test_heurisch_hyperbolic(): assert heurisch(sinh(x), x) == cosh(x) assert heurisch(cosh(x),", "-log(x - y) f = 1/((y - x)*(y + x))", "- I) - I*x, ] assert heurisch(sin(x)*sin(y), x) == -cos(x)*sin(y)", "x) == log(x) assert heurisch(1/(2 + x), x) == log(x", "__all__ = () x, y, z, nu = symbols('x,y,z,nu') f", "x) == I*log(log(x) + I)/2 - \\ I*log(log(x) - I)/2", "constant, where C = 5*pi*I/12, Mathematica gives identical # result", "== \\ 0.5*x*sqrt(0.25*x**2 + 1) + 1.0*asinh(0.5*x) # TODO: convert", "def test_heurisch_wrapper(): f = 1/(y + x) assert heurisch_wrapper(f, x)", "+ sqrt(-y))/2 - I/sqrt(y)*log(x - sqrt(-y))/2, I*log(x + I*sqrt(y)) /", "+ x*log(3) + x*log(x) assert heurisch(log(x**2), x) in [x*log(x**2) -", "heurisch(2**x, x) == 2**x/log(2) assert heurisch(x*2**x, x) == x*2**x/log(2) -", "assert heurisch(1/(2 + x), x) == log(x + 2) assert", "+ 5) / 12] assert heurisch(5*x**5/(2*x**6 + 5), x) ==", "heurisch(1/(x + y), x) == log(x + y) assert heurisch(1/(x", "py.test assert heurisch(sin(x)*cos(x), x) in [sin(x)**2 / 2, -cos(x)**2 /", "cos, cosh, diff, erf, exp, li, log, pi, ratsimp, root,", "- 1)/(x + log(x) + exp(x))**2/x g = log(x**2 +", "12*x**4 + 8*x**2) g = (4 + 8*x**2 + 6*x", "f = x**2 * AiryAi(x) # g = 
-AiryAi(x) +", "Derivative, Ei, Eq, Function, I, Integral, LambertW, Piecewise, Rational, Sum,", "exp(x)*cos(x)/2 def test_heurisch_radicals(): assert heurisch(1/sqrt(x), x) == 2*sqrt(x) assert heurisch(1/sqrt(x)**3,", "\\ I*log(log(x) - I)/2 # These are examples from the", "right thing if the ring is RR. See # issue", "x), x) == \\ {x, f(x), Derivative(f(x), x)} assert components(f(x)*diff(f(x),", "x*exp(x))*(x + log(x) + exp(x) - 1)/(x + log(x) +", "Derivative(f(x), x)} assert components(f(x)*diff(f(x), x), x) == \\ {x, f(x),", "- x)*(y + x)) assert heurisch_wrapper(f, x) == \\ Piecewise((1/x,", "+ tan(x)**2)/2, log(tan(x) + I) + I*x, log(tan(x) - I)", "heurisch(exp(x), x) == exp(x) assert heurisch(exp(-x), x) == -exp(-x) assert", "Wrong besselj() derivative? f = besselj(nu + 1, x)/besselj(nu, x)", "Add(*[ arg for arg in expr.args if arg.has(x) ]) else:", "x)*(y + x)) assert heurisch_wrapper(f, x) == \\ Piecewise((1/x, Eq(y,", "def test_heurisch_radicals(): assert heurisch(1/sqrt(x), x) == 2*sqrt(x) assert heurisch(1/sqrt(x)**3, x)", "tan(x)**2 + tan(x) g = -x**2/2 - x/tan(x) + log(tan(x)**2", "5*pi*I/12, Mathematica gives identical # result in the first case.", "5) / 12 assert heurisch(1/x**2, x) == -1/x assert heurisch(-1/x**5,", "I*x, log(tan(x) - I) - I*x, ] assert heurisch(sin(x)*sin(y), x)", "- AiryAi(x)) # f = x**2 * AiryAi(x) # g", "Rational(1,2)*ln(x - AiryAi(x)) # f = x**2 * AiryAi(x) #", "x) == -2/sqrt(x) assert heurisch(sqrt(x)**3, x) == 2*sqrt(x)**5/5 assert heurisch(sin(x)*sqrt(cos(x)),", "2*x*log(x) - 2*x] def test_heurisch_exp(): assert heurisch(exp(x), x) == exp(x)", "Rational, Sum, Symbol, acos, asin, asinh, besselj, cos, cosh, diff,", "assert components(x*sin(exp(x)*y), x) == \\ {sin(y*exp(x)), x, exp(x)} assert components(x**Rational(17,", "+ I*sqrt(y))/(2*sqrt(y)), True)) y = Symbol('y', positive=True) assert heurisch_wrapper(1/(x**2 +", "1, 2), (z, 2, 3)).function, x) == (x*x**z*y)/(z+1) assert heurisch(Sum(x**z,", "Man's Integrator # 
http://www-sop.inria.fr/cafe/Manuel.Bronstein/pmint/examples/ def test_pmint_rat(): # TODO: heurisch() is", "x) * x) # g = x/2 - mu*ln(x) -", "= besselj(nu + 1, x)/besselj(nu, x) g = nu*log(x) -", "TODO: heurisch() is off by a constant: -3/4. Possibly different", "= besselj(nu, x) assert simplify(heurisch(f, x) - g) == 0", "exp(x)), # but Diofant requires a lot of guidance to", "{sin(y*exp(x)), x, exp(x)} assert components(x**Rational(17, 54)/sqrt(sin(x)), x) == \\ {sin(x),", "permutation # would give the optimal result? def drop_const(expr, x):", "== sqrt(7)*asin(sqrt(7)*x)/7) assert (heurisch(exp(-7*x**2), x, hints=[]) == sqrt(7*pi)*erf(sqrt(7)*x)/14) assert heurisch(1/sqrt(9", "log(x + y) f = 1/(y - x) assert heurisch_wrapper(f,", "x), Derivative(f(x), x)} def test_heurisch_polynomials(): assert heurisch(1, x) == x", "import (Add, Derivative, Ei, Eq, Function, I, Integral, LambertW, Piecewise,", "x) == -2*sqrt(cos(x))**3/3 y = Symbol('y') assert heurisch(sin(y*sqrt(x)), x) ==", "test_components(): assert components(x*y, x) == {x} assert components(1/(x + y),", "- x*sqrt(4 + x**2)/4 def test_heurisch_mixed(): assert heurisch(sin(x)*exp(x), x) ==", "x)) assert heurisch_wrapper(f, x) == \\ Piecewise((1/x, Eq(y, 0)), (log(x", "erf(x) + 1) g = sqrt(pi)*log(erf(x) - 1)/8 - sqrt(pi)*log(erf(x)", "- (sqrt(16 - x**2))*asin(x/4) \\ + (sqrt(16 - x**2))*acos(x/4) +", "symbols('x,y,z,nu') f = Function('f') def test_components(): assert components(x*y, x) ==", "(2 + cos(omega(x)) * (x + omega(x))))/(1 + omega(x))/(x +", "I, Integral, LambertW, Piecewise, Rational, Sum, Symbol, acos, asin, asinh,", "+ x), x) == log(x + 2) assert heurisch(1/(x +", "= -AiryAi(x) + AiryAi(1, x)*x # Whittaker functions # f", "test_heurisch_trigonometric(): assert heurisch(sin(x), x) == -cos(x) assert heurisch(pi*sin(x) + 1,", "seconds on 3.4 GHz def test_pmint_erf(): f = exp(-x**2)*erf(x)/(erf(x)**3 -", "+ AiryAi(1, x)*x # Whittaker functions # f = WhittakerW(mu", "6*x + 3*x**3)/(x**5 + 4*x**3 + 
4*x) + log(x) assert", "both cases heurisch() gives None. Wrong besselj() derivative? f =", "def test_heurisch_polynomials(): assert heurisch(1, x) == x assert heurisch(x, x)", "x) == log(x + y) assert heurisch(1/(x + sqrt(2)), x)", "diofant.integrals.heurisch import components, heurisch, heurisch_wrapper __all__ = () x, y,", "- I*log(x - I*sqrt(y))/(2*sqrt(y))] def test_heurisch_hacking(): assert (heurisch(sqrt(1 + 7*x**2),", "- tan(x)) / tan(x)**2 + tan(x) g = -x**2/2 -", "2*sin(x) + 2*x*cos(x)) assert heurisch(acos(x/4) * asin(x/4), x) == 2*x", "== exp(17*x) / 17 assert heurisch(x*exp(x), x) == x*exp(x) -", "== x**18/18 def test_heurisch_fractions(): assert heurisch(1/x, x) == log(x) assert", "- 7*x**2)/2 + sqrt(7)*asin(sqrt(7)*x)/14) assert (heurisch(1/sqrt(1 + 7*x**2), x, hints=[])", "x) # g = x/2 - mu*ln(x) - ln(WhittakerW(mu, nu,", "4*x**2), x, hints=[]) == asinh(2*x/3)/2 assert heurisch(li(x), x, hints=[]) ==", "heurisch(exp(-x), x) == -exp(-x) assert heurisch(exp(17*x), x) == exp(17*x) /", "= Function('f') def test_components(): assert components(x*y, x) == {x} assert", "== x*erf(x) + exp(-x**2)/sqrt(pi) assert heurisch(exp(-x**2)*erf(x), x) == sqrt(pi)*erf(x)**2 /", "/ (x**2 - AiryAi(x)**2) # g = Rational(1,2)*ln(x + AiryAi(x))", "tan(x) g = -x**2/2 - x/tan(x) + log(tan(x)**2 + 1)/2", "x) assert heurisch_wrapper(f, x) == -log(x - y) f =", "x) == log(x + sqrt(2)) assert simplify(diff(heurisch(log(x + y +", "1/((y - x)*(y + x)) assert heurisch_wrapper(f, x) == \\", "x**2))*acos(x/4) + x*asin(x/4)*acos(x/4) def test_heurisch_hyperbolic(): assert heurisch(sinh(x), x) == cosh(x)", "2*sin(sqrt(x)*y)/y**2, True)) y = Symbol('y', positive=True) assert heurisch_wrapper(sin(y*sqrt(x)), x) ==", "+ 1/(x + exp(x) + log(x)) # TODO: Optimal solution", "pi*cos(x) assert heurisch(cos(x), x) == sin(x) assert heurisch(tan(x), x) in", "Rational(1,2)*ln(x + AiryAi(x)) + Rational(1,2)*ln(x - AiryAi(x)) # f =", "(sqrt(16 - x**2))*asin(x/4) \\ + (sqrt(16 - x**2))*acos(x/4) + 
x*asin(x/4)*acos(x/4)", "assert heurisch_wrapper(sin(y*sqrt(x)), x) == 2/y**2*sin(y*sqrt(x)) - \\ 2*sqrt(x)*cos(y*sqrt(x))/y def test_heurisch_special():", "= (nu*besselj(nu, x) - x*besselj(nu + 1, x))/x g =", "assert heurisch_wrapper(f, x) == \\ Piecewise((1/x, Eq(y, 0)), (log(x +", "exp(x) assert heurisch(x*exp(x**2), x) == exp(x**2) / 2 assert heurisch(exp(-x**2),", "x*log(x) - x assert heurisch(log(3*x), x) == -x + x*log(3)", "4*x**3 + 4*x) + log(x) assert drop_const(ratsimp(heurisch(f, x)), x) ==", "sure the algorithm does the right thing if the ring", "y)/2/y, True)) # issue sympy/sympy#6926 f = sqrt(x**2/((y - x)*(y", "== -2*sqrt(cos(x))**3/3 y = Symbol('y') assert heurisch(sin(y*sqrt(x)), x) == 2/y**2*sin(y*sqrt(x))", "== 2**x/log(2) assert heurisch(x*2**x, x) == x*2**x/log(2) - 2**x*log(2)**(-2) assert", "1, x))/x g = besselj(nu, x) assert simplify(heurisch(f, x) -", "x) == x*log(x) - x assert heurisch(log(3*x), x) == -x", "== x*exp(x) - exp(x) assert heurisch(x*exp(x**2), x) == exp(x**2) /", "z), y), y)) == log(x + y + z) def", "def test_pmint_besselj(): # TODO: in both cases heurisch() gives None.", "Airy functions # f = (x - AiryAi(x)*AiryAi(1, x)) /", "x*sqrt(1 - 7*x**2)/2 + sqrt(7)*asin(sqrt(7)*x)/14) assert (heurisch(1/sqrt(1 + 7*x**2), x,", "x assert heurisch(log(3*x), x) == -x + x*log(3) + x*log(x)", "thing if the ring is RR. 
See # issue sympy/sympy#8685.", "(z, 2, 3)).function, x) == (x*x**z*y)/(z+1) assert heurisch(Sum(x**z, (z, 1,", "1) + 1.0*asinh(0.5*x) # TODO: convert the rest of PMINT", "+ sqrt(2)), x) == log(x + sqrt(2)) assert simplify(diff(heurisch(log(x +", "omega(x) * (2 + cos(omega(x)) * (x + omega(x))))/(1 +", "f(x), Derivative(f(x), x), Derivative(f(x), x)} def test_heurisch_polynomials(): assert heurisch(1, x)", "12, 5*log(-2*x**6 + 5) / 12] assert heurisch(5*x**5/(2*x**6 + 5),", "= x*LambertW(x) - x + x/LambertW(x) assert heurisch(f, x) ==", "AiryAi(x)*AiryAi(1, x)) / (x**2 - AiryAi(x)**2) # g = Rational(1,2)*ln(x", "+ log(x)**2)), x) == I*log(log(x) + I)/2 - \\ I*log(log(x)", "x) == 1/(4*x**4) def test_heurisch_log(): assert heurisch(log(x), x) == x*log(x)", "x/LambertW(x) assert heurisch(f, x) == g @pytest.mark.xfail def test_pmint_besselj(): #", "+ asinh(x/2) - x*sqrt(4 + x**2)/4 def test_heurisch_mixed(): assert heurisch(sin(x)*exp(x),", "x**2)/4 def test_heurisch_mixed(): assert heurisch(sin(x)*exp(x), x) == exp(x)*sin(x)/2 - exp(x)*cos(x)/2", "cos(omega(x)) * (x + omega(x))))/(1 + omega(x))/(x + omega(x)) g", "3)).function, x) == (x*x**z*y)/(z+1) assert heurisch(Sum(x**z, (z, 1, 2)).function, z)", "== log(x + y) assert heurisch(1/(x + sqrt(2)), x) ==", "(1 + log(x)**2)), x) == I*log(log(x) + I)/2 - \\", "rest of PMINT tests: # Airy functions # f =", "log(tan(x) - I) - I*x, ] assert heurisch(sin(x)*sin(y), x) ==", "4*x**2 + 8*x - 8)/(x**8 + 6*x**6 + 12*x**4 +", "12 assert heurisch(1/x**2, x) == -1/x assert heurisch(-1/x**5, x) ==", "+ omega(x)) g = log(x + LambertW(exp(x))) + sin(LambertW(exp(x))) assert", "# TODO: Optimal solution is g = 1/(x + log(x)", "2/y**2*sin(y*sqrt(x)) - \\ 2*sqrt(x)*cos(y*sqrt(x))/y def test_heurisch_special(): assert heurisch(erf(x), x) ==", "== log(sin(x)) assert heurisch(x*sin(7*x), x) == sin(7*x) / 49 -", "assert heurisch_wrapper(f, x) == x*sqrt(x**2)*sqrt(1/(-x**2 + y**2)) \\ - y**2*sqrt(x**2)*sqrt(1/(-x**2", "x) == -x + x*log(3) + 
x*log(x) assert heurisch(log(x**2), x)", "== exp(x)*sin(x)/2 - exp(x)*cos(x)/2 def test_heurisch_radicals(): assert heurisch(1/sqrt(x), x) ==", "log(x + LambertW(exp(x))) + sin(LambertW(exp(x))) assert heurisch(f, x) == g", "log(x)) # TODO: Optimal solution is g = 1/(x +", "* (x + omega(x))))/(1 + omega(x))/(x + omega(x)) g =", "sqrt(7)*asinh(sqrt(7)*x)/14) assert (heurisch(sqrt(1 - 7*x**2), x, hints=[]) == x*sqrt(1 -", "# g = -AiryAi(x) + AiryAi(1, x)*x # Whittaker functions", "- sqrt(pi)/(4*erf(x) - 4) assert ratsimp(heurisch(f, x)) == g def", "\\ 0.5*x*sqrt(0.25*x**2 + 1) + 1.0*asinh(0.5*x) # TODO: convert the", "are examples from the Poor Man's Integrator # http://www-sop.inria.fr/cafe/Manuel.Bronstein/pmint/examples/ def", "x)) / (x**2 - AiryAi(x)**2) # g = Rational(1,2)*ln(x +", "1)/8 - sqrt(pi)/(4*erf(x) - 4) assert ratsimp(heurisch(f, x)) == g", "drop_const(expr, x): if expr.is_Add: return Add(*[ arg for arg in", "heurisch, heurisch_wrapper __all__ = () x, y, z, nu =", "assert (heurisch(sqrt(1 - 7*x**2), x, hints=[]) == x*sqrt(1 - 7*x**2)/2", "(x**7 - 24*x**4 - 4*x**2 + 8*x - 8)/(x**8 +", "+ x*asin(x/4)*acos(x/4) def test_heurisch_hyperbolic(): assert heurisch(sinh(x), x) == cosh(x) assert", "sin(x) in answer when run via setup.py and cos(x) when", "RR. See # issue sympy/sympy#8685. assert heurisch(sqrt(1 + 0.25*x**2), x,", "is off by a constant: -3/4. Possibly different permutation #", "exp(x) - 1)/(x + log(x) + exp(x))**2/x g = log(x**2", "derivative? f = besselj(nu + 1, x)/besselj(nu, x) g =", "(x - tan(x)) / tan(x)**2 + tan(x) g = -x**2/2", "- exp(x)*cos(x)/2 def test_heurisch_radicals(): assert heurisch(1/sqrt(x), x) == 2*sqrt(x) assert", "exp, li, log, pi, ratsimp, root, simplify, sin, sinh, sqrt,", "correct? 
assert heurisch(5*x**5/( 2*x**6 - 5), x) in [5*log(2*x**6 -", "sqrt(7)*asinh(sqrt(7)*x)/7) assert (heurisch(1/sqrt(1 - 7*x**2), x, hints=[]) == sqrt(7)*asin(sqrt(7)*x)/7) assert", "= Symbol('y', positive=True) assert heurisch_wrapper(sin(y*sqrt(x)), x) == 2/y**2*sin(y*sqrt(x)) - \\", "x))) assert heurisch_wrapper(f, x) == x*sqrt(x**2)*sqrt(1/(-x**2 + y**2)) \\ -", "[5*log(2*x**6 - 5) / 12, 5*log(-2*x**6 + 5) / 12]", "simplify(heurisch(f, x) - g) == 0 f = (nu*besselj(nu, x)", "= 5*pi*I/12, Mathematica gives identical # result in the first", "== -cos(y)*sin(x) # gives sin(x) in answer when run via", "= LambertW(x) g = x*LambertW(x) - x + x/LambertW(x) assert", "is None assert heurisch(2**x, x) == 2**x/log(2) assert heurisch(x*2**x, x)", "+ log(x) + exp(x)), # but Diofant requires a lot", "(-I*log(x - I*sqrt(y))/(2*sqrt(y)) + I*log(x + I*sqrt(y))/(2*sqrt(y)), True)) y =", "== {sin(x), x} assert components(sin(x)*sqrt(log(x)), x) == \\ {log(x), sin(x),", "2)).function, z) == x**z/log(x) def test_heurisch_trigonometric(): assert heurisch(sin(x), x) ==", "x*2**x/log(2) - 2**x*log(2)**(-2) assert heurisch(Integral(x**z*y, (y, 1, 2), (z, 2,", "+ cos(omega(x)) * (x + omega(x))))/(1 + omega(x))/(x + omega(x))", "erf, exp, li, log, pi, ratsimp, root, simplify, sin, sinh,", "* x) # g = x/2 - mu*ln(x) - ln(WhittakerW(mu,", "assert components(Derivative(f(x), x), x) == \\ {x, f(x), Derivative(f(x), x)}", "g @pytest.mark.slow # 8 seconds on 3.4 GHz def test_pmint_erf():", "I*log(x + I*sqrt(y))/(2*sqrt(y)), True)) y = Symbol('y', positive=True) assert heurisch_wrapper(1/(x**2", "I) - I*x, ] assert heurisch(sin(x)*sin(y), x) == -cos(x)*sin(y) assert", "assert components(f(x), x) == \\ {x, f(x)} assert components(Derivative(f(x), x),", "log(besselj(nu, x)) assert simplify(heurisch(f, x) - g) == 0 f", "def drop_const(expr, x): if expr.is_Add: return Add(*[ arg for arg", "signs of expressions without any care. 
# XXX ^ ^", "heurisch(cos(x)/sin(x), x) == log(sin(x)) assert heurisch(x*sin(7*x), x) == sin(7*x) /", "gives sin(x) in answer when run via setup.py and cos(x)", "is because diofant changes # signs of expressions without any", "assert heurisch(log(3*x), x) == -x + x*log(3) + x*log(x) assert", "True)) y = Symbol('y', positive=True) assert heurisch_wrapper(sin(y*sqrt(x)), x) == 2/y**2*sin(y*sqrt(x))", "0)), (log(x + y)/2/y - log(x - y)/2/y, True)) #", "AiryAi(1, x)*x # Whittaker functions # f = WhittakerW(mu +", "- log(x - y)/2/y, True)) # issue sympy/sympy#6926 f =", "def test_pmint_logexp(): f = (1 + x + x*exp(x))*(x +", "arg for arg in expr.args if arg.has(x) ]) else: return", "{x} assert components(1/(x + y), x) == {x} assert components(sin(x),", "{x, f(x)} assert components(Derivative(f(x), x), x) == \\ {x, f(x),", "I)/2 - \\ I*log(log(x) - I)/2 # These are examples", "expr.args if arg.has(x) ]) else: return expr f = (x**7", "x)) == g @pytest.mark.slow # 8 seconds on 3.4 GHz", "log(x + y + z) def test_heurisch_symbolic_coeffs_1130(): y = Symbol('y')", "+ x))) assert heurisch_wrapper(f, x) == x*sqrt(x**2)*sqrt(1/(-x**2 + y**2)) \\", "+ 1) + 1.0*asinh(0.5*x) # TODO: convert the rest of", "cosh(x) assert heurisch(cosh(x), x) == sinh(x) assert heurisch(x*sinh(x), x) ==", "log(x**2 + 2*x*exp(x) + 2*x*log(x) + exp(2*x) + 2*exp(x)*log(x) +", "== g def test_pmint_trig(): f = (x - tan(x)) /", "gives identical # result in the first case. 
The difference", "y) == -cos(y)*sin(x) # gives sin(x) in answer when run", "-cos(y)*sin(x) # gives sin(x) in answer when run via setup.py", "sqrt(2)) assert simplify(diff(heurisch(log(x + y + z), y), y)) ==", "# 8 seconds on 3.4 GHz def test_pmint_erf(): f =", "heurisch(Sum(x**z, (z, 1, 2)).function, z) == x**z/log(x) def test_heurisch_trigonometric(): assert", "sin(LambertW(exp(x))) assert heurisch(f, x) == g def test_RR(): # Make", "- x*cos(7*x) / 7 assert heurisch(1/pi/4 * x**2*cos(x), x) ==", "== g def test_RR(): # Make sure the algorithm does", "heurisch(f, x) == g @pytest.mark.xfail def test_pmint_besselj(): # TODO: in", "test_pmint_WrightOmega(): def omega(x): return LambertW(exp(x)) f = (1 + omega(x)", "pi, ratsimp, root, simplify, sin, sinh, sqrt, symbols, tan) from", "in both cases heurisch() gives None. Wrong besselj() derivative? f", "x) == -cos(x)*sin(y) assert heurisch(sin(x)*sin(y), y) == -cos(y)*sin(x) # gives", "assert heurisch(2**x, x) == 2**x/log(2) assert heurisch(x*2**x, x) == x*2**x/log(2)", "- cosh(x) assert heurisch( x*asinh(x/2), x) == x**2*asinh(x/2)/2 + asinh(x/2)", "^ ^ is this still correct? assert heurisch(5*x**5/( 2*x**6 -", "besselj() derivative? f = besselj(nu + 1, x)/besselj(nu, x) g", "log(x) + exp(x)) + log(x + log(x) + exp(x)), #", "cases heurisch() gives None. Wrong besselj() derivative? f = besselj(nu", "x**z/log(x) def test_heurisch_trigonometric(): assert heurisch(sin(x), x) == -cos(x) assert heurisch(pi*sin(x)", "heurisch(log(x), x) == x*log(x) - x assert heurisch(log(3*x), x) ==", "components(1/(x + y), x) == {x} assert components(sin(x), x) ==", "-3/4. 
Possibly different permutation # would give the optimal result?", "log(x + sqrt(2)) assert simplify(diff(heurisch(log(x + y + z), y),", "hints=[]) == sqrt(7)*asinh(sqrt(7)*x)/7) assert (heurisch(1/sqrt(1 - 7*x**2), x, hints=[]) ==", "sin(7*x) / 49 - x*cos(7*x) / 7 assert heurisch(1/pi/4 *", "x) is None assert heurisch(2**x, x) == 2**x/log(2) assert heurisch(x*2**x,", "properly simplify heurisch() output. assert ratsimp(heurisch(f, x)) == g @pytest.mark.slow", "assert heurisch(x*exp(x), x) == x*exp(x) - exp(x) assert heurisch(x*exp(x**2), x)", "x): if expr.is_Add: return Add(*[ arg for arg in expr.args", "heurisch(sin(x)*cos(x), x) in [sin(x)**2 / 2, -cos(x)**2 / 2] assert", "heurisch( x*asinh(x/2), x) == x**2*asinh(x/2)/2 + asinh(x/2) - x*sqrt(4 +", "== sqrt(7)*asinh(sqrt(7)*x)/7) assert (heurisch(1/sqrt(1 - 7*x**2), x, hints=[]) == sqrt(7)*asin(sqrt(7)*x)/7)", "g @pytest.mark.slow # 8 seconds on 3.4 GHz def test_pmint_logexp():", "g = 1/(x + log(x) + exp(x)) + log(x +", "1, x)/besselj(nu, x) g = nu*log(x) - log(besselj(nu, x)) assert", "of guidance to properly simplify heurisch() output. assert ratsimp(heurisch(f, x))", "different permutation # would give the optimal result? 
def drop_const(expr,", "x, y, z, nu = symbols('x,y,z,nu') f = Function('f') def", "+ 2) assert heurisch(1/(x + sin(y)), x) == log(x +", "x) == g @pytest.mark.slow # 8 seconds on 3.4 GHz", "assert heurisch(x**17, x) == x**18/18 def test_heurisch_fractions(): assert heurisch(1/x, x)", "assert heurisch(1/x, x) == log(x) assert heurisch(1/(2 + x), x)", "-1/x assert heurisch(-1/x**5, x) == 1/(4*x**4) def test_heurisch_log(): assert heurisch(log(x),", "the algorithm does the right thing if the ring is", "def test_heurisch_fractions(): assert heurisch(1/x, x) == log(x) assert heurisch(1/(2 +", "== x*sinh(x) - cosh(x) assert heurisch( x*asinh(x/2), x) == x**2*asinh(x/2)/2", "assert heurisch(1, x) == x assert heurisch(x, x) == x**2/2", "assert heurisch(1/pi/4 * x**2*cos(x), x) == 1/pi/4*(x**2*sin(x) - 2*sin(x) +", "assert heurisch(sin(x)*cos(x), x) in [sin(x)**2 / 2, -cos(x)**2 / 2]", "heurisch_wrapper(f, x) == -log(x - y) f = 1/((y -", "7 assert heurisch(1/pi/4 * x**2*cos(x), x) == 1/pi/4*(x**2*sin(x) - 2*sin(x)", "- x**2))*asin(x/4) \\ + (sqrt(16 - x**2))*acos(x/4) + x*asin(x/4)*acos(x/4) def", "x) == Piecewise( (-1/x, Eq(y, 0)), (-I*log(x - I*sqrt(y))/(2*sqrt(y)) +", "# TODO: in both cases heurisch() gives None. 
Wrong besselj()", "in answer when run via setup.py and cos(x) when run", "3.4 GHz def test_pmint_logexp(): f = (1 + x +", "2**x*log(2)**(-2) assert heurisch(Integral(x**z*y, (y, 1, 2), (z, 2, 3)).function, x)", "assert components(sin(x)*sqrt(log(x)), x) == \\ {log(x), sin(x), sqrt(log(x)), x} assert", "f = LambertW(x) g = x*LambertW(x) - x + x/LambertW(x)", "assert (heurisch(sqrt(1 + 7*x**2), x, hints=[]) == x*sqrt(1 + 7*x**2)/2", "assert heurisch(cosh(x), x) == sinh(x) assert heurisch(x*sinh(x), x) == x*cosh(x)", "== x*sqrt(1 + 7*x**2)/2 + sqrt(7)*asinh(sqrt(7)*x)/14) assert (heurisch(sqrt(1 - 7*x**2),", "+ I)/2 - \\ I*log(log(x) - I)/2 # These are", "assert heurisch(tan(x), x) in [ log(1 + tan(x)**2)/2, log(tan(x) +", "hints=[]) == x*li(x) - Ei(2*log(x)) def test_heurisch_function(): assert heurisch(f(x), x)", "^ is this still correct? assert heurisch(5*x**5/( 2*x**6 - 5),", "heurisch() output. assert ratsimp(heurisch(f, x)) == g @pytest.mark.slow # 8", "+ tan(x) g = -x**2/2 - x/tan(x) + log(tan(x)**2 +", "Diofant requires a lot of guidance to properly simplify heurisch()", "() x, y, z, nu = symbols('x,y,z,nu') f = Function('f')", "# Airy functions # f = (x - AiryAi(x)*AiryAi(1, x))", "x) == exp(x) assert heurisch(exp(-x), x) == -exp(-x) assert heurisch(exp(17*x),", "x} assert components(f(x), x) == \\ {x, f(x)} assert components(Derivative(f(x),", "test_heurisch_function(): assert heurisch(f(x), x) is None def test_heurisch_wrapper(): f =", "test_pmint_besselj(): # TODO: in both cases heurisch() gives None. Wrong", "g = -x**2/2 - x/tan(x) + log(tan(x)**2 + 1)/2 assert", "y + z) def test_heurisch_symbolic_coeffs_1130(): y = Symbol('y') assert heurisch_wrapper(1/(x**2", "# issue sympy/sympy#8685. 
assert heurisch(sqrt(1 + 0.25*x**2), x, hints=[]) ==", "assert heurisch(Sum(x**z, (z, 1, 2)).function, z) == x**z/log(x) def test_heurisch_trigonometric():", "simplify, sin, sinh, sqrt, symbols, tan) from diofant.integrals.heurisch import components,", "x) == \\ Piecewise((1/x, Eq(y, 0)), (log(x + y)/2/y -", "(z, 1, 2)).function, z) == x**z/log(x) def test_heurisch_trigonometric(): assert heurisch(sin(x),", "(sqrt(16 - x**2))*acos(x/4) + x*asin(x/4)*acos(x/4) def test_heurisch_hyperbolic(): assert heurisch(sinh(x), x)", "== sqrt(pi)*erf(x)**2 / 4 def test_heurisch_symbolic_coeffs(): assert heurisch(1/(x + y),", "+ log(x + log(x) + exp(x)), # but Diofant requires", "components(sin(x)*sqrt(log(x)), x) == \\ {log(x), sin(x), sqrt(log(x)), x} assert components(x*sin(exp(x)*y),", "Symbol('y') assert heurisch_wrapper(1/(x**2 + y), x) == Piecewise( (-1/x, Eq(y,", "x) == x*erf(x) + exp(-x**2)/sqrt(pi) assert heurisch(exp(-x**2)*erf(x), x) == sqrt(pi)*erf(x)**2", "== x**2/2 assert heurisch(x**17, x) == x**18/18 def test_heurisch_fractions(): assert", "-cos(x) assert heurisch(pi*sin(x) + 1, x) == x - pi*cos(x)", "I*log(x + I*sqrt(y)) / (2*sqrt(y)) - I*log(x - I*sqrt(y))/(2*sqrt(y))] def", "x*li(x) - Ei(2*log(x)) def test_heurisch_function(): assert heurisch(f(x), x) is None", "heurisch(x*2**x, x) == x*2**x/log(2) - 2**x*log(2)**(-2) assert heurisch(Integral(x**z*y, (y, 1,", "heurisch_wrapper(f, x) == x*sqrt(x**2)*sqrt(1/(-x**2 + y**2)) \\ - y**2*sqrt(x**2)*sqrt(1/(-x**2 +", "2/y**2*sin(y*sqrt(x)) - \\ 2*sqrt(x)*cos(y*sqrt(x))/y assert heurisch_wrapper(sin(y*sqrt(x)), x) == Piecewise( (0,", "sin(x), sqrt(log(x)), x} assert components(x*sin(exp(x)*y), x) == \\ {sin(y*exp(x)), x,", "+ z), y), y)) == log(x + y + z)", "if expr.is_Add: return Add(*[ arg for arg in expr.args if", "test_heurisch_fractions(): assert heurisch(1/x, x) == log(x) assert heurisch(1/(2 + x),", "2*x - (sqrt(16 - x**2))*asin(x/4) \\ + (sqrt(16 - x**2))*acos(x/4)", "x) == g @pytest.mark.xfail def 
test_pmint_besselj(): # TODO: in both", "(nu*besselj(nu, x) - x*besselj(nu + 1, x))/x g = besselj(nu,", "f = WhittakerW(mu + 1, nu, x) / (WhittakerW(mu, nu,", "= x**2 * AiryAi(x) # g = -AiryAi(x) + AiryAi(1,", "2*x*cos(x)) assert heurisch(acos(x/4) * asin(x/4), x) == 2*x - (sqrt(16", "result? def drop_const(expr, x): if expr.is_Add: return Add(*[ arg for", "1/(y - x) assert heurisch_wrapper(f, x) == -log(x - y)", "assert heurisch(exp(-x), x) == -exp(-x) assert heurisch(exp(17*x), x) == exp(17*x)", "# TODO: convert the rest of PMINT tests: # Airy", "x) == \\ {x, f(x)} assert components(Derivative(f(x), x), x) ==", "heurisch(5*x**5/(2*x**6 + 5), x) == 5*log(2*x**6 + 5) / 12", "+ Rational(1,2)*ln(x - AiryAi(x)) # f = x**2 * AiryAi(x)", "+ (sqrt(16 - x**2))*acos(x/4) + x*asin(x/4)*acos(x/4) def test_heurisch_hyperbolic(): assert heurisch(sinh(x),", "5*log(-2*x**6 + 5) / 12] assert heurisch(5*x**5/(2*x**6 + 5), x)", "assert heurisch(5*x**5/( 2*x**6 - 5), x) in [5*log(2*x**6 - 5)", "asinh(2*x/3)/2 assert heurisch(li(x), x, hints=[]) == x*li(x) - Ei(2*log(x)) def", "* x**2*cos(x), x) == 1/pi/4*(x**2*sin(x) - 2*sin(x) + 2*x*cos(x)) assert", "== 2*sqrt(x) assert heurisch(1/sqrt(x)**3, x) == -2/sqrt(x) assert heurisch(sqrt(x)**3, x)", "- 5), x) in [5*log(2*x**6 - 5) / 12, 5*log(-2*x**6", "(heurisch(sqrt(1 - 7*x**2), x, hints=[]) == x*sqrt(1 - 7*x**2)/2 +", "hints=[]) == asin(2*x/3)/2 assert heurisch(1/sqrt(9 + 4*x**2), x, hints=[]) ==", "x) == g def test_RR(): # Make sure the algorithm", "{log(x), sin(x), sqrt(log(x)), x} assert components(x*sin(exp(x)*y), x) == \\ {sin(y*exp(x)),", "x)} def test_heurisch_polynomials(): assert heurisch(1, x) == x assert heurisch(x,", "heurisch(1/(x + sqrt(2)), x) == log(x + sqrt(2)) assert simplify(diff(heurisch(log(x", "log(x - y)/2/y, True)) # issue sympy/sympy#6926 f = sqrt(x**2/((y", "TODO: in both cases heurisch() gives None. 
Wrong besselj() derivative?", "+ y)/2/y - log(x - y)/2/y, True)) # issue sympy/sympy#6926", "components(f(x), x) == \\ {x, f(x)} assert components(Derivative(f(x), x), x)", "Symbol('y', positive=True) assert heurisch_wrapper(1/(x**2 + y), x) in [I/sqrt(y)*log(x +", "(heurisch(exp(-7*x**2), x, hints=[]) == sqrt(7*pi)*erf(sqrt(7)*x)/14) assert heurisch(1/sqrt(9 - 4*x**2), x,", "AiryAi(x)) + Rational(1,2)*ln(x - AiryAi(x)) # f = x**2 *", "8*x**2 + 6*x + 3*x**3)/(x**5 + 4*x**3 + 4*x) +", "x) == 2*sqrt(x)**5/5 assert heurisch(sin(x)*sqrt(cos(x)), x) == -2*sqrt(cos(x))**3/3 y =", "1.0*asinh(0.5*x) # TODO: convert the rest of PMINT tests: #", "2) assert heurisch(1/(x + sin(y)), x) == log(x + sin(y))", "AiryAi(x)**2) # g = Rational(1,2)*ln(x + AiryAi(x)) + Rational(1,2)*ln(x -", "8 seconds on 3.4 GHz def test_pmint_logexp(): f = (1", "4*x**2), x, hints=[]) == asin(2*x/3)/2 assert heurisch(1/sqrt(9 + 4*x**2), x,", "x, hints=[]) == sqrt(7)*asinh(sqrt(7)*x)/7) assert (heurisch(1/sqrt(1 - 7*x**2), x, hints=[])", "+ log(x) + exp(x))**2/x g = log(x**2 + 2*x*exp(x) +", "- erf(x) + 1) g = sqrt(pi)*log(erf(x) - 1)/8 -", "assert simplify(diff(heurisch(log(x + y + z), y), y)) == log(x", "# gives sin(x) in answer when run via setup.py and", "heurisch_wrapper(f, x) == \\ Piecewise((1/x, Eq(y, 0)), (log(x + y)/2/y", "1) g = sqrt(pi)*log(erf(x) - 1)/8 - sqrt(pi)*log(erf(x) + 1)/8", "17 assert heurisch(x*exp(x), x) == x*exp(x) - exp(x) assert heurisch(x*exp(x**2),", "exp(17*x) / 17 assert heurisch(x*exp(x), x) == x*exp(x) - exp(x)", "== x*sqrt(x**2)*sqrt(1/(-x**2 + y**2)) \\ - y**2*sqrt(x**2)*sqrt(1/(-x**2 + y**2))/x def", "from diofant.integrals.heurisch import components, heurisch, heurisch_wrapper __all__ = () x,", "5*log(2*x**6 + 5) / 12 assert heurisch(1/x**2, x) == -1/x", "the right thing if the ring is RR. 
See #", "Eq(y, 0)), (log(x + y)/2/y - log(x - y)/2/y, True))", "x) == x - pi*cos(x) assert heurisch(cos(x), x) == sin(x)", "x*sinh(x) - cosh(x) assert heurisch( x*asinh(x/2), x) == x**2*asinh(x/2)/2 +", "(heurisch(1/sqrt(1 + 7*x**2), x, hints=[]) == sqrt(7)*asinh(sqrt(7)*x)/7) assert (heurisch(1/sqrt(1 -", "heurisch(sqrt(x)**3, x) == 2*sqrt(x)**5/5 assert heurisch(sin(x)*sqrt(cos(x)), x) == -2*sqrt(cos(x))**3/3 y", "heurisch(1/sqrt(x)**3, x) == -2/sqrt(x) assert heurisch(sqrt(x)**3, x) == 2*sqrt(x)**5/5 assert", "assert heurisch(1/(x + y), x) == log(x + y) assert", "# Make sure the algorithm does the right thing if", "(x + omega(x))))/(1 + omega(x))/(x + omega(x)) g = log(x", "2*sqrt(x)**5/5 assert heurisch(sin(x)*sqrt(cos(x)), x) == -2*sqrt(cos(x))**3/3 y = Symbol('y') assert", "Make sure the algorithm does the right thing if the", "+ log(x)) # TODO: Optimal solution is g = 1/(x", "hints=[]) == asinh(2*x/3)/2 assert heurisch(li(x), x, hints=[]) == x*li(x) -", "== -1/x assert heurisch(-1/x**5, x) == 1/(4*x**4) def test_heurisch_log(): assert", "http://www-sop.inria.fr/cafe/Manuel.Bronstein/pmint/examples/ def test_pmint_rat(): # TODO: heurisch() is off by a", "x) == (x*x**z*y)/(z+1) assert heurisch(Sum(x**z, (z, 1, 2)).function, z) ==", "/ 12] assert heurisch(5*x**5/(2*x**6 + 5), x) == 5*log(2*x**6 +", "g @pytest.mark.xfail def test_pmint_besselj(): # TODO: in both cases heurisch()", "assert components(1/(x + y), x) == {x} assert components(sin(x), x)", "log(tan(x) + I) + I*x, log(tan(x) - I) - I*x,", "def test_heurisch_hyperbolic(): assert heurisch(sinh(x), x) == cosh(x) assert heurisch(cosh(x), x)", "assert components(sin(x), x) == {sin(x), x} assert components(sin(x)*sqrt(log(x)), x) ==", "heurisch() gives None. Wrong besselj() derivative? f = besselj(nu +", "(Add, Derivative, Ei, Eq, Function, I, Integral, LambertW, Piecewise, Rational,", "5), x) == 5*log(2*x**6 + 5) / 12 assert heurisch(1/x**2,", "if the ring is RR. See # issue sympy/sympy#8685. 
assert", "2] assert heurisch(cos(x)/sin(x), x) == log(sin(x)) assert heurisch(x*sin(7*x), x) ==", "(-1/x, Eq(y, 0)), (-I*log(x - I*sqrt(y))/(2*sqrt(y)) + I*log(x + I*sqrt(y))/(2*sqrt(y)),", "components(x**Rational(17, 54)/sqrt(sin(x)), x) == \\ {sin(x), root(x, 54), sqrt(sin(x)), x}", "g = (4 + 8*x**2 + 6*x + 3*x**3)/(x**5 +", "return expr f = (x**7 - 24*x**4 - 4*x**2 +", "g = sqrt(pi)*log(erf(x) - 1)/8 - sqrt(pi)*log(erf(x) + 1)/8 -", "assert heurisch_wrapper(1/(x**2 + y), x) == Piecewise( (-1/x, Eq(y, 0)),", "heurisch_wrapper(1/(x**2 + y), x) in [I/sqrt(y)*log(x + sqrt(-y))/2 - I/sqrt(y)*log(x", "g def test_pmint_trig(): f = (x - tan(x)) / tan(x)**2", "== I*log(log(x) + I)/2 - \\ I*log(log(x) - I)/2 #", "g def test_pmint_LambertW(): f = LambertW(x) g = x*LambertW(x) -", "test_heurisch_mixed(): assert heurisch(sin(x)*exp(x), x) == exp(x)*sin(x)/2 - exp(x)*cos(x)/2 def test_heurisch_radicals():", "assert heurisch(1/x**2, x) == -1/x assert heurisch(-1/x**5, x) == 1/(4*x**4)", "x) == x**18/18 def test_heurisch_fractions(): assert heurisch(1/x, x) == log(x)", "2, 3)).function, x) == (x*x**z*y)/(z+1) assert heurisch(Sum(x**z, (z, 1, 2)).function,", "heurisch(x*exp(x**2), x) == exp(x**2) / 2 assert heurisch(exp(-x**2), x) is", "These are examples from the Poor Man's Integrator # http://www-sop.inria.fr/cafe/Manuel.Bronstein/pmint/examples/", "== -log(x - y) f = 1/((y - x)*(y +", "y), x) == Piecewise( (-1/x, Eq(y, 0)), (-I*log(x - I*sqrt(y))/(2*sqrt(y))", "= nu*log(x) - log(besselj(nu, x)) assert simplify(heurisch(f, x) - g)", "x)*x # Whittaker functions # f = WhittakerW(mu + 1,", "via setup.py and cos(x) when run via py.test assert heurisch(sin(x)*cos(x),", "x) == 2/y**2*sin(y*sqrt(x)) - \\ 2*sqrt(x)*cos(y*sqrt(x))/y def test_heurisch_special(): assert heurisch(erf(x),", "# f = (x - AiryAi(x)*AiryAi(1, x)) / (x**2 -", "x) == {sin(x), x} assert components(sin(x)*sqrt(log(x)), x) == \\ {log(x),", "off by a constant: -3/4. 
Possibly different permutation # would", "# but Diofant requires a lot of guidance to properly", "nu, x) * x) # g = x/2 - mu*ln(x)", "x) == 2*sqrt(x) assert heurisch(1/sqrt(x)**3, x) == -2/sqrt(x) assert heurisch(sqrt(x)**3,", "exp(x)*sin(x)/2 - exp(x)*cos(x)/2 def test_heurisch_radicals(): assert heurisch(1/sqrt(x), x) == 2*sqrt(x)", "x) is None def test_heurisch_wrapper(): f = 1/(y + x)", "x**2 * AiryAi(x) # g = -AiryAi(x) + AiryAi(1, x)*x", "sin(y)) # Up to a constant, where C = 5*pi*I/12,", "ratsimp(heurisch(f, x)) == g @pytest.mark.slow # 8 seconds on 3.4", "heurisch(erf(x), x) == x*erf(x) + exp(-x**2)/sqrt(pi) assert heurisch(exp(-x**2)*erf(x), x) ==", "x/tan(x) + log(tan(x)**2 + 1)/2 assert heurisch(f, x) == g", "= Rational(1,2)*ln(x + AiryAi(x)) + Rational(1,2)*ln(x - AiryAi(x)) # f", "x) == 1/pi/4*(x**2*sin(x) - 2*sin(x) + 2*x*cos(x)) assert heurisch(acos(x/4) *", "== x assert heurisch(x, x) == x**2/2 assert heurisch(x**17, x)", "asin(2*x/3)/2 assert heurisch(1/sqrt(9 + 4*x**2), x, hints=[]) == asinh(2*x/3)/2 assert", "- 24*x**4 - 4*x**2 + 8*x - 8)/(x**8 + 6*x**6", "def test_pmint_rat(): # TODO: heurisch() is off by a constant:", "\\ {x, f(x), Derivative(f(x), x), Derivative(f(x), x)} def test_heurisch_polynomials(): assert", "assert heurisch(acos(x/4) * asin(x/4), x) == 2*x - (sqrt(16 -", "== Piecewise( (0, Eq(y, 0)), (-2*sqrt(x)*cos(sqrt(x)*y)/y + 2*sin(sqrt(x)*y)/y**2, True)) y", "+ 4*x) + log(x) assert drop_const(ratsimp(heurisch(f, x)), x) == g", "/ 49 - x*cos(7*x) / 7 assert heurisch(1/pi/4 * x**2*cos(x),", "2*x*log(x) + exp(2*x) + 2*exp(x)*log(x) + log(x)**2)/2 + 1/(x +", "sin(x) assert heurisch(tan(x), x) in [ log(1 + tan(x)**2)/2, log(tan(x)", "x)) assert simplify(heurisch(f, x) - g) == 0 f =", "= sqrt(pi)*log(erf(x) - 1)/8 - sqrt(pi)*log(erf(x) + 1)/8 - sqrt(pi)/(4*erf(x)", "PMINT tests: # Airy functions # f = (x -", "x assert heurisch(x, x) == x**2/2 assert heurisch(x**17, x) ==", "x*LambertW(x) - x + x/LambertW(x) assert heurisch(f, x) == g", 
"I) + I*x, log(tan(x) - I) - I*x, ] assert", "- sqrt(-y))/2, I*log(x + I*sqrt(y)) / (2*sqrt(y)) - I*log(x -", "assert heurisch(pi*sin(x) + 1, x) == x - pi*cos(x) assert", "x, hints=[]) == asin(2*x/3)/2 assert heurisch(1/sqrt(9 + 4*x**2), x, hints=[])", "= 1/((y - x)*(y + x)) assert heurisch_wrapper(f, x) ==", "x) == 2/y**2*sin(y*sqrt(x)) - \\ 2*sqrt(x)*cos(y*sqrt(x))/y assert heurisch_wrapper(sin(y*sqrt(x)), x) ==", "- 2*x] def test_heurisch_exp(): assert heurisch(exp(x), x) == exp(x) assert", "+ y**2))/x def test_sympyissue_3609(): assert heurisch(1/(x * (1 + log(x)**2)),", "heurisch_wrapper(sin(y*sqrt(x)), x) == Piecewise( (0, Eq(y, 0)), (-2*sqrt(x)*cos(sqrt(x)*y)/y + 2*sin(sqrt(x)*y)/y**2,", "+ sin(y)), x) == log(x + sin(y)) # Up to", "assert heurisch( x*asinh(x/2), x) == x**2*asinh(x/2)/2 + asinh(x/2) - x*sqrt(4", "x, hints=[]) == x*sqrt(1 - 7*x**2)/2 + sqrt(7)*asin(sqrt(7)*x)/14) assert (heurisch(1/sqrt(1", "== log(x + y + z) def test_heurisch_symbolic_coeffs_1130(): y =", "in [sin(x)**2 / 2, -cos(x)**2 / 2] assert heurisch(cos(x)/sin(x), x)", "True)) y = Symbol('y', positive=True) assert heurisch_wrapper(1/(x**2 + y), x)", "log(x)**2)/2 + 1/(x + exp(x) + log(x)) # TODO: Optimal", "/ 4 def test_heurisch_symbolic_coeffs(): assert heurisch(1/(x + y), x) ==", "+ x*log(x) assert heurisch(log(x**2), x) in [x*log(x**2) - 2*x, 2*x*log(x)", "3*x**3)/(x**5 + 4*x**3 + 4*x) + log(x) assert drop_const(ratsimp(heurisch(f, x)),", "in [x*log(x**2) - 2*x, 2*x*log(x) - 2*x] def test_heurisch_exp(): assert", "functions # f = WhittakerW(mu + 1, nu, x) /", "+ 7*x**2)/2 + sqrt(7)*asinh(sqrt(7)*x)/14) assert (heurisch(sqrt(1 - 7*x**2), x, hints=[])", "+ exp(x)) + log(x + log(x) + exp(x)), # but", "== 1/pi/4*(x**2*sin(x) - 2*sin(x) + 2*x*cos(x)) assert heurisch(acos(x/4) * asin(x/4),", "- 7*x**2), x, hints=[]) == x*sqrt(1 - 7*x**2)/2 + sqrt(7)*asin(sqrt(7)*x)/14)", "x) == \\ {sin(y*exp(x)), x, exp(x)} assert components(x**Rational(17, 54)/sqrt(sin(x)), x)", "expr f = (x**7 - 24*x**4 
- 4*x**2 + 8*x", "x) == \\ {x, f(x), Derivative(f(x), x), Derivative(f(x), x)} def", "lot of guidance to properly simplify heurisch() output. assert ratsimp(heurisch(f,", "/ (2*sqrt(y)) - I*log(x - I*sqrt(y))/(2*sqrt(y))] def test_heurisch_hacking(): assert (heurisch(sqrt(1", "* (1 + log(x)**2)), x) == I*log(log(x) + I)/2 -", "@pytest.mark.slow # 8 seconds on 3.4 GHz def test_pmint_erf(): f", "+ exp(x)), # but Diofant requires a lot of guidance", "the Poor Man's Integrator # http://www-sop.inria.fr/cafe/Manuel.Bronstein/pmint/examples/ def test_pmint_rat(): # TODO:", "def test_heurisch_symbolic_coeffs_1130(): y = Symbol('y') assert heurisch_wrapper(1/(x**2 + y), x)", "heurisch() is off by a constant: -3/4. Possibly different permutation", "+ 2*x*cos(x)) assert heurisch(acos(x/4) * asin(x/4), x) == 2*x -", "WhittakerW(mu + 1, nu, x) / (WhittakerW(mu, nu, x) *", "assert heurisch(f, x) == g @pytest.mark.slow # 8 seconds on", "arg in expr.args if arg.has(x) ]) else: return expr f", "Optimal solution is g = 1/(x + log(x) + exp(x))", "assert (heurisch(exp(-7*x**2), x, hints=[]) == sqrt(7*pi)*erf(sqrt(7)*x)/14) assert heurisch(1/sqrt(9 - 4*x**2),", "assert heurisch(f, x) == g @pytest.mark.xfail def test_pmint_besselj(): # TODO:", "give the optimal result? def drop_const(expr, x): if expr.is_Add: return", "from diofant import (Add, Derivative, Ei, Eq, Function, I, Integral,", "# Whittaker functions # f = WhittakerW(mu + 1, nu,", "expressions without any care. # XXX ^ ^ ^ is", "TODO: Optimal solution is g = 1/(x + log(x) +", "heurisch(x*sinh(x), x) == x*cosh(x) - sinh(x) assert heurisch(x*cosh(x), x) ==", "optimal result? def drop_const(expr, x): if expr.is_Add: return Add(*[ arg", "== sqrt(7*pi)*erf(sqrt(7)*x)/14) assert heurisch(1/sqrt(9 - 4*x**2), x, hints=[]) == asin(2*x/3)/2", "the optimal result? 
def drop_const(expr, x): if expr.is_Add: return Add(*[", "heurisch(x, x) == x**2/2 assert heurisch(x**17, x) == x**18/18 def", "x) == -log(x - y) f = 1/((y - x)*(y", "== \\ {x, f(x), Derivative(f(x), x)} assert components(f(x)*diff(f(x), x), x)", "assert heurisch(li(x), x, hints=[]) == x*li(x) - Ei(2*log(x)) def test_heurisch_function():", "x*sqrt(1 + 7*x**2)/2 + sqrt(7)*asinh(sqrt(7)*x)/14) assert (heurisch(sqrt(1 - 7*x**2), x,", "hints=[]) == sqrt(7)*asin(sqrt(7)*x)/7) assert (heurisch(exp(-7*x**2), x, hints=[]) == sqrt(7*pi)*erf(sqrt(7)*x)/14) assert", "\\ {x, f(x), Derivative(f(x), x)} assert components(f(x)*diff(f(x), x), x) ==", "in [ log(1 + tan(x)**2)/2, log(tan(x) + I) + I*x,", "7*x**2), x, hints=[]) == sqrt(7)*asin(sqrt(7)*x)/7) assert (heurisch(exp(-7*x**2), x, hints=[]) ==", "x) == log(x + y) f = 1/(y - x)", "x) == sinh(x) assert heurisch(x*sinh(x), x) == x*cosh(x) - sinh(x)", "Symbol('y') assert heurisch(sin(y*sqrt(x)), x) == 2/y**2*sin(y*sqrt(x)) - \\ 2*sqrt(x)*cos(y*sqrt(x))/y assert", "ratsimp, root, simplify, sin, sinh, sqrt, symbols, tan) from diofant.integrals.heurisch", "y), x) in [I/sqrt(y)*log(x + sqrt(-y))/2 - I/sqrt(y)*log(x - sqrt(-y))/2,", "x) == x*sqrt(x**2)*sqrt(1/(-x**2 + y**2)) \\ - y**2*sqrt(x**2)*sqrt(1/(-x**2 + y**2))/x", "g = log(x**2 + 2*x*exp(x) + 2*x*log(x) + exp(2*x) +", "Integral, LambertW, Piecewise, Rational, Sum, Symbol, acos, asin, asinh, besselj,", "x**2*cos(x), x) == 1/pi/4*(x**2*sin(x) - 2*sin(x) + 2*x*cos(x)) assert heurisch(acos(x/4)", "heurisch(exp(-x**2)*erf(x), x) == sqrt(pi)*erf(x)**2 / 4 def test_heurisch_symbolic_coeffs(): assert heurisch(1/(x", "== -x + x*log(3) + x*log(x) assert heurisch(log(x**2), x) in", "== g @pytest.mark.xfail def test_pmint_besselj(): # TODO: in both cases", "y**2))/x def test_sympyissue_3609(): assert heurisch(1/(x * (1 + log(x)**2)), x)", "log(x) + exp(x) - 1)/(x + log(x) + exp(x))**2/x g", "+ y) f = 1/(y - x) assert heurisch_wrapper(f, x)", "- \\ 2*sqrt(x)*cos(y*sqrt(x))/y def 
test_heurisch_special(): assert heurisch(erf(x), x) == x*erf(x)", "components(Derivative(f(x), x), x) == \\ {x, f(x), Derivative(f(x), x)} assert", "x))/x g = besselj(nu, x) assert simplify(heurisch(f, x) - g)", "Possibly different permutation # would give the optimal result? def", "x, hints=[]) == \\ 0.5*x*sqrt(0.25*x**2 + 1) + 1.0*asinh(0.5*x) #", "f = 1/(y - x) assert heurisch_wrapper(f, x) == -log(x", "- I)/2 # These are examples from the Poor Man's", "== sin(7*x) / 49 - x*cos(7*x) / 7 assert heurisch(1/pi/4", "= Symbol('y') assert heurisch(sin(y*sqrt(x)), x) == 2/y**2*sin(y*sqrt(x)) - \\ 2*sqrt(x)*cos(y*sqrt(x))/y", "assert heurisch(f, x) == g def test_RR(): # Make sure", "log(tan(x)**2 + 1)/2 assert heurisch(f, x) == g @pytest.mark.slow #", "7*x**2)/2 + sqrt(7)*asinh(sqrt(7)*x)/14) assert (heurisch(sqrt(1 - 7*x**2), x, hints=[]) ==", "1, x) == x - pi*cos(x) assert heurisch(cos(x), x) ==", "run via py.test assert heurisch(sin(x)*cos(x), x) in [sin(x)**2 / 2,", "- I*sqrt(y))/(2*sqrt(y))] def test_heurisch_hacking(): assert (heurisch(sqrt(1 + 7*x**2), x, hints=[])", "- y**2*sqrt(x**2)*sqrt(1/(-x**2 + y**2))/x def test_sympyissue_3609(): assert heurisch(1/(x * (1", "7*x**2), x, hints=[]) == x*sqrt(1 - 7*x**2)/2 + sqrt(7)*asin(sqrt(7)*x)/14) assert", "test_pmint_LambertW(): f = LambertW(x) g = x*LambertW(x) - x +", "assert heurisch(x, x) == x**2/2 assert heurisch(x**17, x) == x**18/18", "of expressions without any care. 
# XXX ^ ^ ^", "is g = 1/(x + log(x) + exp(x)) + log(x", "(1 + omega(x) * (2 + cos(omega(x)) * (x +", "heurisch(li(x), x, hints=[]) == x*li(x) - Ei(2*log(x)) def test_heurisch_function(): assert", "== exp(x**2) / 2 assert heurisch(exp(-x**2), x) is None assert", "= (x - tan(x)) / tan(x)**2 + tan(x) g =", "to a constant, where C = 5*pi*I/12, Mathematica gives identical", "y) f = 1/((y - x)*(y + x)) assert heurisch_wrapper(f,", "+ x**2)/4 def test_heurisch_mixed(): assert heurisch(sin(x)*exp(x), x) == exp(x)*sin(x)/2 -", "/ tan(x)**2 + tan(x) g = -x**2/2 - x/tan(x) +", "x)/besselj(nu, x) g = nu*log(x) - log(besselj(nu, x)) assert simplify(heurisch(f,", "- y)/2/y, True)) # issue sympy/sympy#6926 f = sqrt(x**2/((y -", "+ 3*x**3)/(x**5 + 4*x**3 + 4*x) + log(x) assert drop_const(ratsimp(heurisch(f,", "x*exp(x) - exp(x) assert heurisch(x*exp(x**2), x) == exp(x**2) / 2", "== asin(2*x/3)/2 assert heurisch(1/sqrt(9 + 4*x**2), x, hints=[]) == asinh(2*x/3)/2", "the first case. The difference is because diofant changes #", "- 2*x, 2*x*log(x) - 2*x] def test_heurisch_exp(): assert heurisch(exp(x), x)", "test_heurisch_radicals(): assert heurisch(1/sqrt(x), x) == 2*sqrt(x) assert heurisch(1/sqrt(x)**3, x) ==", "hints=[]) == \\ 0.5*x*sqrt(0.25*x**2 + 1) + 1.0*asinh(0.5*x) # TODO:", "setup.py and cos(x) when run via py.test assert heurisch(sin(x)*cos(x), x)", "sqrt(log(x)), x} assert components(x*sin(exp(x)*y), x) == \\ {sin(y*exp(x)), x, exp(x)}", "x*log(x) assert heurisch(log(x**2), x) in [x*log(x**2) - 2*x, 2*x*log(x) -", "sqrt(pi)/(4*erf(x) - 4) assert ratsimp(heurisch(f, x)) == g def test_pmint_LambertW():", "x) == exp(x)*sin(x)/2 - exp(x)*cos(x)/2 def test_heurisch_radicals(): assert heurisch(1/sqrt(x), x)", "heurisch(sin(x), x) == -cos(x) assert heurisch(pi*sin(x) + 1, x) ==", "+ log(x) + exp(x)) + log(x + log(x) + exp(x)),", "test_pmint_rat(): # TODO: heurisch() is off by a constant: -3/4.", "f = 1/((y - x)*(y + x)) assert heurisch_wrapper(f, x)", "== \\ {x, f(x)} assert 
components(Derivative(f(x), x), x) == \\", "exp(-x**2)/sqrt(pi) assert heurisch(exp(-x**2)*erf(x), x) == sqrt(pi)*erf(x)**2 / 4 def test_heurisch_symbolic_coeffs():", "x)), x) == g def test_pmint_trig(): f = (x -", "omega(x)) g = log(x + LambertW(exp(x))) + sin(LambertW(exp(x))) assert heurisch(f,", "== 2*x - (sqrt(16 - x**2))*asin(x/4) \\ + (sqrt(16 -", "+ x)) assert heurisch_wrapper(f, x) == \\ Piecewise((1/x, Eq(y, 0)),", "cosh(x) assert heurisch( x*asinh(x/2), x) == x**2*asinh(x/2)/2 + asinh(x/2) -", "x) == x**2*asinh(x/2)/2 + asinh(x/2) - x*sqrt(4 + x**2)/4 def", "hints=[]) == sqrt(7*pi)*erf(sqrt(7)*x)/14) assert heurisch(1/sqrt(9 - 4*x**2), x, hints=[]) ==", "8 seconds on 3.4 GHz def test_pmint_erf(): f = exp(-x**2)*erf(x)/(erf(x)**3", "== \\ {sin(y*exp(x)), x, exp(x)} assert components(x**Rational(17, 54)/sqrt(sin(x)), x) ==", "+ 1)/2 assert heurisch(f, x) == g @pytest.mark.slow # 8", "test_sympyissue_3609(): assert heurisch(1/(x * (1 + log(x)**2)), x) == I*log(log(x)", "\\ 2*sqrt(x)*cos(y*sqrt(x))/y def test_heurisch_special(): assert heurisch(erf(x), x) == x*erf(x) +", "heurisch(sin(x)*sqrt(cos(x)), x) == -2*sqrt(cos(x))**3/3 y = Symbol('y') assert heurisch(sin(y*sqrt(x)), x)", "symbols, tan) from diofant.integrals.heurisch import components, heurisch, heurisch_wrapper __all__ =", "1/pi/4*(x**2*sin(x) - 2*sin(x) + 2*x*cos(x)) assert heurisch(acos(x/4) * asin(x/4), x)", "heurisch(-1/x**5, x) == 1/(4*x**4) def test_heurisch_log(): assert heurisch(log(x), x) ==", "# g = Rational(1,2)*ln(x + AiryAi(x)) + Rational(1,2)*ln(x - AiryAi(x))", "+ 5), x) == 5*log(2*x**6 + 5) / 12 assert", "Eq(y, 0)), (-I*log(x - I*sqrt(y))/(2*sqrt(y)) + I*log(x + I*sqrt(y))/(2*sqrt(y)), True))", "x, hints=[]) == sqrt(7*pi)*erf(sqrt(7)*x)/14) assert heurisch(1/sqrt(9 - 4*x**2), x, hints=[])", "heurisch(exp(17*x), x) == exp(17*x) / 17 assert heurisch(x*exp(x), x) ==", "== Piecewise( (-1/x, Eq(y, 0)), (-I*log(x - I*sqrt(y))/(2*sqrt(y)) + I*log(x", "+ sqrt(7)*asinh(sqrt(7)*x)/14) assert 
(heurisch(sqrt(1 - 7*x**2), x, hints=[]) == x*sqrt(1", "0 @pytest.mark.slow def test_pmint_WrightOmega(): def omega(x): return LambertW(exp(x)) f =", "assert heurisch(sin(y*sqrt(x)), x) == 2/y**2*sin(y*sqrt(x)) - \\ 2*sqrt(x)*cos(y*sqrt(x))/y assert heurisch_wrapper(sin(y*sqrt(x)),", "== \\ {x, f(x), Derivative(f(x), x), Derivative(f(x), x)} def test_heurisch_polynomials():", "in expr.args if arg.has(x) ]) else: return expr f =", "] assert heurisch(sin(x)*sin(y), x) == -cos(x)*sin(y) assert heurisch(sin(x)*sin(y), y) ==", "heurisch_wrapper __all__ = () x, y, z, nu = symbols('x,y,z,nu')", "# g = x/2 - mu*ln(x) - ln(WhittakerW(mu, nu, x))", "def test_heurisch_trigonometric(): assert heurisch(sin(x), x) == -cos(x) assert heurisch(pi*sin(x) +", "+ exp(-x**2)/sqrt(pi) assert heurisch(exp(-x**2)*erf(x), x) == sqrt(pi)*erf(x)**2 / 4 def", "test_heurisch_wrapper(): f = 1/(y + x) assert heurisch_wrapper(f, x) ==", "== x*2**x/log(2) - 2**x*log(2)**(-2) assert heurisch(Integral(x**z*y, (y, 1, 2), (z,", "g = besselj(nu, x) assert simplify(heurisch(f, x) - g) ==", "x), x) == log(x + 2) assert heurisch(1/(x + sin(y)),", "x) == x assert heurisch(x, x) == x**2/2 assert heurisch(x**17,", "== x - pi*cos(x) assert heurisch(cos(x), x) == sin(x) assert", "# f = x**2 * AiryAi(x) # g = -AiryAi(x)", "to properly simplify heurisch() output. 
assert ratsimp(heurisch(f, x)) == g", "== x*cosh(x) - sinh(x) assert heurisch(x*cosh(x), x) == x*sinh(x) -", "positive=True) assert heurisch_wrapper(1/(x**2 + y), x) in [I/sqrt(y)*log(x + sqrt(-y))/2", "heurisch(1/(x * (1 + log(x)**2)), x) == I*log(log(x) + I)/2", "root, simplify, sin, sinh, sqrt, symbols, tan) from diofant.integrals.heurisch import", "exp(2*x) + 2*exp(x)*log(x) + log(x)**2)/2 + 1/(x + exp(x) +", "y = Symbol('y') assert heurisch(sin(y*sqrt(x)), x) == 2/y**2*sin(y*sqrt(x)) - \\", "+ 1, nu, x) / (WhittakerW(mu, nu, x) * x)", "answer when run via setup.py and cos(x) when run via", "from the Poor Man's Integrator # http://www-sop.inria.fr/cafe/Manuel.Bronstein/pmint/examples/ def test_pmint_rat(): #", "# f = WhittakerW(mu + 1, nu, x) / (WhittakerW(mu,", "is RR. See # issue sympy/sympy#8685. assert heurisch(sqrt(1 + 0.25*x**2),", "+ I*log(x + I*sqrt(y))/(2*sqrt(y)), True)) y = Symbol('y', positive=True) assert", "- x assert heurisch(log(3*x), x) == -x + x*log(3) +", "x) == log(sin(x)) assert heurisch(x*sin(7*x), x) == sin(7*x) / 49", "x) == sin(7*x) / 49 - x*cos(7*x) / 7 assert", "{sin(x), x} assert components(sin(x)*sqrt(log(x)), x) == \\ {log(x), sin(x), sqrt(log(x)),", "x) == cosh(x) assert heurisch(cosh(x), x) == sinh(x) assert heurisch(x*sinh(x),", "assert heurisch(sin(x)*sqrt(cos(x)), x) == -2*sqrt(cos(x))**3/3 y = Symbol('y') assert heurisch(sin(y*sqrt(x)),", "Function('f') def test_components(): assert components(x*y, x) == {x} assert components(1/(x", "Symbol('y', positive=True) assert heurisch_wrapper(sin(y*sqrt(x)), x) == 2/y**2*sin(y*sqrt(x)) - \\ 2*sqrt(x)*cos(y*sqrt(x))/y", "- pi*cos(x) assert heurisch(cos(x), x) == sin(x) assert heurisch(tan(x), x)", "x) - g) == 0 f = (nu*besselj(nu, x) -", "asin, asinh, besselj, cos, cosh, diff, erf, exp, li, log,", "= 1/(y - x) assert heurisch_wrapper(f, x) == -log(x -", "== 0 f = (nu*besselj(nu, x) - x*besselj(nu + 1,", "= exp(-x**2)*erf(x)/(erf(x)**3 - erf(x)**2 - erf(x) + 1) g =", "run via setup.py 
and cos(x) when run via py.test assert", "assert heurisch(1/sqrt(x), x) == 2*sqrt(x) assert heurisch(1/sqrt(x)**3, x) == -2/sqrt(x)", "assert heurisch(sin(x)*exp(x), x) == exp(x)*sin(x)/2 - exp(x)*cos(x)/2 def test_heurisch_radicals(): assert", "x) == x*sinh(x) - cosh(x) assert heurisch( x*asinh(x/2), x) ==", "x) g = nu*log(x) - log(besselj(nu, x)) assert simplify(heurisch(f, x)", "assert heurisch_wrapper(1/(x**2 + y), x) in [I/sqrt(y)*log(x + sqrt(-y))/2 -", "omega(x): return LambertW(exp(x)) f = (1 + omega(x) * (2", "- x + x/LambertW(x) assert heurisch(f, x) == g @pytest.mark.xfail", "when run via py.test assert heurisch(sin(x)*cos(x), x) in [sin(x)**2 /", "+ 1, x))/x g = besselj(nu, x) assert simplify(heurisch(f, x)", "log(x) assert heurisch(1/(2 + x), x) == log(x + 2)", "assert components(x*y, x) == {x} assert components(1/(x + y), x)", "x) assert heurisch_wrapper(f, x) == log(x + y) f =", "log(x) + exp(x)), # but Diofant requires a lot of", "== x**z/log(x) def test_heurisch_trigonometric(): assert heurisch(sin(x), x) == -cos(x) assert", "-AiryAi(x) + AiryAi(1, x)*x # Whittaker functions # f =", "heurisch(1/x, x) == log(x) assert heurisch(1/(2 + x), x) ==", "@pytest.mark.slow def test_pmint_WrightOmega(): def omega(x): return LambertW(exp(x)) f = (1", "heurisch(sinh(x), x) == cosh(x) assert heurisch(cosh(x), x) == sinh(x) assert", "sqrt(pi)*log(erf(x) + 1)/8 - sqrt(pi)/(4*erf(x) - 4) assert ratsimp(heurisch(f, x))", "+ 1.0*asinh(0.5*x) # TODO: convert the rest of PMINT tests:", "-2/sqrt(x) assert heurisch(sqrt(x)**3, x) == 2*sqrt(x)**5/5 assert heurisch(sin(x)*sqrt(cos(x)), x) ==", "x) == x*cosh(x) - sinh(x) assert heurisch(x*cosh(x), x) == x*sinh(x)", "+ omega(x) * (2 + cos(omega(x)) * (x + omega(x))))/(1", "8)/(x**8 + 6*x**6 + 12*x**4 + 8*x**2) g = (4", "Mathematica gives identical # result in the first case. 
The", "Integrator # http://www-sop.inria.fr/cafe/Manuel.Bronstein/pmint/examples/ def test_pmint_rat(): # TODO: heurisch() is off", "f = (x - AiryAi(x)*AiryAi(1, x)) / (x**2 - AiryAi(x)**2)", "y), x) == log(x + y) assert heurisch(1/(x + sqrt(2)),", "if arg.has(x) ]) else: return expr f = (x**7 -", "heurisch(x**17, x) == x**18/18 def test_heurisch_fractions(): assert heurisch(1/x, x) ==", "LambertW(exp(x))) + sin(LambertW(exp(x))) assert heurisch(f, x) == g def test_RR():", "+ 1, x)/besselj(nu, x) g = nu*log(x) - log(besselj(nu, x))", "+ 1) g = sqrt(pi)*log(erf(x) - 1)/8 - sqrt(pi)*log(erf(x) +", "f = (x - tan(x)) / tan(x)**2 + tan(x) g", "7*x**2), x, hints=[]) == sqrt(7)*asinh(sqrt(7)*x)/7) assert (heurisch(1/sqrt(1 - 7*x**2), x,", "z, nu = symbols('x,y,z,nu') f = Function('f') def test_components(): assert", "x*log(3) + x*log(x) assert heurisch(log(x**2), x) in [x*log(x**2) - 2*x,", "I*x, ] assert heurisch(sin(x)*sin(y), x) == -cos(x)*sin(y) assert heurisch(sin(x)*sin(y), y)", "assert heurisch(1/sqrt(9 - 4*x**2), x, hints=[]) == asin(2*x/3)/2 assert heurisch(1/sqrt(9", "test_heurisch_hacking(): assert (heurisch(sqrt(1 + 7*x**2), x, hints=[]) == x*sqrt(1 +", "4*x) + log(x) assert drop_const(ratsimp(heurisch(f, x)), x) == g def", "assert drop_const(ratsimp(heurisch(f, x)), x) == g def test_pmint_trig(): f =", "+ y), x) in [I/sqrt(y)*log(x + sqrt(-y))/2 - I/sqrt(y)*log(x -", "def test_heurisch_symbolic_coeffs(): assert heurisch(1/(x + y), x) == log(x +", "where C = 5*pi*I/12, Mathematica gives identical # result in", "== (x*x**z*y)/(z+1) assert heurisch(Sum(x**z, (z, 1, 2)).function, z) == x**z/log(x)", "== -exp(-x) assert heurisch(exp(17*x), x) == exp(17*x) / 17 assert", "+ 7*x**2), x, hints=[]) == sqrt(7)*asinh(sqrt(7)*x)/7) assert (heurisch(1/sqrt(1 - 7*x**2),", "== log(x + y) f = 1/(y - x) assert", "1/(4*x**4) def test_heurisch_log(): assert heurisch(log(x), x) == x*log(x) - x", "2*x*exp(x) + 2*x*log(x) + exp(2*x) + 2*exp(x)*log(x) + log(x)**2)/2 +", "LambertW(x) 
g = x*LambertW(x) - x + x/LambertW(x) assert heurisch(f,", "[sin(x)**2 / 2, -cos(x)**2 / 2] assert heurisch(cos(x)/sin(x), x) ==", "def test_RR(): # Make sure the algorithm does the right", "= sqrt(x**2/((y - x)*(y + x))) assert heurisch_wrapper(f, x) ==", "assert heurisch(x*sin(7*x), x) == sin(7*x) / 49 - x*cos(7*x) /", "g = Rational(1,2)*ln(x + AiryAi(x)) + Rational(1,2)*ln(x - AiryAi(x)) #", "\\ - y**2*sqrt(x**2)*sqrt(1/(-x**2 + y**2))/x def test_sympyissue_3609(): assert heurisch(1/(x *", "+ 4*x**3 + 4*x) + log(x) assert drop_const(ratsimp(heurisch(f, x)), x)", "sqrt(pi)*erf(x)**2 / 4 def test_heurisch_symbolic_coeffs(): assert heurisch(1/(x + y), x)", "I/sqrt(y)*log(x - sqrt(-y))/2, I*log(x + I*sqrt(y)) / (2*sqrt(y)) - I*log(x", "# TODO: heurisch() is off by a constant: -3/4. Possibly", "heurisch(cos(x), x) == sin(x) assert heurisch(tan(x), x) in [ log(1", "diofant changes # signs of expressions without any care. #", "heurisch(f, x) == g @pytest.mark.slow # 8 seconds on 3.4", "still correct? assert heurisch(5*x**5/( 2*x**6 - 5), x) in [5*log(2*x**6", "heurisch(f, x) == g def test_RR(): # Make sure the", "heurisch(sin(x)*sin(y), y) == -cos(y)*sin(x) # gives sin(x) in answer when", "x*sqrt(4 + x**2)/4 def test_heurisch_mixed(): assert heurisch(sin(x)*exp(x), x) == exp(x)*sin(x)/2", "assert heurisch(x*cosh(x), x) == x*sinh(x) - cosh(x) assert heurisch( x*asinh(x/2),", "g = -AiryAi(x) + AiryAi(1, x)*x # Whittaker functions #", "but Diofant requires a lot of guidance to properly simplify", "- sqrt(pi)*log(erf(x) + 1)/8 - sqrt(pi)/(4*erf(x) - 4) assert ratsimp(heurisch(f,", "log(x + y) assert heurisch(1/(x + sqrt(2)), x) == log(x", "z) == x**z/log(x) def test_heurisch_trigonometric(): assert heurisch(sin(x), x) == -cos(x)" ]
[ "a torch.Tensor. Got {type(input)}\") if not isinstance(contrast_factor, (float, torch.Tensor,)): raise", "torch.Tensor: Adjusted image. \"\"\" def __init__(self, contrast_factor: Union[float, torch.Tensor]) ->", "= torch.unsqueeze(hue_factor, dim=-1) # unpack the hsv values h, s,", "Union[float, torch.Tensor] = saturation_factor def forward(self, input: torch.Tensor) -> torch.Tensor:", "out def adjust_brightness(input: torch.Tensor, brightness_factor: Union[float, torch.Tensor]) -> torch.Tensor: r\"\"\"Adjust", "input: torch.Tensor) -> torch.Tensor: # type: ignore return adjust_saturation(input, self.saturation_factor)", "not a torch.Tensor. Got {type(input)}\") if not isinstance(contrast_factor, (float, torch.Tensor,)):", "(float, optional): The constant multiplier. Default 1. Returns: torch.Tensor: Adjusted", "out def adjust_saturation(input: torch.Tensor, saturation_factor: Union[float, torch.Tensor]) -> torch.Tensor: r\"\"\"Adjust", "be a float number or torch.Tensor.\" f\"Got {type(saturation_factor)}\") if isinstance(saturation_factor,", "torch.unsqueeze(contrast_factor, dim=-1) # Apply contrast factor to each channel x_adjust:", "float): hue_factor = torch.tensor([hue_factor]) hue_factor = hue_factor.to(input.device).to(input.dtype) if ((hue_factor <", "to each channel x_adjust: torch.Tensor = input * contrast_factor #", "must be non-negative. Got {contrast_factor}\") for _ in input.shape[1:]: contrast_factor", "in hsv format already. See :class:`~kornia.color.AdjustHue` for details. \"\"\" if", "torch.Tensor. Got {type(gain)}\") if isinstance(gamma, float): gamma = torch.tensor([gamma]) if", "the hue channel. Should be in [-PI, PI]. PI and", "0 will give a black and white image, 1 will", "image. 
\"\"\" def __init__(self, contrast_factor: Union[float, torch.Tensor]) -> None: super(AdjustContrast,", "black image, 1 does not modify the input image while", "def __init__(self, contrast_factor: Union[float, torch.Tensor]) -> None: super(AdjustContrast, self).__init__() self.contrast_factor:", "(\\*, N). contrast_factor (Union[float, torch.Tensor]): Contrast adjust factor per element", "hue channel in HSV space in positive and negative direction", "# Apply the gamma correction x_adjust: torch.Tensor = gain *", "N). saturation_factor (float): How much to adjust the saturation. 0", "isinstance(contrast_factor, (float, torch.Tensor,)): raise TypeError(f\"The factor should be either a", "{gamma}\") if (gain < 0.0).any(): raise ValueError(f\"Gain must be non-negative.", "f\"Got {type(contrast_factor)}\") if isinstance(contrast_factor, float): contrast_factor = torch.tensor([contrast_factor]) contrast_factor =", "brightness by this factor. Returns: torch.Tensor: Adjusted image. \"\"\" def", "the range [-PI, PI]. Got {hue_factor}\") for _ in input.shape[1:]:", "self).__init__() self.contrast_factor: Union[float, torch.Tensor] = contrast_factor def forward(self, input: torch.Tensor)", "0 generates a compleatly black image, 1 does not modify", "torch.Tensor]): Brightness adjust factor per element in the batch. 0", "torch.nn as nn from kornia.color.hsv import rgb_to_hsv, hsv_to_rgb from kornia.constants", "-> torch.Tensor: r\"\"\"Adjust color saturation of an image. Expecting input", "contrast_factor # Truncate between pixel values out: torch.Tensor = torch.clamp(x_adjust,", "the original image. Returns: torch.Tensor: Adjusted image. \"\"\" def __init__(self,", "an RGB image in the range of [0, 1]. Args:", "Returns: torch.Tensor: Adjusted image. \"\"\" def __init__(self, hue_factor: Union[float, torch.Tensor])", "hsv values h, s, v = torch.chunk(input, chunks=3, dim=-3) #", "of (\\*, N). 
gamma (float): Non negative real number, same", "s, v], dim=-3) return out def adjust_hue(input: torch.Tensor, hue_factor: Union[float,", "same as γ\\gammaγ in the equation. gamma larger than 1", "any other non-negative number modify the brightness by this factor.", "(torch.Tensor): Image/Tensor to be adjusted in the shape of (\\*,", "torch.Tensor: # type: ignore return adjust_saturation(input, self.saturation_factor) class AdjustHue(nn.Module): r\"\"\"Adjust", "format already. See :class:`~kornia.color.AdjustHue` for details. \"\"\" if not torch.is_tensor(input):", "input.shape[1:]: contrast_factor = torch.unsqueeze(contrast_factor, dim=-1) # Apply contrast factor to", "(\\*, N). hue_factor (float): How much to shift the hue", "be in the range of [0, 1]. Args: input (torch.Tensor):", "= adjust_saturation_raw(x_hsv, saturation_factor) # convert back to rgb out: torch.Tensor", "each channel x_adjust: torch.Tensor = input * contrast_factor # Truncate", "torch.Tensor]) -> torch.Tensor: r\"\"\"Adjust Brightness of an image. See :class:`~kornia.color.AdjustBrightness`", "an image. The input image is expected to be in", "(Union[float, torch.Tensor]): Brightness adjust factor per element in the batch.", "is not a torch.Tensor. Got {type(input)}\") if not isinstance(hue_factor, (float,", "range of [0, 1]. Args: input (torch.Tensor): Image/Tensor to be", "((hue_factor < -pi) | (hue_factor > pi)).any(): raise ValueError(f\"Hue-factor must", "non-negative. Got {gain}\") for _ in input.shape[1:]: gamma = torch.unsqueeze(gamma,", "Apply contrast factor to each channel x_adjust: torch.Tensor = input", "negative direction respectively. 0 means no shift. Therefore, both -PI", "isinstance(gamma, float): gamma = torch.tensor([gamma]) if isinstance(gain, float): gain =", "should be a float number or torch.Tensor.\" f\"Got {type(saturation_factor)}\") if", "of an image. The input image is expected to be", "type is not a torch.Tensor. 
Got {type(input)}\") if not isinstance(contrast_factor,", "adjusted in the shape of (\\*, N). contrast_factor (Union[float, torch.Tensor]):", "gain.to(input.device).to(input.dtype) if (gamma < 0.0).any(): raise ValueError(f\"Gamma must be non-negative.", "None: super(AdjustSaturation, self).__init__() self.saturation_factor: Union[float, torch.Tensor] = saturation_factor def forward(self,", "hue_factor = hue_factor.to(input.device).to(input.dtype) if ((hue_factor < -pi) | (hue_factor >", "float = 2 * pi.item() h_out: torch.Tensor = torch.fmod(h +", "torch.Tensor]) -> torch.Tensor: r\"\"\"Adjust hue of an image. See :class:`~kornia.color.AdjustHue`", "h_out: torch.Tensor = torch.fmod(h + hue_factor, divisor) # pack back", "0 means no shift. Therefore, both -PI and PI will", "Union[float, torch.Tensor] = 1.) -> None: super(AdjustGamma, self).__init__() self.gamma: Union[float,", "respectively. 0 means no shift. Therefore, both -PI and PI", "N). brightness_factor (Union[float, torch.Tensor]): Brightness adjust factor per element in", "-pi) | (hue_factor > pi)).any(): raise ValueError(f\"Hue-factor must be in", "isinstance(hue_factor, (float, torch.Tensor)): raise TypeError(f\"The hue_factor should be a float", "transform the hue value and appl module divisor: float =", "must be non-negative. Got {gamma}\") if (gain < 0.0).any(): raise", "= torch.clamp(x_adjust, 0.0, 1.0) return out def adjust_contrast(input: torch.Tensor, contrast_factor:", "torch.is_tensor(input): raise TypeError(f\"Input type is not a torch.Tensor. Got {type(input)}\")", "= contrast_factor.to(input.device).to(input.dtype) if (contrast_factor < 0).any(): raise ValueError(f\"Contrast factor must", "PIL. Hence, the output differs from TorchVision. The input image", "adjust factor per element in the batch. 0 generates a", "hue_factor should be a float number or torch.Tensor in the", "[-PI, PI]. 
Got {hue_factor}\") for _ in input.shape[1:]: hue_factor =", "expected to be an RGB image in the range of", "torch.tensor([gamma]) if isinstance(gain, float): gain = torch.tensor([gain]) gamma = gamma.to(input.device).to(input.dtype)", "h, s, v = torch.chunk(input, chunks=3, dim=-3) # transform the", "gain = torch.unsqueeze(gain, dim=-1) # Apply the gamma correction x_adjust:", "* contrast_factor # Truncate between pixel values out: torch.Tensor =", "while 0 gives the original image. Returns: torch.Tensor: Adjusted image.", "torch.tensor([saturation_factor]) saturation_factor = saturation_factor.to(input.device).to(input.dtype) if (saturation_factor < 0).any(): raise ValueError(f\"Saturation", "type: ignore return adjust_contrast(input, self.contrast_factor) class AdjustBrightness(nn.Module): r\"\"\"Adjust Brightness of", "Brightness of an image. This implementation aligns OpenCV, not PIL.", "shape of (\\*, N). brightness_factor (Union[float, torch.Tensor]): Brightness adjust factor", "float or torch.Tensor. \" f\"Got {type(contrast_factor)}\") if isinstance(contrast_factor, float): contrast_factor", "must be non-negative. Got {gain}\") for _ in input.shape[1:]: gamma", "torch.fmod(h + hue_factor, divisor) # pack back back the corrected", "complementary colors while 0 gives the original image. Returns: torch.Tensor:", "= torch.clamp(x_adjust, 0.0, 1.0) return out def adjust_brightness(input: torch.Tensor, brightness_factor:", "1.0) return out def adjust_brightness(input: torch.Tensor, brightness_factor: Union[float, torch.Tensor]) ->", "torch.clamp(s * saturation_factor, min=0, max=1) # pack back back the", "(contrast_factor < 0).any(): raise ValueError(f\"Contrast factor must be non-negative. Got", "brightness_factor = torch.unsqueeze(brightness_factor, dim=-1) # Apply brightness factor to each", "format already. See :class:`~kornia.color.AdjustSaturation` for details. \"\"\" if not torch.is_tensor(input):", "much to adjust the saturation. 
0 will give a black", "type: ignore return adjust_saturation(input, self.saturation_factor) class AdjustHue(nn.Module): r\"\"\"Adjust hue of", "Union[float, torch.Tensor] = hue_factor def forward(self, input: torch.Tensor) -> torch.Tensor:", "def adjust_hue_raw(input: torch.Tensor, hue_factor: Union[float, torch.Tensor]) -> torch.Tensor: r\"\"\"Adjust hue", ":class:`~kornia.color.AdjustSaturation` for details. \"\"\" if not torch.is_tensor(input): raise TypeError(f\"Input type", "differs from TorchVision. The input image is expected to be", "ValueError(f\"Hue-factor must be in the range [-PI, PI]. Got {hue_factor}\")", "non-negative. Got {gamma}\") if (gain < 0.0).any(): raise ValueError(f\"Gain must", "torch.Tensor]) -> None: super(AdjustHue, self).__init__() self.hue_factor: Union[float, torch.Tensor] = hue_factor", "torch.Tensor) -> torch.Tensor: # type: ignore return adjust_gamma(input, self.gamma, self.gain)", "hue_factor, divisor) # pack back back the corrected hue out:", "correction on an image. The input image is expected to", "isinstance(gamma, (float, torch.Tensor)): raise TypeError(f\"The gamma should be a positive", "ignore return adjust_saturation(input, self.saturation_factor) class AdjustHue(nn.Module): r\"\"\"Adjust hue of an", "rgb_to_hsv(input) # perform the conversion x_adjusted: torch.Tensor = adjust_saturation_raw(x_hsv, saturation_factor)", "black and white image, 1 will give the original image", "x_adjust: torch.Tensor = gain * torch.pow(input, gamma) # Truncate between", "# type: ignore return adjust_gamma(input, self.gamma, self.gain) class AdjustContrast(nn.Module): r\"\"\"Adjust", "= 1.) 
-> torch.Tensor: r\"\"\"Perform gamma correction on an image.", "pi.item() h_out: torch.Tensor = torch.fmod(h + hue_factor, divisor) # pack", "def __init__(self, hue_factor: Union[float, torch.Tensor]) -> None: super(AdjustHue, self).__init__() self.hue_factor:", "ignore return adjust_hue(input, self.hue_factor) class AdjustGamma(nn.Module): r\"\"\"Perform gamma correction on", "should be either a float or torch.Tensor. \" f\"Got {type(contrast_factor)}\")", "shape of (\\*, N). saturation_factor (float): How much to adjust", "be adjusted in the shape of (\\*, N). contrast_factor (Union[float,", "= torch.tensor([contrast_factor]) contrast_factor = contrast_factor.to(input.device).to(input.dtype) if (contrast_factor < 0).any(): raise", "raise TypeError(f\"The factor should be either a float or torch.Tensor.", "be non-negative. Got {gamma}\") if (gain < 0.0).any(): raise ValueError(f\"Gain", "or torch.Tensor. \" f\"Got {type(contrast_factor)}\") if isinstance(contrast_factor, float): contrast_factor =", "adjust the saturation. 0 will give a black and white", "torch.Tensor = hsv_to_rgb(x_adjusted) return out def adjust_hue_raw(input: torch.Tensor, hue_factor: Union[float,", "will enhance the saturation by a factor of 2. Returns:", "# type: ignore return adjust_contrast(input, self.contrast_factor) class AdjustBrightness(nn.Module): r\"\"\"Adjust Brightness", "1 make dark regions lighter. gain (float, optional): The constant", "other non-negative number modify the brightness by this factor. Returns:", "torch.Tensor, contrast_factor: Union[float, torch.Tensor]) -> torch.Tensor: r\"\"\"Adjust Contrast of an", "v], dim=-3) return out def adjust_saturation(input: torch.Tensor, saturation_factor: Union[float, torch.Tensor])", "contrast_factor = torch.unsqueeze(contrast_factor, dim=-1) # Apply contrast factor to each", "input: torch.Tensor) -> torch.Tensor: # type: ignore return adjust_gamma(input, self.gamma,", "per element in the batch. 
0 does not modify the", "class AdjustSaturation(nn.Module): r\"\"\"Adjust color saturation of an image. The input", "an image. See :class:`~kornia.color.AdjustSaturation` for details. \"\"\" # convert the", "factor to each channel x_adjust: torch.Tensor = input + brightness_factor", "be in the range [-PI, PI]. Got {hue_factor}\") for _", "non-negative. Got {saturation_factor}\") for _ in input.shape[1:]: saturation_factor = torch.unsqueeze(saturation_factor,", "the batch. 0 does not modify the input image while", "input image while any other number modify the brightness. Returns:", "type is not a torch.Tensor. Got {type(input)}\") if not isinstance(gamma,", "= hsv_to_rgb(x_adjusted) return out def adjust_gamma(input: torch.Tensor, gamma: Union[float, torch.Tensor],", "range of [0, 1]. Args: input (torch.Tensor): Image to be", "aligns OpenCV, not PIL. Hence, the output differs from TorchVision.", "contrast_factor (Union[float, torch.Tensor]): Contrast adjust factor per element in the", "input: torch.Tensor) -> torch.Tensor: # type: ignore return adjust_hue(input, self.hue_factor)", "if not isinstance(saturation_factor, (float, torch.Tensor,)): raise TypeError(f\"The saturation_factor should be", "the input image while any other non-negative number modify the", "for details. \"\"\" if not torch.is_tensor(input): raise TypeError(f\"Input type is", "gain: Union[float, torch.Tensor] = 1.) -> torch.Tensor: r\"\"\"Perform gamma correction", "if not isinstance(brightness_factor, (float, torch.Tensor,)): raise TypeError(f\"The factor should be", "pack back back the corrected hue out: torch.Tensor = torch.cat([h_out,", "N). hue_factor (float): How much to shift the hue channel.", "image is expected to be an RGB image in the", "s, v = torch.chunk(input, chunks=3, dim=-3) # transform the hue", "conversion x_adjusted: torch.Tensor = adjust_hue_raw(x_hsv, hue_factor) # convert back to", "correction on an image. See :class:`~kornia.color.AdjustGamma` for details. 
\"\"\" if", "torch.Tensor)): raise TypeError(f\"The hue_factor should be a float number or", "will give a black and white image, 1 will give", "rgb out: torch.Tensor = hsv_to_rgb(x_adjusted) return out def adjust_gamma(input: torch.Tensor,", "out: torch.Tensor = hsv_to_rgb(x_adjusted) return out def adjust_gamma(input: torch.Tensor, gamma:", "= torch.clamp(x_adjust, 0.0, 1.0) return out class AdjustSaturation(nn.Module): r\"\"\"Adjust color", "saturation_factor = saturation_factor.to(input.device).to(input.dtype) if (saturation_factor < 0).any(): raise ValueError(f\"Saturation factor", "hsv x_hsv: torch.Tensor = rgb_to_hsv(input) # perform the conversion x_adjusted:", "chunks=3, dim=-3) # transform the hue value and appl module", "brightness_factor: Union[float, torch.Tensor]) -> None: super(AdjustBrightness, self).__init__() self.brightness_factor: Union[float, torch.Tensor]", "* pi.item() h_out: torch.Tensor = torch.fmod(h + hue_factor, divisor) #", "out class AdjustSaturation(nn.Module): r\"\"\"Adjust color saturation of an image. The", "give the original image while 2 will enhance the saturation", "type: ignore return adjust_gamma(input, self.gamma, self.gain) class AdjustContrast(nn.Module): r\"\"\"Adjust Contrast", "torch.Tensor = input + brightness_factor # Truncate between pixel values", "does not modify the input image while any other non-negative", "appl module divisor: float = 2 * pi.item() h_out: torch.Tensor", "Union[float, torch.Tensor] = 1.) -> torch.Tensor: r\"\"\"Perform gamma correction on", "adjusted in the shape of (\\*, N). gamma (float): Non", "Union[float, torch.Tensor] = gain def forward(self, input: torch.Tensor) -> torch.Tensor:", "or torch.Tensor. 
\" f\"Got {type(brightness_factor)}\") if isinstance(brightness_factor, float): brightness_factor =", "super(AdjustContrast, self).__init__() self.contrast_factor: Union[float, torch.Tensor] = contrast_factor def forward(self, input:", "torch.Tensor) -> torch.Tensor: # type: ignore return adjust_saturation(input, self.saturation_factor) class", "Union[float, torch.Tensor]) -> torch.Tensor: r\"\"\"Adjust Contrast of an image. See", "torch.Tensor, hue_factor: Union[float, torch.Tensor]) -> torch.Tensor: r\"\"\"Adjust hue of an", "RGB image in the range of [0, 1]. Args: input", "| (hue_factor > pi)).any(): raise ValueError(f\"Hue-factor must be in the", "the original image while 2 will enhance the saturation by", "See :class:`~kornia.color.AdjustContrast` for details. \"\"\" if not torch.is_tensor(input): raise TypeError(f\"Input", "Hence, the output differs from TorchVision. The input image is", "(gain < 0.0).any(): raise ValueError(f\"Gain must be non-negative. Got {gain}\")", "factor per element in the batch. 0 does not modify", "return adjust_gamma(input, self.gamma, self.gain) class AdjustContrast(nn.Module): r\"\"\"Adjust Contrast of an", "[0, 1]. Args: input (torch.Tensor): Image to be adjusted in", "rgb_to_hsv, hsv_to_rgb from kornia.constants import pi def adjust_saturation_raw(input: torch.Tensor, saturation_factor:", "self.gain: Union[float, torch.Tensor] = gain def forward(self, input: torch.Tensor) ->", "# transform the hue value and appl module divisor: float", "torch.Tensor]) -> torch.Tensor: r\"\"\"Adjust color saturation of an image. Expecting", "back back the corrected hue out: torch.Tensor = torch.cat([h_out, s,", "adjust_saturation(input, self.saturation_factor) class AdjustHue(nn.Module): r\"\"\"Adjust hue of an image. The", "an image. The input image is expected to be an", "= hue_factor def forward(self, input: torch.Tensor) -> torch.Tensor: # type:", "x_adjust: torch.Tensor = input * contrast_factor # Truncate between pixel", "original image. 
Returns: torch.Tensor: Adjusted image. \"\"\" def __init__(self, hue_factor:", "input.shape[1:]: saturation_factor = torch.unsqueeze(saturation_factor, dim=-1) # unpack the hsv values", "= saturation_factor def forward(self, input: torch.Tensor) -> torch.Tensor: # type:", "not isinstance(gamma, (float, torch.Tensor)): raise TypeError(f\"The gamma should be a", "if (gamma < 0.0).any(): raise ValueError(f\"Gamma must be non-negative. Got", "in hsv format already. See :class:`~kornia.color.AdjustSaturation` for details. \"\"\" if", "> pi)).any(): raise ValueError(f\"Hue-factor must be in the range [-PI,", "input image is expected to be an RGB image in", "torch.Tensor = torch.clamp(s * saturation_factor, min=0, max=1) # pack back", "in the range between\" f\" [-PI, PI]. Got {type(hue_factor)}\") if", "torch.Tensor: r\"\"\"Adjust hue of an image. See :class:`~kornia.color.AdjustHue` for details.", "forward(self, input: torch.Tensor) -> torch.Tensor: # type: ignore return adjust_saturation(input,", "adjust_hue_raw(x_hsv, hue_factor) # convert back to rgb out: torch.Tensor =", "forward(self, input: torch.Tensor) -> torch.Tensor: # type: ignore return adjust_hue(input,", "TypeError(f\"The saturation_factor should be a float number or torch.Tensor.\" f\"Got", "not a torch.Tensor. Got {type(input)}\") if not isinstance(hue_factor, (float, torch.Tensor)):", "1.) -> torch.Tensor: r\"\"\"Perform gamma correction on an image. See", "both -PI and PI will give an image with complementary", "hue of an image. See :class:`~kornia.color.AdjustHue` for details. \"\"\" #", "r\"\"\"Adjust color saturation of an image. The input image is", "(torch.Tensor): Image to be adjusted in the shape of (\\*,", "r\"\"\"Adjust hue of an image. See :class:`~kornia.color.AdjustHue` for details. \"\"\"", "of hue channel in HSV space in positive and negative", "input to be in hsv format already. 
See :class:`~kornia.color.AdjustHue` for", "in input.shape[1:]: gamma = torch.unsqueeze(gamma, dim=-1) gain = torch.unsqueeze(gain, dim=-1)", "-> None: super(AdjustBrightness, self).__init__() self.brightness_factor: Union[float, torch.Tensor] = brightness_factor def", "f\"Got {type(brightness_factor)}\") if isinstance(brightness_factor, float): brightness_factor = torch.tensor([brightness_factor]) brightness_factor =", "image. See :class:`~kornia.color.AdjustHue` for details. \"\"\" # convert the rgb", "float): gain = torch.tensor([gain]) gamma = gamma.to(input.device).to(input.dtype) gain = gain.to(input.device).to(input.dtype)", "to rgb out: torch.Tensor = hsv_to_rgb(x_adjusted) return out def adjust_gamma(input:", "hue channel. Should be in [-PI, PI]. PI and -PI", "x_adjusted: torch.Tensor = adjust_saturation_raw(x_hsv, saturation_factor) # convert back to rgb", "-PI give complete reversal of hue channel in HSV space", "if isinstance(gamma, float): gamma = torch.tensor([gamma]) if isinstance(gain, float): gain", "convert the rgb image to hsv x_hsv: torch.Tensor = rgb_to_hsv(input)", "= gamma self.gain: Union[float, torch.Tensor] = gain def forward(self, input:", "Union[float, torch.Tensor], gain: Union[float, torch.Tensor] = 1.) -> torch.Tensor: r\"\"\"Perform", "def adjust_hue(input: torch.Tensor, hue_factor: Union[float, torch.Tensor]) -> torch.Tensor: r\"\"\"Adjust hue", "= torch.tensor([brightness_factor]) brightness_factor = brightness_factor.to(input.device).to(input.dtype) for _ in input.shape[1:]: brightness_factor", "forward(self, input: torch.Tensor) -> torch.Tensor: # type: ignore return adjust_brightness(input,", "image in the range of [0, 1]. Args: input (torch.Tensor):", "the input image while any other number modify the brightness.", "hue of an image. Expecting input to be in hsv", "Contrast of an image. See :class:`~kornia.color.AdjustContrast` for details. 
\"\"\" if", "v], dim=-3) return out def adjust_hue(input: torch.Tensor, hue_factor: Union[float, torch.Tensor])", "is expected to be in the range of [0, 1].", "not modify the input image while any other non-negative number", "Union[float, torch.Tensor]) -> torch.Tensor: r\"\"\"Adjust hue of an image. Expecting", "for _ in input.shape[1:]: saturation_factor = torch.unsqueeze(saturation_factor, dim=-1) # unpack", "out: torch.Tensor = hsv_to_rgb(x_adjusted) return out def adjust_hue_raw(input: torch.Tensor, hue_factor:", "See :class:`~kornia.color.AdjustGamma` for details. \"\"\" if not torch.is_tensor(input): raise TypeError(f\"Input", "will give an image with complementary colors while 0 gives", "adjust_gamma(input, self.gamma, self.gain) class AdjustContrast(nn.Module): r\"\"\"Adjust Contrast of an image.", "input.shape[1:]: brightness_factor = torch.unsqueeze(brightness_factor, dim=-1) # Apply brightness factor to", "torch.unsqueeze(gain, dim=-1) # Apply the gamma correction x_adjust: torch.Tensor =", "image. The input image is expected to be an RGB", "r\"\"\"Adjust Brightness of an image. This implementation aligns OpenCV, not", "or torch.Tensor.\" f\"Got {type(saturation_factor)}\") if isinstance(saturation_factor, float): saturation_factor = torch.tensor([saturation_factor])", "make the shadows darker, while gamma smaller than 1 make", "the brightness by this factor. Returns: torch.Tensor: Adjusted image. \"\"\"", "dim=-1) # Apply the gamma correction x_adjust: torch.Tensor = gain", "channel x_adjust: torch.Tensor = input * contrast_factor # Truncate between", "and negative direction respectively. 0 means no shift. 
Therefore, both", "in input.shape[1:]: brightness_factor = torch.unsqueeze(brightness_factor, dim=-1) # Apply brightness factor", "\"\"\" def __init__(self, brightness_factor: Union[float, torch.Tensor]) -> None: super(AdjustBrightness, self).__init__()", "import Union import torch import torch.nn as nn from kornia.color.hsv", "pack back back the corrected hue out: torch.Tensor = torch.cat([h,", "contrast_factor: Union[float, torch.Tensor]) -> None: super(AdjustContrast, self).__init__() self.contrast_factor: Union[float, torch.Tensor]", "self).__init__() self.hue_factor: Union[float, torch.Tensor] = hue_factor def forward(self, input: torch.Tensor)", "the hue value and appl module divisor: float = 2", "torch.Tensor] = 1.) -> torch.Tensor: r\"\"\"Perform gamma correction on an", "input to be in hsv format already. See :class:`~kornia.color.AdjustSaturation` for", "(gamma < 0.0).any(): raise ValueError(f\"Gamma must be non-negative. Got {gamma}\")", "values out: torch.Tensor = torch.clamp(x_adjust, 0.0, 1.0) return out def", "not a torch.Tensor. Got {type(input)}\") if not isinstance(brightness_factor, (float, torch.Tensor,)):", "larger than 1 make the shadows darker, while gamma smaller", "out: torch.Tensor = torch.clamp(x_adjust, 0.0, 1.0) return out def adjust_contrast(input:", "-> torch.Tensor: # type: ignore return adjust_contrast(input, self.contrast_factor) class AdjustBrightness(nn.Module):", "x_hsv: torch.Tensor = rgb_to_hsv(input) # perform the conversion x_adjusted: torch.Tensor", "[-PI, PI]. PI and -PI give complete reversal of hue", "of (\\*, N). 
brightness_factor (Union[float, torch.Tensor]): Brightness adjust factor per", "* torch.pow(input, gamma) # Truncate between pixel values out: torch.Tensor", "type: ignore return adjust_hue(input, self.hue_factor) class AdjustGamma(nn.Module): r\"\"\"Perform gamma correction", "Union[float, torch.Tensor]) -> None: super(AdjustSaturation, self).__init__() self.saturation_factor: Union[float, torch.Tensor] =", "or torch.Tensor. Got {type(gamma)}\") if not isinstance(gain, (float, torch.Tensor)): raise", "dim=-1) gain = torch.unsqueeze(gain, dim=-1) # Apply the gamma correction", "Brightness of an image. See :class:`~kornia.color.AdjustBrightness` for details. \"\"\" if", "brightness_factor = torch.tensor([brightness_factor]) brightness_factor = brightness_factor.to(input.device).to(input.dtype) for _ in input.shape[1:]:", "to each channel x_adjust: torch.Tensor = input + brightness_factor #", "factor must be non-negative. Got {saturation_factor}\") for _ in input.shape[1:]:", "of [0, 1]. Args: input (torch.Tensor): Image/Tensor to be adjusted", "original image while 2 will enhance the saturation by a", "Got {type(input)}\") if not isinstance(gamma, (float, torch.Tensor)): raise TypeError(f\"The gamma", "dark regions lighter. gain (float, optional): The constant multiplier. Default", "compleatly black image, 1 does not modify the input image", "for _ in input.shape[1:]: brightness_factor = torch.unsqueeze(brightness_factor, dim=-1) # Apply", "image is expected to be in the range of [0,", "gamma correction x_adjust: torch.Tensor = gain * torch.pow(input, gamma) #", "image while any other non-negative number modify the brightness by", "out def adjust_gamma(input: torch.Tensor, gamma: Union[float, torch.Tensor], gain: Union[float, torch.Tensor]", "\"\"\" def __init__(self, contrast_factor: Union[float, torch.Tensor]) -> None: super(AdjustContrast, self).__init__()", "adjusted in the shape of (\\*, N). 
brightness_factor (Union[float, torch.Tensor]):", "torch.Tensor: r\"\"\"Adjust color saturation of an image. Expecting input to", "Union[float, torch.Tensor]) -> None: super(AdjustContrast, self).__init__() self.contrast_factor: Union[float, torch.Tensor] =", "torch.Tensor, saturation_factor: Union[float, torch.Tensor]) -> torch.Tensor: r\"\"\"Adjust color saturation of", "the shape of (\\*, N). hue_factor (float): How much to", "def adjust_saturation(input: torch.Tensor, saturation_factor: Union[float, torch.Tensor]) -> torch.Tensor: r\"\"\"Adjust color", "torch.unsqueeze(hue_factor, dim=-1) # unpack the hsv values h, s, v", "torch.clamp(x_adjust, 0.0, 1.0) return out def adjust_contrast(input: torch.Tensor, contrast_factor: Union[float,", "this factor. Returns: torch.Tensor: Adjusted image. \"\"\" def __init__(self, contrast_factor:", "factor. Returns: torch.Tensor: Adjusted image. \"\"\" def __init__(self, contrast_factor: Union[float,", "not PIL. Hence, the output differs from TorchVision. The input", "be a positive float or torch.Tensor. Got {type(gain)}\") if isinstance(gamma,", "torch.Tensor. Got {type(input)}\") if not isinstance(saturation_factor, (float, torch.Tensor,)): raise TypeError(f\"The", "in positive and negative direction respectively. 0 means no shift.", "(\\*, N). brightness_factor (Union[float, torch.Tensor]): Brightness adjust factor per element", ":class:`~kornia.color.AdjustSaturation` for details. \"\"\" # convert the rgb image to", "image to hsv x_hsv: torch.Tensor = rgb_to_hsv(input) # perform the", "PI and -PI give complete reversal of hue channel in", "the range between\" f\" [-PI, PI]. 
Got {type(hue_factor)}\") if isinstance(hue_factor,", "= torch.tensor([hue_factor]) hue_factor = hue_factor.to(input.device).to(input.dtype) if ((hue_factor < -pi) |", "Therefore, both -PI and PI will give an image with", "torch.Tensor.\" f\"Got {type(saturation_factor)}\") if isinstance(saturation_factor, float): saturation_factor = torch.tensor([saturation_factor]) saturation_factor", "should be a float number or torch.Tensor in the range", "isinstance(hue_factor, float): hue_factor = torch.tensor([hue_factor]) hue_factor = hue_factor.to(input.device).to(input.dtype) if ((hue_factor", "_ in input.shape[1:]: brightness_factor = torch.unsqueeze(brightness_factor, dim=-1) # Apply brightness", "self.contrast_factor: Union[float, torch.Tensor] = contrast_factor def forward(self, input: torch.Tensor) ->", "(\\*, N). gamma (float): Non negative real number, same as", "to be adjusted in the shape of (\\*, N). saturation_factor", "gamma smaller than 1 make dark regions lighter. gain (float,", "of (\\*, N). contrast_factor (Union[float, torch.Tensor]): Contrast adjust factor per", "= torch.unsqueeze(gain, dim=-1) # Apply the gamma correction x_adjust: torch.Tensor", "torch.cat([h, s_out, v], dim=-3) return out def adjust_saturation(input: torch.Tensor, saturation_factor:", "to be in hsv format already. See :class:`~kornia.color.AdjustHue` for details.", "hue out: torch.Tensor = torch.cat([h_out, s, v], dim=-3) return out", "ValueError(f\"Saturation factor must be non-negative. Got {saturation_factor}\") for _ in", "image. Expecting input to be in hsv format already. See", "torch.chunk(input, chunks=3, dim=-3) # transform the hue value and appl", "torch.Tensor] = saturation_factor def forward(self, input: torch.Tensor) -> torch.Tensor: #", "channel. Should be in [-PI, PI]. PI and -PI give", "torch.Tensor. \" f\"Got {type(contrast_factor)}\") if isinstance(contrast_factor, float): contrast_factor = torch.tensor([contrast_factor])", "in the shape of (\\*, N). 
contrast_factor (Union[float, torch.Tensor]): Contrast", "x_adjust: torch.Tensor = input + brightness_factor # Truncate between pixel", "non-negative. Got {contrast_factor}\") for _ in input.shape[1:]: contrast_factor = torch.unsqueeze(contrast_factor,", "r\"\"\"Adjust Contrast of an image. See :class:`~kornia.color.AdjustContrast` for details. \"\"\"", "torch.Tensor] = 1.) -> None: super(AdjustGamma, self).__init__() self.gamma: Union[float, torch.Tensor]", "(Union[float, torch.Tensor]): Contrast adjust factor per element in the batch.", "(hue_factor > pi)).any(): raise ValueError(f\"Hue-factor must be in the range", "for details. \"\"\" # convert the rgb image to hsv", "is not a torch.Tensor. Got {type(input)}\") if not isinstance(gamma, (float,", "darker, while gamma smaller than 1 make dark regions lighter.", "to be adjusted in the shape of (\\*, N). gamma", "be in hsv format already. See :class:`~kornia.color.AdjustSaturation` for details. \"\"\"", "= torch.tensor([saturation_factor]) saturation_factor = saturation_factor.to(input.device).to(input.dtype) if (saturation_factor < 0).any(): raise", "image. See :class:`~kornia.color.AdjustSaturation` for details. \"\"\" # convert the rgb", "torch.Tensor] = gamma self.gain: Union[float, torch.Tensor] = gain def forward(self,", "ignore return adjust_gamma(input, self.gamma, self.gain) class AdjustContrast(nn.Module): r\"\"\"Adjust Contrast of", "\"\"\" if not torch.is_tensor(input): raise TypeError(f\"Input type is not a", "nn from kornia.color.hsv import rgb_to_hsv, hsv_to_rgb from kornia.constants import pi", "max=1) # pack back back the corrected hue out: torch.Tensor", "if not isinstance(hue_factor, (float, torch.Tensor)): raise TypeError(f\"The hue_factor should be", "raise ValueError(f\"Contrast factor must be non-negative. Got {contrast_factor}\") for _", "from kornia.constants import pi def adjust_saturation_raw(input: torch.Tensor, saturation_factor: Union[float, torch.Tensor])", "an image. 
See :class:`~kornia.color.AdjustContrast` for details. \"\"\" if not torch.is_tensor(input):", "adjust_contrast(input: torch.Tensor, contrast_factor: Union[float, torch.Tensor]) -> torch.Tensor: r\"\"\"Adjust Contrast of", "while any other non-negative number modify the brightness by this", "be non-negative. Got {contrast_factor}\") for _ in input.shape[1:]: contrast_factor =", "in the batch. 0 does not modify the input image", "gamma) # Truncate between pixel values out: torch.Tensor = torch.clamp(x_adjust,", "the conversion x_adjusted: torch.Tensor = adjust_saturation_raw(x_hsv, saturation_factor) # convert back", "torch.Tensor, gamma: Union[float, torch.Tensor], gain: Union[float, torch.Tensor] = 1.) ->", "Got {contrast_factor}\") for _ in input.shape[1:]: contrast_factor = torch.unsqueeze(contrast_factor, dim=-1)", "an image. Expecting input to be in hsv format already.", "is expected to be an RGB image in the range", "N). gamma (float): Non negative real number, same as γ\\gammaγ", "lighter. gain (float, optional): The constant multiplier. Default 1. Returns:", "gamma: Union[float, torch.Tensor], gain: Union[float, torch.Tensor] = 1.) -> torch.Tensor:", "the shape of (\\*, N). saturation_factor (float): How much to", "channel in HSV space in positive and negative direction respectively.", "as nn from kornia.color.hsv import rgb_to_hsv, hsv_to_rgb from kornia.constants import", "modify the brightness by this factor. 
Returns: torch.Tensor: Adjusted image.", "-> None: super(AdjustGamma, self).__init__() self.gamma: Union[float, torch.Tensor] = gamma self.gain:", "for _ in input.shape[1:]: gamma = torch.unsqueeze(gamma, dim=-1) gain =", "not modify the input image while any other number modify", "contrast factor to each channel x_adjust: torch.Tensor = input *", "isinstance(brightness_factor, float): brightness_factor = torch.tensor([brightness_factor]) brightness_factor = brightness_factor.to(input.device).to(input.dtype) for _", "Apply the gamma correction x_adjust: torch.Tensor = gain * torch.pow(input,", "= gain def forward(self, input: torch.Tensor) -> torch.Tensor: # type:", "This implementation aligns OpenCV, not PIL. Hence, the output differs", "element in the batch. 0 does not modify the input", "r\"\"\"Adjust color saturation of an image. See :class:`~kornia.color.AdjustSaturation` for details.", "and appl module s_out: torch.Tensor = torch.clamp(s * saturation_factor, min=0,", "not a torch.Tensor. Got {type(input)}\") if not isinstance(saturation_factor, (float, torch.Tensor,)):", "saturation of an image. Expecting input to be in hsv", "ValueError(f\"Gamma must be non-negative. Got {gamma}\") if (gain < 0.0).any():", "# convert back to rgb out: torch.Tensor = hsv_to_rgb(x_adjusted) return", "a float number or torch.Tensor in the range between\" f\"", "not a torch.Tensor. Got {type(input)}\") if not isinstance(gamma, (float, torch.Tensor)):", "torch.Tensor = rgb_to_hsv(input) # perform the conversion x_adjusted: torch.Tensor =", "# unpack the hsv values h, s, v = torch.chunk(input,", "raise ValueError(f\"Gain must be non-negative. Got {gain}\") for _ in", "gamma = torch.unsqueeze(gamma, dim=-1) gain = torch.unsqueeze(gain, dim=-1) # Apply", "factor of 2. Returns: torch.Tensor: Adjusted image. 
\"\"\" def __init__(self,", "# type: ignore return adjust_saturation(input, self.saturation_factor) class AdjustHue(nn.Module): r\"\"\"Adjust hue", "See :class:`~kornia.color.AdjustSaturation` for details. \"\"\" if not torch.is_tensor(input): raise TypeError(f\"Input", "channel x_adjust: torch.Tensor = input + brightness_factor # Truncate between", "2. Returns: torch.Tensor: Adjusted image. \"\"\" def __init__(self, saturation_factor: Union[float,", "Should be in [-PI, PI]. PI and -PI give complete", "a torch.Tensor. Got {type(input)}\") if not isinstance(brightness_factor, (float, torch.Tensor,)): raise", "than 1 make dark regions lighter. gain (float, optional): The", "Returns: torch.Tensor: Adjusted image. \"\"\" def __init__(self, gamma: Union[float, torch.Tensor],", "Args: input (torch.Tensor): Image/Input to be adjusted in the shape", "an image. See :class:`~kornia.color.AdjustHue` for details. \"\"\" # convert the", "Got {type(gain)}\") if isinstance(gamma, float): gamma = torch.tensor([gamma]) if isinstance(gain,", "_ in input.shape[1:]: contrast_factor = torch.unsqueeze(contrast_factor, dim=-1) # Apply contrast", "import pi def adjust_saturation_raw(input: torch.Tensor, saturation_factor: Union[float, torch.Tensor]) -> torch.Tensor:", "return out def adjust_brightness(input: torch.Tensor, brightness_factor: Union[float, torch.Tensor]) -> torch.Tensor:", "r\"\"\"Adjust color saturation of an image. Expecting input to be", "gamma.to(input.device).to(input.dtype) gain = gain.to(input.device).to(input.dtype) if (gamma < 0.0).any(): raise ValueError(f\"Gamma", "not torch.is_tensor(input): raise TypeError(f\"Input type is not a torch.Tensor. Got", "enhance the saturation by a factor of 2. Returns: torch.Tensor:", "[0, 1]. 
Args: input (torch.Tensor): Image/Input to be adjusted in", "adjust_hue(input: torch.Tensor, hue_factor: Union[float, torch.Tensor]) -> torch.Tensor: r\"\"\"Adjust hue of", "(float, torch.Tensor)): raise TypeError(f\"The hue_factor should be a float number", "per element in the batch. 0 generates a compleatly black", "hue value and appl module s_out: torch.Tensor = torch.clamp(s *", ":class:`~kornia.color.AdjustContrast` for details. \"\"\" if not torch.is_tensor(input): raise TypeError(f\"Input type", "out def adjust_hue_raw(input: torch.Tensor, hue_factor: Union[float, torch.Tensor]) -> torch.Tensor: r\"\"\"Adjust", "torch.Tensor = gain * torch.pow(input, gamma) # Truncate between pixel", "torch.Tensor] = gain def forward(self, input: torch.Tensor) -> torch.Tensor: #", "input.shape[1:]: hue_factor = torch.unsqueeze(hue_factor, dim=-1) # unpack the hsv values", "= torch.cat([h_out, s, v], dim=-3) return out def adjust_hue(input: torch.Tensor,", "1.0) return out def adjust_contrast(input: torch.Tensor, contrast_factor: Union[float, torch.Tensor]) ->", "{type(brightness_factor)}\") if isinstance(brightness_factor, float): brightness_factor = torch.tensor([brightness_factor]) brightness_factor = brightness_factor.to(input.device).to(input.dtype)", "-> None: super(AdjustSaturation, self).__init__() self.saturation_factor: Union[float, torch.Tensor] = saturation_factor def", "min=0, max=1) # pack back back the corrected hue out:", "regions lighter. gain (float, optional): The constant multiplier. Default 1.", "constant multiplier. Default 1. Returns: torch.Tensor: Adjusted image. \"\"\" def", "{contrast_factor}\") for _ in input.shape[1:]: contrast_factor = torch.unsqueeze(contrast_factor, dim=-1) #", "torch.Tensor]): Contrast adjust factor per element in the batch. 0", "non-negative number modify the brightness by this factor. Returns: torch.Tensor:", "brightness_factor # Truncate between pixel values out: torch.Tensor = torch.clamp(x_adjust,", "Adjusted image. 
\"\"\" def __init__(self, saturation_factor: Union[float, torch.Tensor]) -> None:", "factor to each channel x_adjust: torch.Tensor = input * contrast_factor", "to be in hsv format already. See :class:`~kornia.color.AdjustSaturation` for details.", "saturation_factor should be a float number or torch.Tensor.\" f\"Got {type(saturation_factor)}\")", "torch.Tensor = input * contrast_factor # Truncate between pixel values", "multiplier. Default 1. Returns: torch.Tensor: Adjusted image. \"\"\" def __init__(self,", "1. Returns: torch.Tensor: Adjusted image. \"\"\" def __init__(self, gamma: Union[float,", "for _ in input.shape[1:]: hue_factor = torch.unsqueeze(hue_factor, dim=-1) # unpack", "much to shift the hue channel. Should be in [-PI,", "AdjustSaturation(nn.Module): r\"\"\"Adjust color saturation of an image. The input image", "torch.Tensor) -> torch.Tensor: # type: ignore return adjust_contrast(input, self.contrast_factor) class", "+ brightness_factor # Truncate between pixel values out: torch.Tensor =", "of (\\*, N). hue_factor (float): How much to shift the", "transform the hue value and appl module s_out: torch.Tensor =", "brightness_factor.to(input.device).to(input.dtype) for _ in input.shape[1:]: brightness_factor = torch.unsqueeze(brightness_factor, dim=-1) #", "of [0, 1]. Args: input (torch.Tensor): Image/Input to be adjusted", "not isinstance(contrast_factor, (float, torch.Tensor,)): raise TypeError(f\"The factor should be either", "input (torch.Tensor): Image/Input to be adjusted in the shape of", "details. \"\"\" if not torch.is_tensor(input): raise TypeError(f\"Input type is not", "Adjusted image. \"\"\" def __init__(self, gamma: Union[float, torch.Tensor], gain: Union[float,", "gain = torch.tensor([gain]) gamma = gamma.to(input.device).to(input.dtype) gain = gain.to(input.device).to(input.dtype) if", "saturation_factor, min=0, max=1) # pack back back the corrected hue", "image. 
\"\"\" def __init__(self, gamma: Union[float, torch.Tensor], gain: Union[float, torch.Tensor]", "be adjusted in the shape of (\\*, N). brightness_factor (Union[float,", "1 does not modify the input image while any other", "input (torch.Tensor): Image/Tensor to be adjusted in the shape of", "adjust_saturation_raw(x_hsv, saturation_factor) # convert back to rgb out: torch.Tensor =", "torch.Tensor = torch.cat([h_out, s, v], dim=-3) return out def adjust_hue(input:", "gives the original image. Returns: torch.Tensor: Adjusted image. \"\"\" def", "torch.Tensor]) -> None: super(AdjustBrightness, self).__init__() self.brightness_factor: Union[float, torch.Tensor] = brightness_factor", "out def adjust_hue(input: torch.Tensor, hue_factor: Union[float, torch.Tensor]) -> torch.Tensor: r\"\"\"Adjust", "should be a positive float or torch.Tensor. Got {type(gain)}\") if", "factor should be either a float or torch.Tensor. \" f\"Got", "modify the input image while any other non-negative number modify", "in the shape of (\\*, N). hue_factor (float): How much", "image. See :class:`~kornia.color.AdjustGamma` for details. \"\"\" if not torch.is_tensor(input): raise", "1]. Args: input (torch.Tensor): Image/Input to be adjusted in the", "be in [-PI, PI]. PI and -PI give complete reversal", "{type(gamma)}\") if not isinstance(gain, (float, torch.Tensor)): raise TypeError(f\"The gain should", "float): contrast_factor = torch.tensor([contrast_factor]) contrast_factor = contrast_factor.to(input.device).to(input.dtype) if (contrast_factor <", "input + brightness_factor # Truncate between pixel values out: torch.Tensor", "hue of an image. The input image is expected to", "float number or torch.Tensor.\" f\"Got {type(saturation_factor)}\") if isinstance(saturation_factor, float): saturation_factor", "in the range of [0, 1]. 
Args: input (torch.Tensor): Image/Tensor", "a float number or torch.Tensor.\" f\"Got {type(saturation_factor)}\") if isinstance(saturation_factor, float):", "torch.cat([h_out, s, v], dim=-3) return out def adjust_hue(input: torch.Tensor, hue_factor:", "be adjusted in the shape of (\\*, N). gamma (float):", "raise TypeError(f\"The gain should be a positive float or torch.Tensor.", "< 0).any(): raise ValueError(f\"Saturation factor must be non-negative. Got {saturation_factor}\")", "-PI and PI will give an image with complementary colors", "self.gain) class AdjustContrast(nn.Module): r\"\"\"Adjust Contrast of an image. This implementation", "or torch.Tensor. Got {type(gain)}\") if isinstance(gamma, float): gamma = torch.tensor([gamma])", "type is not a torch.Tensor. Got {type(input)}\") if not isinstance(hue_factor,", "-> torch.Tensor: r\"\"\"Adjust hue of an image. Expecting input to", "-> torch.Tensor: # type: ignore return adjust_gamma(input, self.gamma, self.gain) class", "torch.Tensor: # type: ignore return adjust_contrast(input, self.contrast_factor) class AdjustBrightness(nn.Module): r\"\"\"Adjust", "brightness_factor (Union[float, torch.Tensor]): Brightness adjust factor per element in the", "super(AdjustBrightness, self).__init__() self.brightness_factor: Union[float, torch.Tensor] = brightness_factor def forward(self, input:", "color saturation of an image. Expecting input to be in", "-> torch.Tensor: r\"\"\"Adjust Brightness of an image. See :class:`~kornia.color.AdjustBrightness` for", "0.0, 1.0) return out def adjust_contrast(input: torch.Tensor, contrast_factor: Union[float, torch.Tensor])", "saturation_factor: Union[float, torch.Tensor]) -> torch.Tensor: r\"\"\"Adjust color saturation of an", "torch.Tensor = torch.clamp(x_adjust, 0.0, 1.0) return out def adjust_contrast(input: torch.Tensor,", "should be either a float or torch.Tensor. \" f\"Got {type(brightness_factor)}\")", "raise ValueError(f\"Saturation factor must be non-negative. 
Got {saturation_factor}\") for _", "None: super(AdjustGamma, self).__init__() self.gamma: Union[float, torch.Tensor] = gamma self.gain: Union[float,", "Union[float, torch.Tensor]) -> torch.Tensor: r\"\"\"Adjust Brightness of an image. See", "the batch. 0 generates a compleatly black image, 1 does", "adjust_contrast(input, self.contrast_factor) class AdjustBrightness(nn.Module): r\"\"\"Adjust Brightness of an image. This", "gamma = gamma.to(input.device).to(input.dtype) gain = gain.to(input.device).to(input.dtype) if (gamma < 0.0).any():", "module s_out: torch.Tensor = torch.clamp(s * saturation_factor, min=0, max=1) #", "isinstance(contrast_factor, float): contrast_factor = torch.tensor([contrast_factor]) contrast_factor = contrast_factor.to(input.device).to(input.dtype) if (contrast_factor", "convert back to rgb out: torch.Tensor = hsv_to_rgb(x_adjusted) return out", "if (saturation_factor < 0).any(): raise ValueError(f\"Saturation factor must be non-negative.", "input (torch.Tensor): Image to be adjusted in the shape of", "gamma correction on an image. See :class:`~kornia.color.AdjustGamma` for details. \"\"\"", "raise TypeError(f\"The hue_factor should be a float number or torch.Tensor", "gamma correction on an image. The input image is expected", "def adjust_contrast(input: torch.Tensor, contrast_factor: Union[float, torch.Tensor]) -> torch.Tensor: r\"\"\"Adjust Contrast", "hsv_to_rgb(x_adjusted) return out def adjust_gamma(input: torch.Tensor, gamma: Union[float, torch.Tensor], gain:", "self).__init__() self.saturation_factor: Union[float, torch.Tensor] = saturation_factor def forward(self, input: torch.Tensor)", "self.saturation_factor) class AdjustHue(nn.Module): r\"\"\"Adjust hue of an image. The input", "self.hue_factor) class AdjustGamma(nn.Module): r\"\"\"Perform gamma correction on an image. The", "torch.clamp(x_adjust, 0.0, 1.0) return out class AdjustSaturation(nn.Module): r\"\"\"Adjust color saturation", "colors while 0 gives the original image. 
Returns: torch.Tensor: Adjusted", "gain (float, optional): The constant multiplier. Default 1. Returns: torch.Tensor:", "positive float or torch.Tensor. Got {type(gamma)}\") if not isinstance(gain, (float,", "torch.Tensor]) -> None: super(AdjustSaturation, self).__init__() self.saturation_factor: Union[float, torch.Tensor] = saturation_factor", "batch. 0 does not modify the input image while any", "factor must be non-negative. Got {contrast_factor}\") for _ in input.shape[1:]:", "The input image is expected to be in the range", "torch.Tensor]) -> torch.Tensor: r\"\"\"Adjust hue of an image. Expecting input", "of an image. See :class:`~kornia.color.AdjustHue` for details. \"\"\" # convert", "float or torch.Tensor. Got {type(gain)}\") if isinstance(gamma, float): gamma =", "r\"\"\"Adjust Brightness of an image. See :class:`~kornia.color.AdjustBrightness` for details. \"\"\"", "a torch.Tensor. Got {type(input)}\") if not isinstance(gamma, (float, torch.Tensor)): raise", "torch.Tensor. Got {type(input)}\") if not isinstance(gamma, (float, torch.Tensor)): raise TypeError(f\"The", "s_out, v], dim=-3) return out def adjust_saturation(input: torch.Tensor, saturation_factor: Union[float,", "torch.unsqueeze(gamma, dim=-1) gain = torch.unsqueeze(gain, dim=-1) # Apply the gamma", "None: super(AdjustHue, self).__init__() self.hue_factor: Union[float, torch.Tensor] = hue_factor def forward(self,", "back the corrected hue out: torch.Tensor = torch.cat([h_out, s, v],", "shift. Therefore, both -PI and PI will give an image", "expected to be in the range of [0, 1]. Args:", "while gamma smaller than 1 make dark regions lighter. gain", "be either a float or torch.Tensor. 
\" f\"Got {type(contrast_factor)}\") if", "be a float number or torch.Tensor in the range between\"", "= torch.unsqueeze(brightness_factor, dim=-1) # Apply brightness factor to each channel", "adjust_hue(input, self.hue_factor) class AdjustGamma(nn.Module): r\"\"\"Perform gamma correction on an image.", "torch.Tensor,)): raise TypeError(f\"The saturation_factor should be a float number or", "on an image. See :class:`~kornia.color.AdjustGamma` for details. \"\"\" if not", "gamma: Union[float, torch.Tensor], gain: Union[float, torch.Tensor] = 1.) -> None:", "def __init__(self, gamma: Union[float, torch.Tensor], gain: Union[float, torch.Tensor] = 1.)", "r\"\"\"Perform gamma correction on an image. See :class:`~kornia.color.AdjustGamma` for details.", "Returns: torch.Tensor: Adjusted image. \"\"\" def __init__(self, saturation_factor: Union[float, torch.Tensor])", "space in positive and negative direction respectively. 0 means no", "= torch.unsqueeze(gamma, dim=-1) gain = torch.unsqueeze(gain, dim=-1) # Apply the", "Args: input (torch.Tensor): Image to be adjusted in the shape", "give a black and white image, 1 will give the", "= input + brightness_factor # Truncate between pixel values out:", "\" f\"Got {type(brightness_factor)}\") if isinstance(brightness_factor, float): brightness_factor = torch.tensor([brightness_factor]) brightness_factor", "-> torch.Tensor: # type: ignore return adjust_saturation(input, self.saturation_factor) class AdjustHue(nn.Module):", "= 2 * pi.item() h_out: torch.Tensor = torch.fmod(h + hue_factor,", "the hsv values h, s, v = torch.chunk(input, chunks=3, dim=-3)", "number, same as γ\\gammaγ in the equation. gamma larger than", "__init__(self, contrast_factor: Union[float, torch.Tensor]) -> None: super(AdjustContrast, self).__init__() self.contrast_factor: Union[float,", "= torch.unsqueeze(saturation_factor, dim=-1) # unpack the hsv values h, s,", "the brightness. Returns: torch.Tensor: Adjusted image. 
\"\"\" def __init__(self, brightness_factor:", "the gamma correction x_adjust: torch.Tensor = gain * torch.pow(input, gamma)", "of an image. See :class:`~kornia.color.AdjustSaturation` for details. \"\"\" # convert", "the range of [0, 1]. Args: input (torch.Tensor): Image to", "corrected hue out: torch.Tensor = torch.cat([h, s_out, v], dim=-3) return", "the range of [0, 1]. Args: input (torch.Tensor): Image/Input to", "return out def adjust_gamma(input: torch.Tensor, gamma: Union[float, torch.Tensor], gain: Union[float,", "gain should be a positive float or torch.Tensor. Got {type(gain)}\")", "isinstance(saturation_factor, (float, torch.Tensor,)): raise TypeError(f\"The saturation_factor should be a float", "float or torch.Tensor. \" f\"Got {type(brightness_factor)}\") if isinstance(brightness_factor, float): brightness_factor", "output differs from TorchVision. The input image is expected to", "each channel x_adjust: torch.Tensor = input + brightness_factor # Truncate", "{hue_factor}\") for _ in input.shape[1:]: hue_factor = torch.unsqueeze(hue_factor, dim=-1) #", "Brightness adjust factor per element in the batch. 0 does", "def adjust_gamma(input: torch.Tensor, gamma: Union[float, torch.Tensor], gain: Union[float, torch.Tensor] =", "Got {type(input)}\") if not isinstance(brightness_factor, (float, torch.Tensor,)): raise TypeError(f\"The factor", "not isinstance(hue_factor, (float, torch.Tensor)): raise TypeError(f\"The hue_factor should be a", "f\"Got {type(saturation_factor)}\") if isinstance(saturation_factor, float): saturation_factor = torch.tensor([saturation_factor]) saturation_factor =", "while 2 will enhance the saturation by a factor of", "while any other number modify the brightness. 
Returns: torch.Tensor: Adjusted", "and PI will give an image with complementary colors while", "out: torch.Tensor = torch.clamp(x_adjust, 0.0, 1.0) return out class AdjustSaturation(nn.Module):", "forward(self, input: torch.Tensor) -> torch.Tensor: # type: ignore return adjust_gamma(input,", "torch.Tensor)): raise TypeError(f\"The gamma should be a positive float or", "{gain}\") for _ in input.shape[1:]: gamma = torch.unsqueeze(gamma, dim=-1) gain", "2 * pi.item() h_out: torch.Tensor = torch.fmod(h + hue_factor, divisor)", "r\"\"\"Perform gamma correction on an image. The input image is", "a float or torch.Tensor. \" f\"Got {type(contrast_factor)}\") if isinstance(contrast_factor, float):", "if isinstance(contrast_factor, float): contrast_factor = torch.tensor([contrast_factor]) contrast_factor = contrast_factor.to(input.device).to(input.dtype) if", "= torch.chunk(input, chunks=3, dim=-3) # transform the hue value and", "not isinstance(saturation_factor, (float, torch.Tensor,)): raise TypeError(f\"The saturation_factor should be a", "a torch.Tensor. Got {type(input)}\") if not isinstance(saturation_factor, (float, torch.Tensor,)): raise", "dim=-3) # transform the hue value and appl module s_out:", "torch.Tensor in the range between\" f\" [-PI, PI]. Got {type(hue_factor)}\")", "divisor: float = 2 * pi.item() h_out: torch.Tensor = torch.fmod(h", "if not isinstance(contrast_factor, (float, torch.Tensor,)): raise TypeError(f\"The factor should be", "The input image is expected to be an RGB image", "in the shape of (\\*, N). saturation_factor (float): How much", "saturation. 
0 will give a black and white image, 1", "\"\"\" def __init__(self, saturation_factor: Union[float, torch.Tensor]) -> None: super(AdjustSaturation, self).__init__()", "the shadows darker, while gamma smaller than 1 make dark", "perform the conversion x_adjusted: torch.Tensor = adjust_saturation_raw(x_hsv, saturation_factor) # convert", "input: torch.Tensor) -> torch.Tensor: # type: ignore return adjust_contrast(input, self.contrast_factor)", "hue_factor: Union[float, torch.Tensor]) -> None: super(AdjustHue, self).__init__() self.hue_factor: Union[float, torch.Tensor]", "to adjust the saturation. 0 will give a black and", ":class:`~kornia.color.AdjustBrightness` for details. \"\"\" if not torch.is_tensor(input): raise TypeError(f\"Input type", "= hue_factor.to(input.device).to(input.dtype) if ((hue_factor < -pi) | (hue_factor > pi)).any():", "be non-negative. Got {gain}\") for _ in input.shape[1:]: gamma =", "complete reversal of hue channel in HSV space in positive", "< 0.0).any(): raise ValueError(f\"Gamma must be non-negative. Got {gamma}\") if", "1.) -> None: super(AdjustGamma, self).__init__() self.gamma: Union[float, torch.Tensor] = gamma", "number or torch.Tensor.\" f\"Got {type(saturation_factor)}\") if isinstance(saturation_factor, float): saturation_factor =", "out: torch.Tensor = torch.cat([h_out, s, v], dim=-3) return out def", "of an image. See :class:`~kornia.color.AdjustBrightness` for details. \"\"\" if not", "AdjustBrightness(nn.Module): r\"\"\"Adjust Brightness of an image. This implementation aligns OpenCV,", "adjusted in the shape of (\\*, N). saturation_factor (float): How", "__init__(self, brightness_factor: Union[float, torch.Tensor]) -> None: super(AdjustBrightness, self).__init__() self.brightness_factor: Union[float,", "is not a torch.Tensor. 
Got {type(input)}\") if not isinstance(brightness_factor, (float,", "PI will give an image with complementary colors while 0", "give complete reversal of hue channel in HSV space in", "_ in input.shape[1:]: gamma = torch.unsqueeze(gamma, dim=-1) gain = torch.unsqueeze(gain,", "dim=-1) # Apply contrast factor to each channel x_adjust: torch.Tensor", "# Apply contrast factor to each channel x_adjust: torch.Tensor =", "= contrast_factor def forward(self, input: torch.Tensor) -> torch.Tensor: # type:", "< 0).any(): raise ValueError(f\"Contrast factor must be non-negative. Got {contrast_factor}\")", "out: torch.Tensor = torch.cat([h, s_out, v], dim=-3) return out def", "0).any(): raise ValueError(f\"Saturation factor must be non-negative. Got {saturation_factor}\") for", "values h, s, v = torch.chunk(input, chunks=3, dim=-3) # transform", "must be in the range [-PI, PI]. Got {hue_factor}\") for", "adjust_hue_raw(input: torch.Tensor, hue_factor: Union[float, torch.Tensor]) -> torch.Tensor: r\"\"\"Adjust hue of", "of an image. Expecting input to be in hsv format", "ValueError(f\"Gain must be non-negative. Got {gain}\") for _ in input.shape[1:]:", "-> torch.Tensor: r\"\"\"Adjust color saturation of an image. See :class:`~kornia.color.AdjustSaturation`", "torch.Tensor. Got {type(input)}\") if not isinstance(contrast_factor, (float, torch.Tensor,)): raise TypeError(f\"The", "See :class:`~kornia.color.AdjustHue` for details. \"\"\" # convert the rgb image", "(float, torch.Tensor,)): raise TypeError(f\"The factor should be either a float", "The constant multiplier. Default 1. Returns: torch.Tensor: Adjusted image. \"\"\"", "brightness_factor: Union[float, torch.Tensor]) -> torch.Tensor: r\"\"\"Adjust Brightness of an image.", "in [-PI, PI]. PI and -PI give complete reversal of", "< -pi) | (hue_factor > pi)).any(): raise ValueError(f\"Hue-factor must be", "raise ValueError(f\"Hue-factor must be in the range [-PI, PI]. Got", "in the shape of (\\*, N). 
gamma (float): Non negative", "(\\*, N). saturation_factor (float): How much to adjust the saturation.", "the shape of (\\*, N). brightness_factor (Union[float, torch.Tensor]): Brightness adjust", "AdjustContrast(nn.Module): r\"\"\"Adjust Contrast of an image. This implementation aligns OpenCV,", "dim=-3) return out def adjust_saturation(input: torch.Tensor, saturation_factor: Union[float, torch.Tensor]) ->", "contrast_factor.to(input.device).to(input.dtype) if (contrast_factor < 0).any(): raise ValueError(f\"Contrast factor must be", "torch.Tensor: r\"\"\"Adjust Contrast of an image. See :class:`~kornia.color.AdjustContrast` for details.", "import rgb_to_hsv, hsv_to_rgb from kornia.constants import pi def adjust_saturation_raw(input: torch.Tensor,", "hue_factor def forward(self, input: torch.Tensor) -> torch.Tensor: # type: ignore", "torch.tensor([gain]) gamma = gamma.to(input.device).to(input.dtype) gain = gain.to(input.device).to(input.dtype) if (gamma <", "torch.Tensor,)): raise TypeError(f\"The factor should be either a float or", "to shift the hue channel. Should be in [-PI, PI].", "1 will give the original image while 2 will enhance", "image. \"\"\" def __init__(self, hue_factor: Union[float, torch.Tensor]) -> None: super(AdjustHue,", "float number or torch.Tensor in the range between\" f\" [-PI,", "of [0, 1]. Args: input (torch.Tensor): Image to be adjusted", "torch.Tensor: Adjusted image. \"\"\" def __init__(self, gamma: Union[float, torch.Tensor], gain:", "γ\\gammaγ in the equation. gamma larger than 1 make the", "= adjust_hue_raw(x_hsv, hue_factor) # convert back to rgb out: torch.Tensor", "the equation. gamma larger than 1 make the shadows darker,", "gain: Union[float, torch.Tensor] = 1.) -> None: super(AdjustGamma, self).__init__() self.gamma:", "Got {type(hue_factor)}\") if isinstance(hue_factor, float): hue_factor = torch.tensor([hue_factor]) hue_factor =", "r\"\"\"Adjust hue of an image. 
The input image is expected", "to be adjusted in the shape of (\\*, N). contrast_factor", "torch.Tensor = adjust_hue_raw(x_hsv, hue_factor) # convert back to rgb out:", "the rgb image to hsv x_hsv: torch.Tensor = rgb_to_hsv(input) #", "return adjust_saturation(input, self.saturation_factor) class AdjustHue(nn.Module): r\"\"\"Adjust hue of an image.", "class AdjustHue(nn.Module): r\"\"\"Adjust hue of an image. The input image", "torch import torch.nn as nn from kornia.color.hsv import rgb_to_hsv, hsv_to_rgb", "isinstance(gain, float): gain = torch.tensor([gain]) gamma = gamma.to(input.device).to(input.dtype) gain =", "brightness factor to each channel x_adjust: torch.Tensor = input +", "Got {type(gamma)}\") if not isinstance(gain, (float, torch.Tensor)): raise TypeError(f\"The gain", "shape of (\\*, N). gamma (float): Non negative real number,", "rgb out: torch.Tensor = hsv_to_rgb(x_adjusted) return out def adjust_hue_raw(input: torch.Tensor,", "1.0) return out class AdjustSaturation(nn.Module): r\"\"\"Adjust color saturation of an", "the saturation. 0 will give a black and white image,", "saturation of an image. See :class:`~kornia.color.AdjustSaturation` for details. \"\"\" #", "color saturation of an image. See :class:`~kornia.color.AdjustSaturation` for details. \"\"\"", "return adjust_hue(input, self.hue_factor) class AdjustGamma(nn.Module): r\"\"\"Perform gamma correction on an", "pi)).any(): raise ValueError(f\"Hue-factor must be in the range [-PI, PI].", "torch.Tensor]) -> torch.Tensor: r\"\"\"Adjust color saturation of an image. See", "if (contrast_factor < 0).any(): raise ValueError(f\"Contrast factor must be non-negative.", "color saturation of an image. The input image is expected", "= gamma.to(input.device).to(input.dtype) gain = gain.to(input.device).to(input.dtype) if (gamma < 0.0).any(): raise", "saturation_factor def forward(self, input: torch.Tensor) -> torch.Tensor: # type: ignore", "torch.Tensor], gain: Union[float, torch.Tensor] = 1.) 
-> torch.Tensor: r\"\"\"Perform gamma", "adjust_gamma(input: torch.Tensor, gamma: Union[float, torch.Tensor], gain: Union[float, torch.Tensor] = 1.)", "image. The input image is expected to be in the", "the shape of (\\*, N). gamma (float): Non negative real", "does not modify the input image while any other number", "range between\" f\" [-PI, PI]. Got {type(hue_factor)}\") if isinstance(hue_factor, float):", "value and appl module divisor: float = 2 * pi.item()", "and white image, 1 will give the original image while", "= rgb_to_hsv(input) # perform the conversion x_adjusted: torch.Tensor = adjust_hue_raw(x_hsv,", "super(AdjustHue, self).__init__() self.hue_factor: Union[float, torch.Tensor] = hue_factor def forward(self, input:", "# perform the conversion x_adjusted: torch.Tensor = adjust_saturation_raw(x_hsv, saturation_factor) #", "values out: torch.Tensor = torch.clamp(x_adjust, 0.0, 1.0) return out class", "Union[float, torch.Tensor]) -> torch.Tensor: r\"\"\"Adjust color saturation of an image.", "back the corrected hue out: torch.Tensor = torch.cat([h, s_out, v],", "if not isinstance(gamma, (float, torch.Tensor)): raise TypeError(f\"The gamma should be", "gamma = torch.tensor([gamma]) if isinstance(gain, float): gain = torch.tensor([gain]) gamma", "dim=-1) # Apply brightness factor to each channel x_adjust: torch.Tensor", "Got {type(input)}\") if not isinstance(contrast_factor, (float, torch.Tensor,)): raise TypeError(f\"The factor", "positive and negative direction respectively. 0 means no shift. Therefore,", "corrected hue out: torch.Tensor = torch.cat([h_out, s, v], dim=-3) return", "if isinstance(gain, float): gain = torch.tensor([gain]) gamma = gamma.to(input.device).to(input.dtype) gain", "class AdjustContrast(nn.Module): r\"\"\"Adjust Contrast of an image. This implementation aligns", "an image. See :class:`~kornia.color.AdjustGamma` for details. \"\"\" if not torch.is_tensor(input):", "is not a torch.Tensor. 
Got {type(input)}\") if not isinstance(contrast_factor, (float,", "0.0, 1.0) return out def adjust_brightness(input: torch.Tensor, brightness_factor: Union[float, torch.Tensor])", "in input.shape[1:]: hue_factor = torch.unsqueeze(hue_factor, dim=-1) # unpack the hsv", "self.contrast_factor) class AdjustBrightness(nn.Module): r\"\"\"Adjust Brightness of an image. This implementation", "or torch.Tensor in the range between\" f\" [-PI, PI]. Got", "out def adjust_contrast(input: torch.Tensor, contrast_factor: Union[float, torch.Tensor]) -> torch.Tensor: r\"\"\"Adjust", "hue_factor) # convert back to rgb out: torch.Tensor = hsv_to_rgb(x_adjusted)", "torch.Tensor: r\"\"\"Perform gamma correction on an image. See :class:`~kornia.color.AdjustGamma` for", "dim=-3) # transform the hue value and appl module divisor:", "return out class AdjustSaturation(nn.Module): r\"\"\"Adjust color saturation of an image.", "hsv format already. See :class:`~kornia.color.AdjustSaturation` for details. \"\"\" if not", "be adjusted in the shape of (\\*, N). hue_factor (float):", "How much to adjust the saturation. 0 will give a", "return out def adjust_hue_raw(input: torch.Tensor, hue_factor: Union[float, torch.Tensor]) -> torch.Tensor:", "not isinstance(gain, (float, torch.Tensor)): raise TypeError(f\"The gain should be a", "a float or torch.Tensor. \" f\"Got {type(brightness_factor)}\") if isinstance(brightness_factor, float):", "= torch.cat([h, s_out, v], dim=-3) return out def adjust_saturation(input: torch.Tensor,", "Got {saturation_factor}\") for _ in input.shape[1:]: saturation_factor = torch.unsqueeze(saturation_factor, dim=-1)", "hsv format already. See :class:`~kornia.color.AdjustHue` for details. \"\"\" if not", "of (\\*, N). saturation_factor (float): How much to adjust the", "on an image. The input image is expected to be", "in the batch. 0 generates a compleatly black image, 1", "gamma self.gain: Union[float, torch.Tensor] = gain def forward(self, input: torch.Tensor)", "image. 
Returns: torch.Tensor: Adjusted image. \"\"\" def __init__(self, hue_factor: Union[float,", "TypeError(f\"The factor should be either a float or torch.Tensor. \"", "Apply brightness factor to each channel x_adjust: torch.Tensor = input", "dim=-1) # unpack the hsv values h, s, v =", "\"\"\" # convert the rgb image to hsv x_hsv: torch.Tensor", "perform the conversion x_adjusted: torch.Tensor = adjust_hue_raw(x_hsv, hue_factor) # convert", "{type(contrast_factor)}\") if isinstance(contrast_factor, float): contrast_factor = torch.tensor([contrast_factor]) contrast_factor = contrast_factor.to(input.device).to(input.dtype)", "an image with complementary colors while 0 gives the original", "0 gives the original image. Returns: torch.Tensor: Adjusted image. \"\"\"", "self.hue_factor: Union[float, torch.Tensor] = hue_factor def forward(self, input: torch.Tensor) ->", "input image while any other non-negative number modify the brightness", "be either a float or torch.Tensor. \" f\"Got {type(brightness_factor)}\") if", "give an image with complementary colors while 0 gives the", "* saturation_factor, min=0, max=1) # pack back back the corrected", "a torch.Tensor. Got {type(input)}\") if not isinstance(hue_factor, (float, torch.Tensor)): raise", "0).any(): raise ValueError(f\"Contrast factor must be non-negative. Got {contrast_factor}\") for", "in the range of [0, 1]. Args: input (torch.Tensor): Image/Input", "= torch.tensor([gamma]) if isinstance(gain, float): gain = torch.tensor([gain]) gamma =", "positive float or torch.Tensor. Got {type(gain)}\") if isinstance(gamma, float): gamma", "adjust_saturation_raw(input: torch.Tensor, saturation_factor: Union[float, torch.Tensor]) -> torch.Tensor: r\"\"\"Adjust color saturation", "Args: input (torch.Tensor): Image/Tensor to be adjusted in the shape", "(float): How much to adjust the saturation. 
0 will give", "contrast_factor def forward(self, input: torch.Tensor) -> torch.Tensor: # type: ignore", "TypeError(f\"The gamma should be a positive float or torch.Tensor. Got", "torch.Tensor: Adjusted image. \"\"\" def __init__(self, saturation_factor: Union[float, torch.Tensor]) ->", "Union[float, torch.Tensor]) -> torch.Tensor: r\"\"\"Adjust hue of an image. See", "and -PI give complete reversal of hue channel in HSV", "torch.Tensor: r\"\"\"Adjust color saturation of an image. See :class:`~kornia.color.AdjustSaturation` for", "TypeError(f\"The gain should be a positive float or torch.Tensor. Got", "gain * torch.pow(input, gamma) # Truncate between pixel values out:", "__init__(self, gamma: Union[float, torch.Tensor], gain: Union[float, torch.Tensor] = 1.) ->", "Returns: torch.Tensor: Adjusted image. \"\"\" def __init__(self, contrast_factor: Union[float, torch.Tensor])", "{saturation_factor}\") for _ in input.shape[1:]: saturation_factor = torch.unsqueeze(saturation_factor, dim=-1) #", "def __init__(self, brightness_factor: Union[float, torch.Tensor]) -> None: super(AdjustBrightness, self).__init__() self.brightness_factor:", "# Apply brightness factor to each channel x_adjust: torch.Tensor =", "__init__(self, saturation_factor: Union[float, torch.Tensor]) -> None: super(AdjustSaturation, self).__init__() self.saturation_factor: Union[float,", "1]. Args: input (torch.Tensor): Image/Tensor to be adjusted in the", "raise TypeError(f\"The gamma should be a positive float or torch.Tensor.", "saturation_factor.to(input.device).to(input.dtype) if (saturation_factor < 0).any(): raise ValueError(f\"Saturation factor must be", "be non-negative. 
Got {saturation_factor}\") for _ in input.shape[1:]: saturation_factor =", "# convert the rgb image to hsv x_hsv: torch.Tensor =", "+ hue_factor, divisor) # pack back back the corrected hue", "Got {gamma}\") if (gain < 0.0).any(): raise ValueError(f\"Gain must be", "= brightness_factor.to(input.device).to(input.dtype) for _ in input.shape[1:]: brightness_factor = torch.unsqueeze(brightness_factor, dim=-1)", "no shift. Therefore, both -PI and PI will give an", "to be an RGB image in the range of [0,", "\"\"\" def __init__(self, gamma: Union[float, torch.Tensor], gain: Union[float, torch.Tensor] =", "self.saturation_factor: Union[float, torch.Tensor] = saturation_factor def forward(self, input: torch.Tensor) ->", "range of [0, 1]. Args: input (torch.Tensor): Image/Input to be", "saturation_factor: Union[float, torch.Tensor]) -> None: super(AdjustSaturation, self).__init__() self.saturation_factor: Union[float, torch.Tensor]", "= torch.fmod(h + hue_factor, divisor) # pack back back the", "either a float or torch.Tensor. \" f\"Got {type(contrast_factor)}\") if isinstance(contrast_factor,", "ignore return adjust_contrast(input, self.contrast_factor) class AdjustBrightness(nn.Module): r\"\"\"Adjust Brightness of an", "torch.Tensor. Got {type(gamma)}\") if not isinstance(gain, (float, torch.Tensor)): raise TypeError(f\"The", "with complementary colors while 0 gives the original image. Returns:", "contrast_factor = contrast_factor.to(input.device).to(input.dtype) if (contrast_factor < 0).any(): raise ValueError(f\"Contrast factor", "Returns: torch.Tensor: Adjusted image. \"\"\" def __init__(self, brightness_factor: Union[float, torch.Tensor])", "saturation_factor (float): How much to adjust the saturation. 
0 will", "white image, 1 will give the original image while 2", "float): saturation_factor = torch.tensor([saturation_factor]) saturation_factor = saturation_factor.to(input.device).to(input.dtype) if (saturation_factor <", "{type(input)}\") if not isinstance(gamma, (float, torch.Tensor)): raise TypeError(f\"The gamma should", "should be a positive float or torch.Tensor. Got {type(gamma)}\") if", "torch.Tensor) -> torch.Tensor: # type: ignore return adjust_hue(input, self.hue_factor) class", "(float, torch.Tensor,)): raise TypeError(f\"The saturation_factor should be a float number", "v = torch.chunk(input, chunks=3, dim=-3) # transform the hue value", "0 does not modify the input image while any other", "f\" [-PI, PI]. Got {type(hue_factor)}\") if isinstance(hue_factor, float): hue_factor =", "HSV space in positive and negative direction respectively. 0 means", "PI]. Got {hue_factor}\") for _ in input.shape[1:]: hue_factor = torch.unsqueeze(hue_factor,", "PI]. Got {type(hue_factor)}\") if isinstance(hue_factor, float): hue_factor = torch.tensor([hue_factor]) hue_factor", "forward(self, input: torch.Tensor) -> torch.Tensor: # type: ignore return adjust_contrast(input,", "torch.Tensor: Adjusted image. \"\"\" def __init__(self, brightness_factor: Union[float, torch.Tensor]) ->", "kornia.color.hsv import rgb_to_hsv, hsv_to_rgb from kornia.constants import pi def adjust_saturation_raw(input:", "AdjustHue(nn.Module): r\"\"\"Adjust hue of an image. The input image is", "OpenCV, not PIL. Hence, the output differs from TorchVision. The", "hue out: torch.Tensor = torch.cat([h, s_out, v], dim=-3) return out", "2 will enhance the saturation by a factor of 2.", "adjust_brightness(input: torch.Tensor, brightness_factor: Union[float, torch.Tensor]) -> torch.Tensor: r\"\"\"Adjust Brightness of", "raise TypeError(f\"Input type is not a torch.Tensor. 
Got {type(input)}\") if", "reversal of hue channel in HSV space in positive and", "shadows darker, while gamma smaller than 1 make dark regions", ":class:`~kornia.color.AdjustHue` for details. \"\"\" # convert the rgb image to", "{type(hue_factor)}\") if isinstance(hue_factor, float): hue_factor = torch.tensor([hue_factor]) hue_factor = hue_factor.to(input.device).to(input.dtype)", "{type(input)}\") if not isinstance(contrast_factor, (float, torch.Tensor,)): raise TypeError(f\"The factor should", "image while 2 will enhance the saturation by a factor", "an image. This implementation aligns OpenCV, not PIL. Hence, the", "torch.Tensor] = contrast_factor def forward(self, input: torch.Tensor) -> torch.Tensor: #", "(float): Non negative real number, same as γ\\gammaγ in the", "of an image. This implementation aligns OpenCV, not PIL. Hence,", "conversion x_adjusted: torch.Tensor = adjust_saturation_raw(x_hsv, saturation_factor) # convert back to", "divisor) # pack back back the corrected hue out: torch.Tensor", "Image to be adjusted in the shape of (\\*, N).", "See :class:`~kornia.color.AdjustHue` for details. \"\"\" if not torch.is_tensor(input): raise TypeError(f\"Input", "to hsv x_hsv: torch.Tensor = rgb_to_hsv(input) # perform the conversion", "TypeError(f\"The hue_factor should be a float number or torch.Tensor in", "(saturation_factor < 0).any(): raise ValueError(f\"Saturation factor must be non-negative. Got", "than 1 make the shadows darker, while gamma smaller than", "float): brightness_factor = torch.tensor([brightness_factor]) brightness_factor = brightness_factor.to(input.device).to(input.dtype) for _ in", "gain def forward(self, input: torch.Tensor) -> torch.Tensor: # type: ignore", "See :class:`~kornia.color.AdjustSaturation` for details. \"\"\" # convert the rgb image", "Got {type(input)}\") if not isinstance(hue_factor, (float, torch.Tensor)): raise TypeError(f\"The hue_factor", "a positive float or torch.Tensor. 
Got {type(gamma)}\") if not isinstance(gain,", "float or torch.Tensor. Got {type(gamma)}\") if not isinstance(gain, (float, torch.Tensor)):", "float): gamma = torch.tensor([gamma]) if isinstance(gain, float): gain = torch.tensor([gain])", "raise TypeError(f\"The saturation_factor should be a float number or torch.Tensor.\"", "gamma should be a positive float or torch.Tensor. Got {type(gamma)}\")", "hue_factor (float): How much to shift the hue channel. Should", "torch.Tensor]) -> torch.Tensor: r\"\"\"Adjust Contrast of an image. See :class:`~kornia.color.AdjustContrast`", "-> None: super(AdjustHue, self).__init__() self.hue_factor: Union[float, torch.Tensor] = hue_factor def", "adjust factor per element in the batch. 0 does not", "already. See :class:`~kornia.color.AdjustHue` for details. \"\"\" if not torch.is_tensor(input): raise", "gamma larger than 1 make the shadows darker, while gamma", "0.0).any(): raise ValueError(f\"Gamma must be non-negative. Got {gamma}\") if (gain", "gamma (float): Non negative real number, same as γ\\gammaγ in", "torch.Tensor = adjust_saturation_raw(x_hsv, saturation_factor) # convert back to rgb out:", "= hsv_to_rgb(x_adjusted) return out def adjust_hue_raw(input: torch.Tensor, hue_factor: Union[float, torch.Tensor])", "Expecting input to be in hsv format already. See :class:`~kornia.color.AdjustHue`", "= 1.) -> None: super(AdjustGamma, self).__init__() self.gamma: Union[float, torch.Tensor] =", "pi def adjust_saturation_raw(input: torch.Tensor, saturation_factor: Union[float, torch.Tensor]) -> torch.Tensor: r\"\"\"Adjust", "Default 1. Returns: torch.Tensor: Adjusted image. \"\"\" def __init__(self, gamma:", "image while any other number modify the brightness. Returns: torch.Tensor:", "# pack back back the corrected hue out: torch.Tensor =", "= gain * torch.pow(input, gamma) # Truncate between pixel values", "and appl module divisor: float = 2 * pi.item() h_out:", ":class:`~kornia.color.AdjustHue` for details. 
\"\"\" if not torch.is_tensor(input): raise TypeError(f\"Input type", "torch.Tensor] = brightness_factor def forward(self, input: torch.Tensor) -> torch.Tensor: #", "the corrected hue out: torch.Tensor = torch.cat([h, s_out, v], dim=-3)", "number modify the brightness. Returns: torch.Tensor: Adjusted image. \"\"\" def", "import torch import torch.nn as nn from kornia.color.hsv import rgb_to_hsv,", "to be in the range of [0, 1]. Args: input", "TorchVision. The input image is expected to be in the", "of an image. See :class:`~kornia.color.AdjustContrast` for details. \"\"\" if not", "= gain.to(input.device).to(input.dtype) if (gamma < 0.0).any(): raise ValueError(f\"Gamma must be", "{type(input)}\") if not isinstance(brightness_factor, (float, torch.Tensor,)): raise TypeError(f\"The factor should", "torch.Tensor)): raise TypeError(f\"The gain should be a positive float or", "ValueError(f\"Contrast factor must be non-negative. Got {contrast_factor}\") for _ in", "torch.unsqueeze(saturation_factor, dim=-1) # unpack the hsv values h, s, v", "torch.unsqueeze(brightness_factor, dim=-1) # Apply brightness factor to each channel x_adjust:", "saturation_factor = torch.tensor([saturation_factor]) saturation_factor = saturation_factor.to(input.device).to(input.dtype) if (saturation_factor < 0).any():", "r\"\"\"Adjust hue of an image. Expecting input to be in", "Union[float, torch.Tensor]) -> None: super(AdjustHue, self).__init__() self.hue_factor: Union[float, torch.Tensor] =", "{type(gain)}\") if isinstance(gamma, float): gamma = torch.tensor([gamma]) if isinstance(gain, float):", "Image/Input to be adjusted in the shape of (\\*, N).", "(float, torch.Tensor)): raise TypeError(f\"The gamma should be a positive float", "shape of (\\*, N). contrast_factor (Union[float, torch.Tensor]): Contrast adjust factor", "input.shape[1:]: gamma = torch.unsqueeze(gamma, dim=-1) gain = torch.unsqueeze(gain, dim=-1) #", "torch.Tensor. 
Got {type(input)}\") if not isinstance(brightness_factor, (float, torch.Tensor,)): raise TypeError(f\"The", "return adjust_contrast(input, self.contrast_factor) class AdjustBrightness(nn.Module): r\"\"\"Adjust Brightness of an image.", "contrast_factor: Union[float, torch.Tensor]) -> torch.Tensor: r\"\"\"Adjust Contrast of an image.", "image. \"\"\" def __init__(self, brightness_factor: Union[float, torch.Tensor]) -> None: super(AdjustBrightness,", "saturation_factor) # convert back to rgb out: torch.Tensor = hsv_to_rgb(x_adjusted)", "be in hsv format already. See :class:`~kornia.color.AdjustHue` for details. \"\"\"", "torch.Tensor. Got {type(input)}\") if not isinstance(hue_factor, (float, torch.Tensor)): raise TypeError(f\"The", "if not isinstance(gain, (float, torch.Tensor)): raise TypeError(f\"The gain should be", "the output differs from TorchVision. The input image is expected", "modify the input image while any other number modify the", "s_out: torch.Tensor = torch.clamp(s * saturation_factor, min=0, max=1) # pack", "type is not a torch.Tensor. Got {type(input)}\") if not isinstance(saturation_factor,", "raise ValueError(f\"Gamma must be non-negative. Got {gamma}\") if (gain <", "smaller than 1 make dark regions lighter. gain (float, optional):", "if not torch.is_tensor(input): raise TypeError(f\"Input type is not a torch.Tensor.", "input: torch.Tensor) -> torch.Tensor: # type: ignore return adjust_brightness(input, self.brightness_factor)", "the hue value and appl module s_out: torch.Tensor = torch.clamp(s", "torch.Tensor], gain: Union[float, torch.Tensor] = 1.) -> None: super(AdjustGamma, self).__init__()", "return out def adjust_contrast(input: torch.Tensor, contrast_factor: Union[float, torch.Tensor]) -> torch.Tensor:", "hsv_to_rgb from kornia.constants import pi def adjust_saturation_raw(input: torch.Tensor, saturation_factor: Union[float,", "image. 
\"\"\" def __init__(self, saturation_factor: Union[float, torch.Tensor]) -> None: super(AdjustSaturation,", "real number, same as γ\\gammaγ in the equation. gamma larger", "back to rgb out: torch.Tensor = hsv_to_rgb(x_adjusted) return out def", "_ in input.shape[1:]: saturation_factor = torch.unsqueeze(saturation_factor, dim=-1) # unpack the", "batch. 0 generates a compleatly black image, 1 does not", "# transform the hue value and appl module s_out: torch.Tensor", "be a positive float or torch.Tensor. Got {type(gamma)}\") if not", "0.0, 1.0) return out class AdjustSaturation(nn.Module): r\"\"\"Adjust color saturation of", "(torch.Tensor): Image/Input to be adjusted in the shape of (\\*,", "Union import torch import torch.nn as nn from kornia.color.hsv import", "isinstance(gain, (float, torch.Tensor)): raise TypeError(f\"The gain should be a positive", "adjusted in the shape of (\\*, N). hue_factor (float): How", "implementation aligns OpenCV, not PIL. Hence, the output differs from", "modify the brightness. Returns: torch.Tensor: Adjusted image. \"\"\" def __init__(self,", "hue value and appl module divisor: float = 2 *", "Got {gain}\") for _ in input.shape[1:]: gamma = torch.unsqueeze(gamma, dim=-1)", "torch.Tensor = torch.cat([h, s_out, v], dim=-3) return out def adjust_saturation(input:", "= rgb_to_hsv(input) # perform the conversion x_adjusted: torch.Tensor = adjust_saturation_raw(x_hsv,", "PI]. PI and -PI give complete reversal of hue channel", "in the shape of (\\*, N). brightness_factor (Union[float, torch.Tensor]): Brightness", "self.gamma, self.gain) class AdjustContrast(nn.Module): r\"\"\"Adjust Contrast of an image. This", "import torch.nn as nn from kornia.color.hsv import rgb_to_hsv, hsv_to_rgb from", "negative real number, same as γ\\gammaγ in the equation. gamma", "must be non-negative. 
Got {saturation_factor}\") for _ in input.shape[1:]: saturation_factor", "def adjust_brightness(input: torch.Tensor, brightness_factor: Union[float, torch.Tensor]) -> torch.Tensor: r\"\"\"Adjust Brightness", "between\" f\" [-PI, PI]. Got {type(hue_factor)}\") if isinstance(hue_factor, float): hue_factor", "hue_factor.to(input.device).to(input.dtype) if ((hue_factor < -pi) | (hue_factor > pi)).any(): raise", "-> torch.Tensor: # type: ignore return adjust_hue(input, self.hue_factor) class AdjustGamma(nn.Module):", "direction respectively. 0 means no shift. Therefore, both -PI and", "(float, torch.Tensor)): raise TypeError(f\"The gain should be a positive float", "if isinstance(saturation_factor, float): saturation_factor = torch.tensor([saturation_factor]) saturation_factor = saturation_factor.to(input.device).to(input.dtype) if", "0.0).any(): raise ValueError(f\"Gain must be non-negative. Got {gain}\") for _", "torch.tensor([brightness_factor]) brightness_factor = brightness_factor.to(input.device).to(input.dtype) for _ in input.shape[1:]: brightness_factor =", "brightness. Returns: torch.Tensor: Adjusted image. \"\"\" def __init__(self, brightness_factor: Union[float,", "_ in input.shape[1:]: hue_factor = torch.unsqueeze(hue_factor, dim=-1) # unpack the", "saturation of an image. The input image is expected to", "if (gain < 0.0).any(): raise ValueError(f\"Gain must be non-negative. Got", "image. This implementation aligns OpenCV, not PIL. Hence, the output", "hue_factor: Union[float, torch.Tensor]) -> torch.Tensor: r\"\"\"Adjust hue of an image.", "{type(saturation_factor)}\") if isinstance(saturation_factor, float): saturation_factor = torch.tensor([saturation_factor]) saturation_factor = saturation_factor.to(input.device).to(input.dtype)", "torch.Tensor: r\"\"\"Adjust Brightness of an image. 
See :class:`~kornia.color.AdjustBrightness` for details.", "not isinstance(brightness_factor, (float, torch.Tensor,)): raise TypeError(f\"The factor should be either", "= saturation_factor.to(input.device).to(input.dtype) if (saturation_factor < 0).any(): raise ValueError(f\"Saturation factor must", "an image. See :class:`~kornia.color.AdjustBrightness` for details. \"\"\" if not torch.is_tensor(input):", "How much to shift the hue channel. Should be in", "r\"\"\"Adjust Contrast of an image. This implementation aligns OpenCV, not", "Union[float, torch.Tensor] = gamma self.gain: Union[float, torch.Tensor] = gain def", "None: super(AdjustContrast, self).__init__() self.contrast_factor: Union[float, torch.Tensor] = contrast_factor def forward(self,", "the shape of (\\*, N). contrast_factor (Union[float, torch.Tensor]): Contrast adjust", "means no shift. Therefore, both -PI and PI will give", "Adjusted image. \"\"\" def __init__(self, hue_factor: Union[float, torch.Tensor]) -> None:", "Got {type(input)}\") if not isinstance(saturation_factor, (float, torch.Tensor,)): raise TypeError(f\"The saturation_factor", "image with complementary colors while 0 gives the original image.", "type is not a torch.Tensor. Got {type(input)}\") if not isinstance(brightness_factor,", "a factor of 2. Returns: torch.Tensor: Adjusted image. \"\"\" def", "out: torch.Tensor = torch.clamp(x_adjust, 0.0, 1.0) return out def adjust_brightness(input:", "-> torch.Tensor: r\"\"\"Adjust Contrast of an image. See :class:`~kornia.color.AdjustContrast` for", "torch.Tensor = torch.fmod(h + hue_factor, divisor) # pack back back", "optional): The constant multiplier. Default 1. 
Returns: torch.Tensor: Adjusted image.", "from typing import Union import torch import torch.nn as nn", "appl module s_out: torch.Tensor = torch.clamp(s * saturation_factor, min=0, max=1)", "brightness_factor = brightness_factor.to(input.device).to(input.dtype) for _ in input.shape[1:]: brightness_factor = torch.unsqueeze(brightness_factor,", "return out def adjust_hue(input: torch.Tensor, hue_factor: Union[float, torch.Tensor]) -> torch.Tensor:", "details. \"\"\" # convert the rgb image to hsv x_hsv:", "self).__init__() self.gamma: Union[float, torch.Tensor] = gamma self.gain: Union[float, torch.Tensor] =", "[0, 1]. Args: input (torch.Tensor): Image/Tensor to be adjusted in", "make dark regions lighter. gain (float, optional): The constant multiplier.", "self.gamma: Union[float, torch.Tensor] = gamma self.gain: Union[float, torch.Tensor] = gain", "rgb_to_hsv(input) # perform the conversion x_adjusted: torch.Tensor = adjust_hue_raw(x_hsv, hue_factor)", "Expecting input to be in hsv format already. See :class:`~kornia.color.AdjustSaturation`", "__init__(self, hue_factor: Union[float, torch.Tensor]) -> None: super(AdjustHue, self).__init__() self.hue_factor: Union[float,", "super(AdjustGamma, self).__init__() self.gamma: Union[float, torch.Tensor] = gamma self.gain: Union[float, torch.Tensor]", "shape of (\\*, N). hue_factor (float): How much to shift", "Got {hue_factor}\") for _ in input.shape[1:]: hue_factor = torch.unsqueeze(hue_factor, dim=-1)", "None: super(AdjustBrightness, self).__init__() self.brightness_factor: Union[float, torch.Tensor] = brightness_factor def forward(self,", "torch.Tensor: r\"\"\"Adjust hue of an image. Expecting input to be", "{type(input)}\") if not isinstance(saturation_factor, (float, torch.Tensor,)): raise TypeError(f\"The saturation_factor should", "for _ in input.shape[1:]: contrast_factor = torch.unsqueeze(contrast_factor, dim=-1) # Apply", "by a factor of 2. Returns: torch.Tensor: Adjusted image. 
\"\"\"", "< 0.0).any(): raise ValueError(f\"Gain must be non-negative. Got {gain}\") for", "hsv_to_rgb(x_adjusted) return out def adjust_hue_raw(input: torch.Tensor, hue_factor: Union[float, torch.Tensor]) ->", "torch.tensor([hue_factor]) hue_factor = hue_factor.to(input.device).to(input.dtype) if ((hue_factor < -pi) | (hue_factor", "a compleatly black image, 1 does not modify the input", "[-PI, PI]. Got {type(hue_factor)}\") if isinstance(hue_factor, float): hue_factor = torch.tensor([hue_factor])", "Truncate between pixel values out: torch.Tensor = torch.clamp(x_adjust, 0.0, 1.0)", "in the equation. gamma larger than 1 make the shadows", "Non negative real number, same as γ\\gammaγ in the equation.", "saturation_factor = torch.unsqueeze(saturation_factor, dim=-1) # unpack the hsv values h,", "rgb image to hsv x_hsv: torch.Tensor = rgb_to_hsv(input) # perform", "= torch.unsqueeze(contrast_factor, dim=-1) # Apply contrast factor to each channel", "pixel values out: torch.Tensor = torch.clamp(x_adjust, 0.0, 1.0) return out", "Image/Tensor to be adjusted in the shape of (\\*, N).", "in the range [-PI, PI]. Got {hue_factor}\") for _ in", "other number modify the brightness. Returns: torch.Tensor: Adjusted image. \"\"\"", "# type: ignore return adjust_hue(input, self.hue_factor) class AdjustGamma(nn.Module): r\"\"\"Perform gamma", "either a float or torch.Tensor. \" f\"Got {type(brightness_factor)}\") if isinstance(brightness_factor,", "to rgb out: torch.Tensor = hsv_to_rgb(x_adjusted) return out def adjust_hue_raw(input:", "Adjusted image. \"\"\" def __init__(self, brightness_factor: Union[float, torch.Tensor]) -> None:", "in input.shape[1:]: saturation_factor = torch.unsqueeze(saturation_factor, dim=-1) # unpack the hsv", "a positive float or torch.Tensor. Got {type(gain)}\") if isinstance(gamma, float):", "the conversion x_adjusted: torch.Tensor = adjust_hue_raw(x_hsv, hue_factor) # convert back", "-> torch.Tensor: r\"\"\"Perform gamma correction on an image. 
See :class:`~kornia.color.AdjustGamma`", "Union[float, torch.Tensor]) -> None: super(AdjustBrightness, self).__init__() self.brightness_factor: Union[float, torch.Tensor] =", "adjust_saturation(input: torch.Tensor, saturation_factor: Union[float, torch.Tensor]) -> torch.Tensor: r\"\"\"Adjust color saturation", "dim=-3) return out def adjust_hue(input: torch.Tensor, hue_factor: Union[float, torch.Tensor]) ->", "brightness_factor def forward(self, input: torch.Tensor) -> torch.Tensor: # type: ignore", "Union[float, torch.Tensor], gain: Union[float, torch.Tensor] = 1.) -> None: super(AdjustGamma,", "the saturation by a factor of 2. Returns: torch.Tensor: Adjusted", "input image is expected to be in the range of", "element in the batch. 0 generates a compleatly black image,", "Adjusted image. \"\"\" def __init__(self, contrast_factor: Union[float, torch.Tensor]) -> None:", "unpack the hsv values h, s, v = torch.chunk(input, chunks=3,", "shift the hue channel. Should be in [-PI, PI]. PI", "in the range of [0, 1]. Args: input (torch.Tensor): Image", "= brightness_factor def forward(self, input: torch.Tensor) -> torch.Tensor: # type:", "self.brightness_factor: Union[float, torch.Tensor] = brightness_factor def forward(self, input: torch.Tensor) ->", "already. See :class:`~kornia.color.AdjustSaturation` for details. \"\"\" if not torch.is_tensor(input): raise", "from kornia.color.hsv import rgb_to_hsv, hsv_to_rgb from kornia.constants import pi def", "torch.clamp(x_adjust, 0.0, 1.0) return out def adjust_brightness(input: torch.Tensor, brightness_factor: Union[float,", "saturation by a factor of 2. Returns: torch.Tensor: Adjusted image.", "isinstance(brightness_factor, (float, torch.Tensor,)): raise TypeError(f\"The factor should be either a", "Contrast of an image. This implementation aligns OpenCV, not PIL.", "Contrast adjust factor per element in the batch. 0 generates", "any other number modify the brightness. 
Returns: torch.Tensor: Adjusted image.", "between pixel values out: torch.Tensor = torch.clamp(x_adjust, 0.0, 1.0) return", "image, 1 does not modify the input image while any", "= torch.tensor([gain]) gamma = gamma.to(input.device).to(input.dtype) gain = gain.to(input.device).to(input.dtype) if (gamma", "of 2. Returns: torch.Tensor: Adjusted image. \"\"\" def __init__(self, saturation_factor:", "value and appl module s_out: torch.Tensor = torch.clamp(s * saturation_factor,", "torch.Tensor: Adjusted image. \"\"\" def __init__(self, hue_factor: Union[float, torch.Tensor]) ->", "correction x_adjust: torch.Tensor = gain * torch.pow(input, gamma) # Truncate", "by this factor. Returns: torch.Tensor: Adjusted image. \"\"\" def __init__(self,", "torch.Tensor. \" f\"Got {type(brightness_factor)}\") if isinstance(brightness_factor, float): brightness_factor = torch.tensor([brightness_factor])", "-> torch.Tensor: r\"\"\"Adjust hue of an image. See :class:`~kornia.color.AdjustHue` for", "class AdjustBrightness(nn.Module): r\"\"\"Adjust Brightness of an image. This implementation aligns", "equation. 
gamma larger than 1 make the shadows darker, while", "if isinstance(brightness_factor, float): brightness_factor = torch.tensor([brightness_factor]) brightness_factor = brightness_factor.to(input.device).to(input.dtype) for", "isinstance(saturation_factor, float): saturation_factor = torch.tensor([saturation_factor]) saturation_factor = saturation_factor.to(input.device).to(input.dtype) if (saturation_factor", "= input * contrast_factor # Truncate between pixel values out:", "torch.Tensor = torch.clamp(x_adjust, 0.0, 1.0) return out class AdjustSaturation(nn.Module): r\"\"\"Adjust", "torch.Tensor: # type: ignore return adjust_hue(input, self.hue_factor) class AdjustGamma(nn.Module): r\"\"\"Perform", "# Truncate between pixel values out: torch.Tensor = torch.clamp(x_adjust, 0.0,", "def adjust_saturation_raw(input: torch.Tensor, saturation_factor: Union[float, torch.Tensor]) -> torch.Tensor: r\"\"\"Adjust color", "the corrected hue out: torch.Tensor = torch.cat([h_out, s, v], dim=-3)", "in input.shape[1:]: contrast_factor = torch.unsqueeze(contrast_factor, dim=-1) # Apply contrast factor", "N). contrast_factor (Union[float, torch.Tensor]): Contrast adjust factor per element in", "gain = gain.to(input.device).to(input.dtype) if (gamma < 0.0).any(): raise ValueError(f\"Gamma must", "be an RGB image in the range of [0, 1].", "a black and white image, 1 will give the original", "# perform the conversion x_adjusted: torch.Tensor = adjust_hue_raw(x_hsv, hue_factor) #", "back back the corrected hue out: torch.Tensor = torch.cat([h, s_out,", "(float): How much to shift the hue channel. 
Should be", "module divisor: float = 2 * pi.item() h_out: torch.Tensor =", "\"\"\" def __init__(self, hue_factor: Union[float, torch.Tensor]) -> None: super(AdjustHue, self).__init__()", "if isinstance(hue_factor, float): hue_factor = torch.tensor([hue_factor]) hue_factor = hue_factor.to(input.device).to(input.dtype) if", "hue_factor = torch.unsqueeze(hue_factor, dim=-1) # unpack the hsv values h,", "torch.pow(input, gamma) # Truncate between pixel values out: torch.Tensor =", "TypeError(f\"Input type is not a torch.Tensor. Got {type(input)}\") if not", "torch.Tensor]) -> None: super(AdjustContrast, self).__init__() self.contrast_factor: Union[float, torch.Tensor] = contrast_factor", "to be adjusted in the shape of (\\*, N). brightness_factor", "image. See :class:`~kornia.color.AdjustContrast` for details. \"\"\" if not torch.is_tensor(input): raise", "\" f\"Got {type(contrast_factor)}\") if isinstance(contrast_factor, float): contrast_factor = torch.tensor([contrast_factor]) contrast_factor", "1 make the shadows darker, while gamma smaller than 1", "range [-PI, PI]. Got {hue_factor}\") for _ in input.shape[1:]: hue_factor", "torch.Tensor: # type: ignore return adjust_gamma(input, self.gamma, self.gain) class AdjustContrast(nn.Module):", "generates a compleatly black image, 1 does not modify the", "torch.Tensor = hsv_to_rgb(x_adjusted) return out def adjust_gamma(input: torch.Tensor, gamma: Union[float,", "torch.Tensor, brightness_factor: Union[float, torch.Tensor]) -> torch.Tensor: r\"\"\"Adjust Brightness of an", "= torch.clamp(s * saturation_factor, min=0, max=1) # pack back back", "def forward(self, input: torch.Tensor) -> torch.Tensor: # type: ignore return", "torch.Tensor] = hue_factor def forward(self, input: torch.Tensor) -> torch.Tensor: #", "number modify the brightness by this factor. Returns: torch.Tensor: Adjusted", ":class:`~kornia.color.AdjustGamma` for details. 
\"\"\" if not torch.is_tensor(input): raise TypeError(f\"Input type", "Union[float, torch.Tensor] = contrast_factor def forward(self, input: torch.Tensor) -> torch.Tensor:", "to be adjusted in the shape of (\\*, N). hue_factor", "hue_factor = torch.tensor([hue_factor]) hue_factor = hue_factor.to(input.device).to(input.dtype) if ((hue_factor < -pi)", "if ((hue_factor < -pi) | (hue_factor > pi)).any(): raise ValueError(f\"Hue-factor", "will give the original image while 2 will enhance the", "return out def adjust_saturation(input: torch.Tensor, saturation_factor: Union[float, torch.Tensor]) -> torch.Tensor:", "{type(input)}\") if not isinstance(hue_factor, (float, torch.Tensor)): raise TypeError(f\"The hue_factor should", "is not a torch.Tensor. Got {type(input)}\") if not isinstance(saturation_factor, (float,", "input * contrast_factor # Truncate between pixel values out: torch.Tensor", "torch.Tensor = torch.clamp(x_adjust, 0.0, 1.0) return out def adjust_brightness(input: torch.Tensor,", "in HSV space in positive and negative direction respectively. 0", "contrast_factor = torch.tensor([contrast_factor]) contrast_factor = contrast_factor.to(input.device).to(input.dtype) if (contrast_factor < 0).any():", "the range of [0, 1]. Args: input (torch.Tensor): Image/Tensor to", "1]. Args: input (torch.Tensor): Image to be adjusted in the", "x_adjusted: torch.Tensor = adjust_hue_raw(x_hsv, hue_factor) # convert back to rgb", "class AdjustGamma(nn.Module): r\"\"\"Perform gamma correction on an image. The input", "as γ\\gammaγ in the equation. gamma larger than 1 make", "be adjusted in the shape of (\\*, N). saturation_factor (float):", "typing import Union import torch import torch.nn as nn from", "factor per element in the batch. 
0 generates a compleatly", "def __init__(self, saturation_factor: Union[float, torch.Tensor]) -> None: super(AdjustSaturation, self).__init__() self.saturation_factor:", "self).__init__() self.brightness_factor: Union[float, torch.Tensor] = brightness_factor def forward(self, input: torch.Tensor)", "from TorchVision. The input image is expected to be in", "-> None: super(AdjustContrast, self).__init__() self.contrast_factor: Union[float, torch.Tensor] = contrast_factor def", "Union[float, torch.Tensor] = brightness_factor def forward(self, input: torch.Tensor) -> torch.Tensor:", "image, 1 will give the original image while 2 will", "kornia.constants import pi def adjust_saturation_raw(input: torch.Tensor, saturation_factor: Union[float, torch.Tensor]) ->", "number or torch.Tensor in the range between\" f\" [-PI, PI].", "See :class:`~kornia.color.AdjustBrightness` for details. \"\"\" if not torch.is_tensor(input): raise TypeError(f\"Input", "super(AdjustSaturation, self).__init__() self.saturation_factor: Union[float, torch.Tensor] = saturation_factor def forward(self, input:", "AdjustGamma(nn.Module): r\"\"\"Perform gamma correction on an image. The input image", "torch.tensor([contrast_factor]) contrast_factor = contrast_factor.to(input.device).to(input.dtype) if (contrast_factor < 0).any(): raise ValueError(f\"Contrast", "image. See :class:`~kornia.color.AdjustBrightness` for details. \"\"\" if not torch.is_tensor(input): raise" ]
[ "env and registers it with gym''' assert config_id in REGISTRY,", "global REGISTRY REGISTRY = [] for name, f in inspect.getmembers(configs,", "REGISTRY.append(config['env_id']) # Register environments with gym _register() def make(config_id, agent_list,", "agent.init_agent(id_, env.spec._kwargs['game_type']) env.set_agents(agent_list) env.set_init_game_state(game_state_file) env.set_render_mode(render_mode) return env from . import", "inspect.getmembers(configs, inspect.isfunction): if not name.endswith('_env'): continue config = f() gym.envs.registration.register(", "import constants from . import forward_model from . import helpers", "game_state_file=None, render_mode='human'): '''Makes the pommerman env and registers it with", "in REGISTRY, \"Unknown configuration '{}'. \" \\ \"Possible values: {}\".format(config_id,", "_register(): global REGISTRY REGISTRY = [] for name, f in", "into the pommerman module''' import gym import inspect from .", "with gym''' assert config_id in REGISTRY, \"Unknown configuration '{}'. \"", ". import forward_model from . import helpers from . import", "<filename>pommerman/__init__.py '''Entry point into the pommerman module''' import gym import", "registers it with gym''' assert config_id in REGISTRY, \"Unknown configuration", "assert isinstance(agent, agents.BaseAgent) # NOTE: This is IMPORTANT so that", "values: {}\".format(config_id, REGISTRY) env = gym.make(config_id) for id_, agent in", "'{}'. 
\" \\ \"Possible values: {}\".format(config_id, REGISTRY) env = gym.make(config_id)", "not name.endswith('_env'): continue config = f() gym.envs.registration.register( id=config['env_id'], entry_point=config['env_entry_point'], kwargs=config['env_kwargs']", "f() gym.envs.registration.register( id=config['env_id'], entry_point=config['env_entry_point'], kwargs=config['env_kwargs'] ) REGISTRY.append(config['env_id']) # Register environments", "enumerate(agent_list): assert isinstance(agent, agents.BaseAgent) # NOTE: This is IMPORTANT so", "continue config = f() gym.envs.registration.register( id=config['env_id'], entry_point=config['env_entry_point'], kwargs=config['env_kwargs'] ) REGISTRY.append(config['env_id'])", "render_mode='human'): '''Makes the pommerman env and registers it with gym'''", "import helpers from . import utility from . import network", "def make(config_id, agent_list, game_state_file=None, render_mode='human'): '''Makes the pommerman env and", "from . import agents from . import configs from .", "'''Makes the pommerman env and registers it with gym''' assert", "inspect from . import agents from . import configs from", "from . import helpers from . import utility from .", "make(config_id, agent_list, game_state_file=None, render_mode='human'): '''Makes the pommerman env and registers", "\"Unknown configuration '{}'. \" \\ \"Possible values: {}\".format(config_id, REGISTRY) env", "and registers it with gym''' assert config_id in REGISTRY, \"Unknown", "for id_, agent in enumerate(agent_list): assert isinstance(agent, agents.BaseAgent) # NOTE:", "agents.BaseAgent) # NOTE: This is IMPORTANT so that the agent", "_register() def make(config_id, agent_list, game_state_file=None, render_mode='human'): '''Makes the pommerman env", "agent_list, game_state_file=None, render_mode='human'): '''Makes the pommerman env and registers it", "the pommerman module''' import gym import inspect from . 
import", "is initialized agent.init_agent(id_, env.spec._kwargs['game_type']) env.set_agents(agent_list) env.set_init_game_state(game_state_file) env.set_render_mode(render_mode) return env from", ". import constants from . import forward_model from . import", "= [] for name, f in inspect.getmembers(configs, inspect.isfunction): if not", ". import helpers from . import utility from . import", "# Register environments with gym _register() def make(config_id, agent_list, game_state_file=None,", "{}\".format(config_id, REGISTRY) env = gym.make(config_id) for id_, agent in enumerate(agent_list):", ". import configs from . import constants from . import", "id=config['env_id'], entry_point=config['env_entry_point'], kwargs=config['env_kwargs'] ) REGISTRY.append(config['env_id']) # Register environments with gym", ". import agents from . import configs from . import", "import utility from . import network gym.logger.set_level(40) REGISTRY = None", "forward_model from . import helpers from . import utility from", "gym.make(config_id) for id_, agent in enumerate(agent_list): assert isinstance(agent, agents.BaseAgent) #", "import gym import inspect from . import agents from .", "agents from . import configs from . import constants from", "gym.logger.set_level(40) REGISTRY = None def _register(): global REGISTRY REGISTRY =", "IMPORTANT so that the agent character is initialized agent.init_agent(id_, env.spec._kwargs['game_type'])", "configuration '{}'. \" \\ \"Possible values: {}\".format(config_id, REGISTRY) env =", "name.endswith('_env'): continue config = f() gym.envs.registration.register( id=config['env_id'], entry_point=config['env_entry_point'], kwargs=config['env_kwargs'] )", "from . 
import network gym.logger.set_level(40) REGISTRY = None def _register():", "pommerman env and registers it with gym''' assert config_id in", "[] for name, f in inspect.getmembers(configs, inspect.isfunction): if not name.endswith('_env'):", "config_id in REGISTRY, \"Unknown configuration '{}'. \" \\ \"Possible values:", "REGISTRY REGISTRY = [] for name, f in inspect.getmembers(configs, inspect.isfunction):", "entry_point=config['env_entry_point'], kwargs=config['env_kwargs'] ) REGISTRY.append(config['env_id']) # Register environments with gym _register()", "def _register(): global REGISTRY REGISTRY = [] for name, f", "in inspect.getmembers(configs, inspect.isfunction): if not name.endswith('_env'): continue config = f()", "import network gym.logger.set_level(40) REGISTRY = None def _register(): global REGISTRY", "REGISTRY) env = gym.make(config_id) for id_, agent in enumerate(agent_list): assert", "\"Possible values: {}\".format(config_id, REGISTRY) env = gym.make(config_id) for id_, agent", "REGISTRY = None def _register(): global REGISTRY REGISTRY = []", "gym.envs.registration.register( id=config['env_id'], entry_point=config['env_entry_point'], kwargs=config['env_kwargs'] ) REGISTRY.append(config['env_id']) # Register environments with", "environments with gym _register() def make(config_id, agent_list, game_state_file=None, render_mode='human'): '''Makes", "initialized agent.init_agent(id_, env.spec._kwargs['game_type']) env.set_agents(agent_list) env.set_init_game_state(game_state_file) env.set_render_mode(render_mode) return env from .", "= f() gym.envs.registration.register( id=config['env_id'], entry_point=config['env_entry_point'], kwargs=config['env_kwargs'] ) REGISTRY.append(config['env_id']) # Register", "it with gym''' assert config_id in REGISTRY, \"Unknown configuration '{}'.", "gym _register() def make(config_id, agent_list, game_state_file=None, render_mode='human'): '''Makes the pommerman", "from . import utility from . 
import network gym.logger.set_level(40) REGISTRY", "kwargs=config['env_kwargs'] ) REGISTRY.append(config['env_id']) # Register environments with gym _register() def", "that the agent character is initialized agent.init_agent(id_, env.spec._kwargs['game_type']) env.set_agents(agent_list) env.set_init_game_state(game_state_file)", "so that the agent character is initialized agent.init_agent(id_, env.spec._kwargs['game_type']) env.set_agents(agent_list)", "agent character is initialized agent.init_agent(id_, env.spec._kwargs['game_type']) env.set_agents(agent_list) env.set_init_game_state(game_state_file) env.set_render_mode(render_mode) return", "import agents from . import configs from . import constants", "None def _register(): global REGISTRY REGISTRY = [] for name,", ") REGISTRY.append(config['env_id']) # Register environments with gym _register() def make(config_id,", "from . import constants from . import forward_model from .", "for name, f in inspect.getmembers(configs, inspect.isfunction): if not name.endswith('_env'): continue", "\" \\ \"Possible values: {}\".format(config_id, REGISTRY) env = gym.make(config_id) for", "assert config_id in REGISTRY, \"Unknown configuration '{}'. \" \\ \"Possible", "import forward_model from . import helpers from . import utility", "config = f() gym.envs.registration.register( id=config['env_id'], entry_point=config['env_entry_point'], kwargs=config['env_kwargs'] ) REGISTRY.append(config['env_id']) #", "name, f in inspect.getmembers(configs, inspect.isfunction): if not name.endswith('_env'): continue config", "from . import forward_model from . import helpers from .", "isinstance(agent, agents.BaseAgent) # NOTE: This is IMPORTANT so that the", "pommerman module''' import gym import inspect from . import agents", "helpers from . import utility from . 
import network gym.logger.set_level(40)", "inspect.isfunction): if not name.endswith('_env'): continue config = f() gym.envs.registration.register( id=config['env_id'],", "'''Entry point into the pommerman module''' import gym import inspect", "# NOTE: This is IMPORTANT so that the agent character", "import inspect from . import agents from . import configs", "the agent character is initialized agent.init_agent(id_, env.spec._kwargs['game_type']) env.set_agents(agent_list) env.set_init_game_state(game_state_file) env.set_render_mode(render_mode)", "if not name.endswith('_env'): continue config = f() gym.envs.registration.register( id=config['env_id'], entry_point=config['env_entry_point'],", "from . import configs from . import constants from .", "id_, agent in enumerate(agent_list): assert isinstance(agent, agents.BaseAgent) # NOTE: This", "REGISTRY = [] for name, f in inspect.getmembers(configs, inspect.isfunction): if", "the pommerman env and registers it with gym''' assert config_id", "in enumerate(agent_list): assert isinstance(agent, agents.BaseAgent) # NOTE: This is IMPORTANT", "gym import inspect from . import agents from . import", "= None def _register(): global REGISTRY REGISTRY = [] for", "point into the pommerman module''' import gym import inspect from", "NOTE: This is IMPORTANT so that the agent character is", "f in inspect.getmembers(configs, inspect.isfunction): if not name.endswith('_env'): continue config =", "with gym _register() def make(config_id, agent_list, game_state_file=None, render_mode='human'): '''Makes the", "REGISTRY, \"Unknown configuration '{}'. \" \\ \"Possible values: {}\".format(config_id, REGISTRY)", "import configs from . import constants from . import forward_model", "Register environments with gym _register() def make(config_id, agent_list, game_state_file=None, render_mode='human'):", "module''' import gym import inspect from . import agents from", ". import utility from . 
import network gym.logger.set_level(40) REGISTRY =", "constants from . import forward_model from . import helpers from", "env = gym.make(config_id) for id_, agent in enumerate(agent_list): assert isinstance(agent,", "agent in enumerate(agent_list): assert isinstance(agent, agents.BaseAgent) # NOTE: This is", "is IMPORTANT so that the agent character is initialized agent.init_agent(id_,", "configs from . import constants from . import forward_model from", "\\ \"Possible values: {}\".format(config_id, REGISTRY) env = gym.make(config_id) for id_,", "utility from . import network gym.logger.set_level(40) REGISTRY = None def", "env.spec._kwargs['game_type']) env.set_agents(agent_list) env.set_init_game_state(game_state_file) env.set_render_mode(render_mode) return env from . import cli", "gym''' assert config_id in REGISTRY, \"Unknown configuration '{}'. \" \\", ". import network gym.logger.set_level(40) REGISTRY = None def _register(): global", "network gym.logger.set_level(40) REGISTRY = None def _register(): global REGISTRY REGISTRY", "This is IMPORTANT so that the agent character is initialized", "= gym.make(config_id) for id_, agent in enumerate(agent_list): assert isinstance(agent, agents.BaseAgent)", "character is initialized agent.init_agent(id_, env.spec._kwargs['game_type']) env.set_agents(agent_list) env.set_init_game_state(game_state_file) env.set_render_mode(render_mode) return env" ]
[ "django.urls import path from .views import ProfileView urlpatterns = [", "import path from .views import ProfileView urlpatterns = [ path('',", "from django.urls import path from .views import ProfileView urlpatterns =", "path from .views import ProfileView urlpatterns = [ path('', ProfileView.as_view(),", "from .views import ProfileView urlpatterns = [ path('', ProfileView.as_view(), name='user-profile'),", ".views import ProfileView urlpatterns = [ path('', ProfileView.as_view(), name='user-profile'), ]" ]
[ "= dict() res[\"default_iris\"] = 0.92 res[\"iris_n_calls\"] = 5 res[\"default_iris_iterative\"] =", "res[\"digits_iterative_n_iter\"] = 64 res[\"default_digits_binary\"] = 0.9927140255009107 res[\"default_digits_multilabel\"] = 0.90997912489192 res[\"default_digits_multilabel_proba\"]", "= 32 res[\"default_iris_proba\"] = 0.29271032477461295 res[\"default_iris_sparse\"] = 0.4 res[\"default_digits\"] =", "res[\"default_digits\"] = 0.9156041287188829 res[\"digits_n_calls\"] = 6 res[\"default_digits_iterative\"] = 0.9156041287188829 res[\"digits_iterative_n_iter\"]", "res[\"iris_n_calls\"] = 5 res[\"default_iris_iterative\"] = 0.92 res[\"iris_iterative_n_iter\"] = 32 res[\"default_iris_proba\"]", "res[\"digits_n_calls\"] = 6 res[\"default_digits_iterative\"] = 0.9156041287188829 res[\"digits_iterative_n_iter\"] = 64 res[\"default_digits_binary\"]", "= 0.9156041287188829 res[\"digits_n_calls\"] = 6 res[\"default_digits_iterative\"] = 0.9156041287188829 res[\"digits_iterative_n_iter\"] =", "__test__ = True res = dict() res[\"default_iris\"] = 0.92 res[\"iris_n_calls\"]", "PassiveAggressive from .test_base import BaseClassificationComponentTest class PassiveAggressiveComponentTest(BaseClassificationComponentTest): __test__ = True", "['max_iter'] sk_mod = sklearn.linear_model.PassiveAggressiveClassifier module = PassiveAggressive step_hyperparameter = {", "= 5 res[\"default_iris_iterative\"] = 0.92 res[\"iris_iterative_n_iter\"] = 32 res[\"default_iris_proba\"] =", "from .test_base import BaseClassificationComponentTest class PassiveAggressiveComponentTest(BaseClassificationComponentTest): __test__ = True res", "0.9156041287188829 res[\"digits_n_calls\"] = 6 res[\"default_digits_iterative\"] = 0.9156041287188829 res[\"digits_iterative_n_iter\"] = 64", "6 res[\"default_digits_iterative\"] = 0.9156041287188829 res[\"digits_iterative_n_iter\"] = 64 res[\"default_digits_binary\"] = 0.9927140255009107", "64 res[\"default_digits_binary\"] = 0.9927140255009107 
res[\"default_digits_multilabel\"] = 0.90997912489192 res[\"default_digits_multilabel_proba\"] = 1.0", "sk_mod = sklearn.linear_model.PassiveAggressiveClassifier module = PassiveAggressive step_hyperparameter = { 'name':", "0.9927140255009107 res[\"default_digits_multilabel\"] = 0.90997912489192 res[\"default_digits_multilabel_proba\"] = 1.0 res['ignore_hps'] = ['max_iter']", "= 0.92 res[\"iris_n_calls\"] = 5 res[\"default_iris_iterative\"] = 0.92 res[\"iris_iterative_n_iter\"] =", "1.0 res['ignore_hps'] = ['max_iter'] sk_mod = sklearn.linear_model.PassiveAggressiveClassifier module = PassiveAggressive", "= sklearn.linear_model.PassiveAggressiveClassifier module = PassiveAggressive step_hyperparameter = { 'name': 'max_iter',", "import BaseClassificationComponentTest class PassiveAggressiveComponentTest(BaseClassificationComponentTest): __test__ = True res = dict()", "dict() res[\"default_iris\"] = 0.92 res[\"iris_n_calls\"] = 5 res[\"default_iris_iterative\"] = 0.92", "0.4 res[\"default_digits\"] = 0.9156041287188829 res[\"digits_n_calls\"] = 6 res[\"default_digits_iterative\"] = 0.9156041287188829", "0.90997912489192 res[\"default_digits_multilabel_proba\"] = 1.0 res['ignore_hps'] = ['max_iter'] sk_mod = sklearn.linear_model.PassiveAggressiveClassifier", "res[\"default_iris_iterative\"] = 0.92 res[\"iris_iterative_n_iter\"] = 32 res[\"default_iris_proba\"] = 0.29271032477461295 res[\"default_iris_sparse\"]", "= 0.90997912489192 res[\"default_digits_multilabel_proba\"] = 1.0 res['ignore_hps'] = ['max_iter'] sk_mod =", "= PassiveAggressive step_hyperparameter = { 'name': 'max_iter', 'value': module.get_max_iter(), }", "= 0.9156041287188829 res[\"digits_iterative_n_iter\"] = 64 res[\"default_digits_binary\"] = 0.9927140255009107 res[\"default_digits_multilabel\"] =", "= 1.0 res['ignore_hps'] = ['max_iter'] sk_mod = sklearn.linear_model.PassiveAggressiveClassifier module =", "res[\"iris_iterative_n_iter\"] = 32 res[\"default_iris_proba\"] = 0.29271032477461295 
res[\"default_iris_sparse\"] = 0.4 res[\"default_digits\"]", "= True res = dict() res[\"default_iris\"] = 0.92 res[\"iris_n_calls\"] =", "5 res[\"default_iris_iterative\"] = 0.92 res[\"iris_iterative_n_iter\"] = 32 res[\"default_iris_proba\"] = 0.29271032477461295", "from autosklearn.pipeline.components.classification.passive_aggressive import \\ PassiveAggressive from .test_base import BaseClassificationComponentTest class", "class PassiveAggressiveComponentTest(BaseClassificationComponentTest): __test__ = True res = dict() res[\"default_iris\"] =", "res[\"default_iris_proba\"] = 0.29271032477461295 res[\"default_iris_sparse\"] = 0.4 res[\"default_digits\"] = 0.9156041287188829 res[\"digits_n_calls\"]", "res[\"default_digits_multilabel\"] = 0.90997912489192 res[\"default_digits_multilabel_proba\"] = 1.0 res['ignore_hps'] = ['max_iter'] sk_mod", "sklearn.linear_model.PassiveAggressiveClassifier module = PassiveAggressive step_hyperparameter = { 'name': 'max_iter', 'value':", "module = PassiveAggressive step_hyperparameter = { 'name': 'max_iter', 'value': module.get_max_iter(),", "0.29271032477461295 res[\"default_iris_sparse\"] = 0.4 res[\"default_digits\"] = 0.9156041287188829 res[\"digits_n_calls\"] = 6", "\\ PassiveAggressive from .test_base import BaseClassificationComponentTest class PassiveAggressiveComponentTest(BaseClassificationComponentTest): __test__ =", "res[\"default_iris_sparse\"] = 0.4 res[\"default_digits\"] = 0.9156041287188829 res[\"digits_n_calls\"] = 6 res[\"default_digits_iterative\"]", "0.92 res[\"iris_iterative_n_iter\"] = 32 res[\"default_iris_proba\"] = 0.29271032477461295 res[\"default_iris_sparse\"] = 0.4", "res['ignore_hps'] = ['max_iter'] sk_mod = sklearn.linear_model.PassiveAggressiveClassifier module = PassiveAggressive step_hyperparameter", "res[\"default_iris\"] = 0.92 res[\"iris_n_calls\"] = 5 res[\"default_iris_iterative\"] = 0.92 res[\"iris_iterative_n_iter\"]", "= 0.92 res[\"iris_iterative_n_iter\"] = 32 
res[\"default_iris_proba\"] = 0.29271032477461295 res[\"default_iris_sparse\"] =", "= 0.29271032477461295 res[\"default_iris_sparse\"] = 0.4 res[\"default_digits\"] = 0.9156041287188829 res[\"digits_n_calls\"] =", "res = dict() res[\"default_iris\"] = 0.92 res[\"iris_n_calls\"] = 5 res[\"default_iris_iterative\"]", "BaseClassificationComponentTest class PassiveAggressiveComponentTest(BaseClassificationComponentTest): __test__ = True res = dict() res[\"default_iris\"]", "= 64 res[\"default_digits_binary\"] = 0.9927140255009107 res[\"default_digits_multilabel\"] = 0.90997912489192 res[\"default_digits_multilabel_proba\"] =", "0.9156041287188829 res[\"digits_iterative_n_iter\"] = 64 res[\"default_digits_binary\"] = 0.9927140255009107 res[\"default_digits_multilabel\"] = 0.90997912489192", "= ['max_iter'] sk_mod = sklearn.linear_model.PassiveAggressiveClassifier module = PassiveAggressive step_hyperparameter =", "import \\ PassiveAggressive from .test_base import BaseClassificationComponentTest class PassiveAggressiveComponentTest(BaseClassificationComponentTest): __test__", "sklearn.linear_model from autosklearn.pipeline.components.classification.passive_aggressive import \\ PassiveAggressive from .test_base import BaseClassificationComponentTest", "= 0.9927140255009107 res[\"default_digits_multilabel\"] = 0.90997912489192 res[\"default_digits_multilabel_proba\"] = 1.0 res['ignore_hps'] =", "PassiveAggressiveComponentTest(BaseClassificationComponentTest): __test__ = True res = dict() res[\"default_iris\"] = 0.92", "True res = dict() res[\"default_iris\"] = 0.92 res[\"iris_n_calls\"] = 5", ".test_base import BaseClassificationComponentTest class PassiveAggressiveComponentTest(BaseClassificationComponentTest): __test__ = True res =", "autosklearn.pipeline.components.classification.passive_aggressive import \\ PassiveAggressive from .test_base import BaseClassificationComponentTest class PassiveAggressiveComponentTest(BaseClassificationComponentTest):", "32 
res[\"default_iris_proba\"] = 0.29271032477461295 res[\"default_iris_sparse\"] = 0.4 res[\"default_digits\"] = 0.9156041287188829", "= 6 res[\"default_digits_iterative\"] = 0.9156041287188829 res[\"digits_iterative_n_iter\"] = 64 res[\"default_digits_binary\"] =", "res[\"default_digits_multilabel_proba\"] = 1.0 res['ignore_hps'] = ['max_iter'] sk_mod = sklearn.linear_model.PassiveAggressiveClassifier module", "0.92 res[\"iris_n_calls\"] = 5 res[\"default_iris_iterative\"] = 0.92 res[\"iris_iterative_n_iter\"] = 32", "res[\"default_digits_binary\"] = 0.9927140255009107 res[\"default_digits_multilabel\"] = 0.90997912489192 res[\"default_digits_multilabel_proba\"] = 1.0 res['ignore_hps']", "= 0.4 res[\"default_digits\"] = 0.9156041287188829 res[\"digits_n_calls\"] = 6 res[\"default_digits_iterative\"] =", "res[\"default_digits_iterative\"] = 0.9156041287188829 res[\"digits_iterative_n_iter\"] = 64 res[\"default_digits_binary\"] = 0.9927140255009107 res[\"default_digits_multilabel\"]", "import sklearn.linear_model from autosklearn.pipeline.components.classification.passive_aggressive import \\ PassiveAggressive from .test_base import" ]
[ "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "# coding=utf-8 # Copyright 2020 The TensorFlow Datasets Authors. #", "as tfds from tensorflow_datasets.structured.dart import dart class DartTest(tfds.testing.DatasetBuilderTestCase): DATASET_CLASS =", "# # Licensed under the Apache License, Version 2.0 (the", "compliance with the License. # You may obtain a copy", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "2.0 (the \"License\"); # you may not use this file", "agreed to in writing, software # distributed under the License", "file except in compliance with the License. # You may", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "Unless required by applicable law or agreed to in writing,", "[ [ \"Mars Hill College\", \"JOINED\", \"1973\" ], [ \"Mars", "] \"\"\" expected_examples = [{ 'input_text': { 'table': [ {", "tensorflow_datasets.public_api as tfds from tensorflow_datasets.structured.dart import dart class DartTest(tfds.testing.DatasetBuilderTestCase): DATASET_CLASS", "}, { 'column_header': 'predicate', 'row_number': 0, 'content': 'JOINED', }, {", "dart.Dart() with mock.patch.object( json, 'load', return_value=json.loads(json_str)), mock.patch.object( tf, 'io'): for", "joined in 1973.' }] dart_dataset = dart.Dart() with mock.patch.object( json,", "distributed under the License is distributed on an \"AS IS\"", "{ 'column_header': 'subject', 'row_number': 0, 'content': 'Mars Hill College', },", "# limitations under the License. 
\"\"\"Dart dataset tests.\"\"\" import json", "the specific language governing permissions and # limitations under the", "'content': '1973', }, { 'column_header': 'subject', 'row_number': 1, 'content': 'Mars", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "json import mock import tensorflow.compat.v2 as tf import tensorflow_datasets.public_api as", "DartTest(tfds.testing.DatasetBuilderTestCase): DATASET_CLASS = dart.Dart SPLITS = { 'train': 2, 'validation':", "Copyright 2020 The TensorFlow Datasets Authors. # # Licensed under", "] } ] \"\"\" expected_examples = [{ 'input_text': { 'table':", "}, { 'column_header': 'object', 'row_number': 0, 'content': '1973', }, {", "return_value=json.loads(json_str)), mock.patch.object( tf, 'io'): for i, (_, example) in enumerate(dart_dataset._generate_examples('')):", "= [{ 'input_text': { 'table': [ { 'column_header': 'subject', 'row_number':", "express or implied. # See the License for the specific", "applicable law or agreed to in writing, software # distributed", "2020 The TensorFlow Datasets Authors. # # Licensed under the", "{ 'column_header': 'predicate', 'row_number': 1, 'content': 'LOCATION', }, { 'column_header':", "except in compliance with the License. # You may obtain", "'validation': 1, 'test': 2, } def test_split_generators(self): json_str = \"\"\"", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "in 1973.' 
}] dart_dataset = dart.Dart() with mock.patch.object( json, 'load',", "'io'): for i, (_, example) in enumerate(dart_dataset._generate_examples('')): self.assertCountEqual(example, expected_examples[i]) if", "0, 'content': 'Mars Hill College', }, { 'column_header': 'predicate', 'row_number':", "'LOCATION', }, { 'column_header': 'object', 'row_number': 1, 'content': 'Mars Hill,", "<gh_stars>1-10 # coding=utf-8 # Copyright 2020 The TensorFlow Datasets Authors.", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "not use this file except in compliance with the License.", "1973.' }] dart_dataset = dart.Dart() with mock.patch.object( json, 'load', return_value=json.loads(json_str)),", "'object', 'row_number': 0, 'content': '1973', }, { 'column_header': 'subject', 'row_number':", "DATASET_CLASS = dart.Dart SPLITS = { 'train': 2, 'validation': 1,", "'content': 'LOCATION', }, { 'column_header': 'object', 'row_number': 1, 'content': 'Mars", "} def test_split_generators(self): json_str = \"\"\" [ { \"tripleset\": [", "as tf import tensorflow_datasets.public_api as tfds from tensorflow_datasets.structured.dart import dart", "\"Mars Hill, North Carolina\" ] ], \"subtree_was_extended\": true, \"annotations\": [", "Hill, North Carolina\" ] ], \"subtree_was_extended\": true, \"annotations\": [ {", "writing, software # distributed under the License is distributed on", "tf import tensorflow_datasets.public_api as tfds from tensorflow_datasets.structured.dart import dart class", "in writing, software # distributed under the License is distributed", "true, \"annotations\": [ { \"source\": \"WikiSQL_decl_sents\", \"text\": \"A school from", "0, 'content': 'JOINED', }, { 'column_header': 'object', 'row_number': 0, 'content':", "you may not use this file except in compliance with", "North Carolina, joined in 1973.\" } ] } ] \"\"\"", "{ \"tripleset\": [ [ \"Mars Hill College\", \"JOINED\", \"1973\" ],", "# Licensed under the Apache License, Version 2.0 (the 
\"License\");", "dart_dataset = dart.Dart() with mock.patch.object( json, 'load', return_value=json.loads(json_str)), mock.patch.object( tf,", "Carolina, joined in 1973.\" } ] } ] \"\"\" expected_examples", "with mock.patch.object( json, 'load', return_value=json.loads(json_str)), mock.patch.object( tf, 'io'): for i,", "'Mars Hill, North Carolina', }, ] }, 'target_text': 'A school", "\"LOCATION\", \"Mars Hill, North Carolina\" ] ], \"subtree_was_extended\": true, \"annotations\":", "{ 'column_header': 'predicate', 'row_number': 0, 'content': 'JOINED', }, { 'column_header':", "import mock import tensorflow.compat.v2 as tf import tensorflow_datasets.public_api as tfds", "} ] } ] \"\"\" expected_examples = [{ 'input_text': {", "'object', 'row_number': 1, 'content': 'Mars Hill, North Carolina', }, ]", "'table': [ { 'column_header': 'subject', 'row_number': 0, 'content': 'Mars Hill", "use this file except in compliance with the License. #", "'input_text': { 'table': [ { 'column_header': 'subject', 'row_number': 0, 'content':", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "limitations under the License. \"\"\"Dart dataset tests.\"\"\" import json import", "import json import mock import tensorflow.compat.v2 as tf import tensorflow_datasets.public_api", "governing permissions and # limitations under the License. \"\"\"Dart dataset", "Authors. # # Licensed under the Apache License, Version 2.0", "] ], \"subtree_was_extended\": true, \"annotations\": [ { \"source\": \"WikiSQL_decl_sents\", \"text\":", "'JOINED', }, { 'column_header': 'object', 'row_number': 0, 'content': '1973', },", "from tensorflow_datasets.structured.dart import dart class DartTest(tfds.testing.DatasetBuilderTestCase): DATASET_CLASS = dart.Dart SPLITS", "= dart.Dart SPLITS = { 'train': 2, 'validation': 1, 'test':", "CONDITIONS OF ANY KIND, either express or implied. 
# See", "2, 'validation': 1, 'test': 2, } def test_split_generators(self): json_str =", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "[ \"Mars Hill College\", \"JOINED\", \"1973\" ], [ \"Mars Hill", "under the License. \"\"\"Dart dataset tests.\"\"\" import json import mock", "or implied. # See the License for the specific language", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "\"tripleset\": [ [ \"Mars Hill College\", \"JOINED\", \"1973\" ], [", "License. # You may obtain a copy of the License", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "License, Version 2.0 (the \"License\"); # you may not use", "(_, example) in enumerate(dart_dataset._generate_examples('')): self.assertCountEqual(example, expected_examples[i]) if __name__ == '__main__':", "'content': 'Mars Hill, North Carolina', }, ] }, 'target_text': 'A", "# You may obtain a copy of the License at", "\"A school from Mars Hill, North Carolina, joined in 1973.\"", "KIND, either express or implied. # See the License for", "specific language governing permissions and # limitations under the License.", "'column_header': 'predicate', 'row_number': 1, 'content': 'LOCATION', }, { 'column_header': 'object',", "}, { 'column_header': 'predicate', 'row_number': 1, 'content': 'LOCATION', }, {", "under the License is distributed on an \"AS IS\" BASIS,", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "License for the specific language governing permissions and # limitations", "Mars Hill, North Carolina, joined in 1973.\" } ] }", "tests.\"\"\" import json import mock import tensorflow.compat.v2 as tf import", "College', }, { 'column_header': 'predicate', 'row_number': 1, 'content': 'LOCATION', },", "permissions and # limitations under the License. 
\"\"\"Dart dataset tests.\"\"\"", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "json_str = \"\"\" [ { \"tripleset\": [ [ \"Mars Hill", "from Mars Hill, North Carolina, joined in 1973.\" } ]", "Hill, North Carolina, joined in 1973.\" } ] } ]", "dart class DartTest(tfds.testing.DatasetBuilderTestCase): DATASET_CLASS = dart.Dart SPLITS = { 'train':", "'target_text': 'A school from Mars Hill, North Carolina, joined in", "dart.Dart SPLITS = { 'train': 2, 'validation': 1, 'test': 2,", "License. \"\"\"Dart dataset tests.\"\"\" import json import mock import tensorflow.compat.v2", "Carolina, joined in 1973.' }] dart_dataset = dart.Dart() with mock.patch.object(", "\"Mars Hill College\", \"LOCATION\", \"Mars Hill, North Carolina\" ] ],", "= dart.Dart() with mock.patch.object( json, 'load', return_value=json.loads(json_str)), mock.patch.object( tf, 'io'):", "example) in enumerate(dart_dataset._generate_examples('')): self.assertCountEqual(example, expected_examples[i]) if __name__ == '__main__': tfds.testing.test_main()", "tfds from tensorflow_datasets.structured.dart import dart class DartTest(tfds.testing.DatasetBuilderTestCase): DATASET_CLASS = dart.Dart", "# Copyright 2020 The TensorFlow Datasets Authors. # # Licensed", "coding=utf-8 # Copyright 2020 The TensorFlow Datasets Authors. # #", "] }, 'target_text': 'A school from Mars Hill, North Carolina,", "'row_number': 1, 'content': 'LOCATION', }, { 'column_header': 'object', 'row_number': 1,", "Carolina\" ] ], \"subtree_was_extended\": true, \"annotations\": [ { \"source\": \"WikiSQL_decl_sents\",", "school from Mars Hill, North Carolina, joined in 1973.' 
}]", "joined in 1973.\" } ] } ] \"\"\" expected_examples =", "2, } def test_split_generators(self): json_str = \"\"\" [ { \"tripleset\":", "the License for the specific language governing permissions and #", "\"source\": \"WikiSQL_decl_sents\", \"text\": \"A school from Mars Hill, North Carolina,", "json, 'load', return_value=json.loads(json_str)), mock.patch.object( tf, 'io'): for i, (_, example)", "(the \"License\"); # you may not use this file except", "'row_number': 0, 'content': 'Mars Hill College', }, { 'column_header': 'predicate',", "Apache License, Version 2.0 (the \"License\"); # you may not", "# you may not use this file except in compliance", "either express or implied. # See the License for the", "'Mars Hill College', }, { 'column_header': 'predicate', 'row_number': 1, 'content':", "dataset tests.\"\"\" import json import mock import tensorflow.compat.v2 as tf", "[ { 'column_header': 'subject', 'row_number': 0, 'content': 'Mars Hill College',", "OR CONDITIONS OF ANY KIND, either express or implied. #", "0, 'content': '1973', }, { 'column_header': 'subject', 'row_number': 1, 'content':", "], \"subtree_was_extended\": true, \"annotations\": [ { \"source\": \"WikiSQL_decl_sents\", \"text\": \"A", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "mock.patch.object( tf, 'io'): for i, (_, example) in enumerate(dart_dataset._generate_examples('')): self.assertCountEqual(example,", "the License is distributed on an \"AS IS\" BASIS, #", "in compliance with the License. # You may obtain a", "Hill, North Carolina, joined in 1973.' }] dart_dataset = dart.Dart()", "'test': 2, } def test_split_generators(self): json_str = \"\"\" [ {", "language governing permissions and # limitations under the License. 
\"\"\"Dart", "\"1973\" ], [ \"Mars Hill College\", \"LOCATION\", \"Mars Hill, North", "'column_header': 'object', 'row_number': 1, 'content': 'Mars Hill, North Carolina', },", "software # distributed under the License is distributed on an", "College\", \"LOCATION\", \"Mars Hill, North Carolina\" ] ], \"subtree_was_extended\": true,", "College', }, { 'column_header': 'predicate', 'row_number': 0, 'content': 'JOINED', },", "'column_header': 'predicate', 'row_number': 0, 'content': 'JOINED', }, { 'column_header': 'object',", "'row_number': 0, 'content': 'JOINED', }, { 'column_header': 'object', 'row_number': 0,", "Hill College\", \"LOCATION\", \"Mars Hill, North Carolina\" ] ], \"subtree_was_extended\":", "# # Unless required by applicable law or agreed to", "{ 'column_header': 'subject', 'row_number': 1, 'content': 'Mars Hill College', },", "}, 'target_text': 'A school from Mars Hill, North Carolina, joined", "Carolina', }, ] }, 'target_text': 'A school from Mars Hill,", "\"JOINED\", \"1973\" ], [ \"Mars Hill College\", \"LOCATION\", \"Mars Hill,", "} ] \"\"\" expected_examples = [{ 'input_text': { 'table': [", "}] dart_dataset = dart.Dart() with mock.patch.object( json, 'load', return_value=json.loads(json_str)), mock.patch.object(", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "the License. 
\"\"\"Dart dataset tests.\"\"\" import json import mock import", "test_split_generators(self): json_str = \"\"\" [ { \"tripleset\": [ [ \"Mars", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "[ { \"source\": \"WikiSQL_decl_sents\", \"text\": \"A school from Mars Hill,", "in 1973.\" } ] } ] \"\"\" expected_examples = [{", "'Mars Hill College', }, { 'column_header': 'predicate', 'row_number': 0, 'content':", "}, { 'column_header': 'subject', 'row_number': 1, 'content': 'Mars Hill College',", "1, 'content': 'Mars Hill College', }, { 'column_header': 'predicate', 'row_number':", "'content': 'Mars Hill College', }, { 'column_header': 'predicate', 'row_number': 1,", "'row_number': 1, 'content': 'Mars Hill, North Carolina', }, ] },", "Hill, North Carolina', }, ] }, 'target_text': 'A school from", "Version 2.0 (the \"License\"); # you may not use this", "SPLITS = { 'train': 2, 'validation': 1, 'test': 2, }", "\"\"\" expected_examples = [{ 'input_text': { 'table': [ { 'column_header':", "mock.patch.object( json, 'load', return_value=json.loads(json_str)), mock.patch.object( tf, 'io'): for i, (_,", "}, ] }, 'target_text': 'A school from Mars Hill, North", "law or agreed to in writing, software # distributed under", "North Carolina, joined in 1973.' 
}] dart_dataset = dart.Dart() with", "College\", \"JOINED\", \"1973\" ], [ \"Mars Hill College\", \"LOCATION\", \"Mars", "[ { \"tripleset\": [ [ \"Mars Hill College\", \"JOINED\", \"1973\"", "'content': 'Mars Hill College', }, { 'column_header': 'predicate', 'row_number': 0,", "'1973', }, { 'column_header': 'subject', 'row_number': 1, 'content': 'Mars Hill", "for i, (_, example) in enumerate(dart_dataset._generate_examples('')): self.assertCountEqual(example, expected_examples[i]) if __name__", "{ \"source\": \"WikiSQL_decl_sents\", \"text\": \"A school from Mars Hill, North", "'content': 'JOINED', }, { 'column_header': 'object', 'row_number': 0, 'content': '1973',", "1973.\" } ] } ] \"\"\" expected_examples = [{ 'input_text':", "{ 'table': [ { 'column_header': 'subject', 'row_number': 0, 'content': 'Mars", "'row_number': 0, 'content': '1973', }, { 'column_header': 'subject', 'row_number': 1,", "implied. # See the License for the specific language governing", "from Mars Hill, North Carolina, joined in 1973.' }] dart_dataset", "The TensorFlow Datasets Authors. 
# # Licensed under the Apache", "under the Apache License, Version 2.0 (the \"License\"); # you", "[ \"Mars Hill College\", \"LOCATION\", \"Mars Hill, North Carolina\" ]", "\"License\"); # you may not use this file except in", "{ 'column_header': 'object', 'row_number': 0, 'content': '1973', }, { 'column_header':", "school from Mars Hill, North Carolina, joined in 1973.\" }", "1, 'content': 'LOCATION', }, { 'column_header': 'object', 'row_number': 1, 'content':", "\"Mars Hill College\", \"JOINED\", \"1973\" ], [ \"Mars Hill College\",", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "Hill College', }, { 'column_header': 'predicate', 'row_number': 0, 'content': 'JOINED',", "1, 'content': 'Mars Hill, North Carolina', }, ] }, 'target_text':", "\"text\": \"A school from Mars Hill, North Carolina, joined in", "by applicable law or agreed to in writing, software #", "# distributed under the License is distributed on an \"AS", "OF ANY KIND, either express or implied. # See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "= { 'train': 2, 'validation': 1, 'test': 2, } def", "Mars Hill, North Carolina, joined in 1973.' }] dart_dataset =", "may obtain a copy of the License at # #", "# Unless required by applicable law or agreed to in", "ANY KIND, either express or implied. 
# See the License", "See the License for the specific language governing permissions and", "North Carolina\" ] ], \"subtree_was_extended\": true, \"annotations\": [ { \"source\":", "def test_split_generators(self): json_str = \"\"\" [ { \"tripleset\": [ [", "expected_examples = [{ 'input_text': { 'table': [ { 'column_header': 'subject',", "'row_number': 1, 'content': 'Mars Hill College', }, { 'column_header': 'predicate',", "i, (_, example) in enumerate(dart_dataset._generate_examples('')): self.assertCountEqual(example, expected_examples[i]) if __name__ ==", "import dart class DartTest(tfds.testing.DatasetBuilderTestCase): DATASET_CLASS = dart.Dart SPLITS = {", "the License. # You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "class DartTest(tfds.testing.DatasetBuilderTestCase): DATASET_CLASS = dart.Dart SPLITS = { 'train': 2,", "'subject', 'row_number': 1, 'content': 'Mars Hill College', }, { 'column_header':", "import tensorflow.compat.v2 as tf import tensorflow_datasets.public_api as tfds from tensorflow_datasets.structured.dart", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "\"\"\" [ { \"tripleset\": [ [ \"Mars Hill College\", \"JOINED\",", "to in writing, software # distributed under the License is", "Datasets Authors. 
# # Licensed under the Apache License, Version", "], [ \"Mars Hill College\", \"LOCATION\", \"Mars Hill, North Carolina\"", "'column_header': 'subject', 'row_number': 0, 'content': 'Mars Hill College', }, {", "'column_header': 'subject', 'row_number': 1, 'content': 'Mars Hill College', }, {", "[{ 'input_text': { 'table': [ { 'column_header': 'subject', 'row_number': 0,", "'predicate', 'row_number': 1, 'content': 'LOCATION', }, { 'column_header': 'object', 'row_number':", "\"WikiSQL_decl_sents\", \"text\": \"A school from Mars Hill, North Carolina, joined", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "# See the License for the specific language governing permissions", "}, { 'column_header': 'object', 'row_number': 1, 'content': 'Mars Hill, North", "\"subtree_was_extended\": true, \"annotations\": [ { \"source\": \"WikiSQL_decl_sents\", \"text\": \"A school", "You may obtain a copy of the License at #", "1, 'test': 2, } def test_split_generators(self): json_str = \"\"\" [", "= \"\"\" [ { \"tripleset\": [ [ \"Mars Hill College\",", "Hill College\", \"JOINED\", \"1973\" ], [ \"Mars Hill College\", \"LOCATION\",", "\"\"\"Dart dataset tests.\"\"\" import json import mock import tensorflow.compat.v2 as", "may not use this file except in compliance with the", "or agreed to in writing, software # distributed under the", "and # limitations under the License. 
\"\"\"Dart dataset tests.\"\"\" import", "'predicate', 'row_number': 0, 'content': 'JOINED', }, { 'column_header': 'object', 'row_number':", "'column_header': 'object', 'row_number': 0, 'content': '1973', }, { 'column_header': 'subject',", "required by applicable law or agreed to in writing, software", "'load', return_value=json.loads(json_str)), mock.patch.object( tf, 'io'): for i, (_, example) in", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "tf, 'io'): for i, (_, example) in enumerate(dart_dataset._generate_examples('')): self.assertCountEqual(example, expected_examples[i])", "\"annotations\": [ { \"source\": \"WikiSQL_decl_sents\", \"text\": \"A school from Mars", "tensorflow.compat.v2 as tf import tensorflow_datasets.public_api as tfds from tensorflow_datasets.structured.dart import", "'subject', 'row_number': 0, 'content': 'Mars Hill College', }, { 'column_header':", "with the License. # You may obtain a copy of", "Hill College', }, { 'column_header': 'predicate', 'row_number': 1, 'content': 'LOCATION',", "this file except in compliance with the License. # You", "{ 'column_header': 'object', 'row_number': 1, 'content': 'Mars Hill, North Carolina',", "the Apache License, Version 2.0 (the \"License\"); # you may", "tensorflow_datasets.structured.dart import dart class DartTest(tfds.testing.DatasetBuilderTestCase): DATASET_CLASS = dart.Dart SPLITS =", "North Carolina', }, ] }, 'target_text': 'A school from Mars", "{ 'train': 2, 'validation': 1, 'test': 2, } def test_split_generators(self):", "TensorFlow Datasets Authors. 
# # Licensed under the Apache License,", "import tensorflow_datasets.public_api as tfds from tensorflow_datasets.structured.dart import dart class DartTest(tfds.testing.DatasetBuilderTestCase):", "mock import tensorflow.compat.v2 as tf import tensorflow_datasets.public_api as tfds from", "'A school from Mars Hill, North Carolina, joined in 1973.'", "'train': 2, 'validation': 1, 'test': 2, } def test_split_generators(self): json_str" ]
[ "trues = [] labels = [] with torch.no_grad(): for i,", "model_optim.zero_grad() batch_x = batch_x.double().to(self.device) batch_y = batch_y.double() batch_x_mark = batch_x_mark.double().to(self.device)", "labels.shape[-1]) print('test shape:', preds.shape, trues.shape) # result save folder_path =", "enumerate(train_loader): iter_count += 1 model_optim.zero_grad() batch_x = batch_x.double().to(self.device) batch_y =", "batch_y.detach().cpu() loss = criterion(pred, true) total_loss.append(loss) total_loss = np.average(total_loss) self.model.train()", "time import warnings warnings.filterwarnings('ignore') class Exp_Informer_DAD(Exp_Basic): def __init__(self, args): super(Exp_Informer_DAD,", "= { 'informer':Informer, } if self.args.model=='informer': model = model_dict[self.args.model]( self.args.enc_in,", "def test(self, setting): test_data, test_loader = self._get_data(flag='test') self.model.eval() preds =", "adjust_learning_rate from utils.metrics import metric from sklearn.metrics import classification_report import", "= np.average(total_loss) self.model.train() return total_loss def train(self, setting): train_data, train_loader", "import os import time import warnings warnings.filterwarnings('ignore') class Exp_Informer_DAD(Exp_Basic): def", "batch_x_mark, dec_inp, batch_y_mark) batch_y = batch_y[:,-self.args.pred_len:,:].to(self.device) pred = outputs.detach().cpu() true", "_select_criterion(self): criterion = nn.MSELoss() return criterion def vali(self, vali_data, vali_loader,", "epoch)*train_steps - i) print('\\tspeed: {:.4f}s/iter; left time: {:.4f}s'.format(speed, left_time)) iter_count", "np.average(train_loss) vali_loss = self.vali(vali_data, vali_loader, criterion) test_loss = self.vali(test_data, test_loader,", "self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark) batch_y = batch_y[:,-self.args.pred_len:,:].to(self.device) pred = outputs.detach().cpu().numpy()#.squeeze()", "= [] self.model.train() for i, 
(batch_x,batch_y,batch_x_mark,batch_y_mark) in enumerate(train_loader): iter_count +=", "adjust_learning_rate(model_optim, epoch+1, self.args) best_model_path = path+'/'+'checkpoint.pth' self.model.load_state_dict(torch.load(best_model_path)) return self.model def", "test_data, test_loader = self._get_data(flag = 'test') path = './checkpoints/'+setting if", "import Informer from utils.tools import EarlyStopping, adjust_learning_rate from utils.metrics import", "import EarlyStopping, adjust_learning_rate from utils.metrics import metric from sklearn.metrics import", "Loss: {4:.7f}\".format( epoch + 1, train_steps, train_loss, vali_loss, test_loss)) early_stopping(vali_loss,", "| loss: {2:.7f}\".format(i + 1, epoch + 1, loss.item())) speed", "model_optim def _select_criterion(self): criterion = nn.MSELoss() return criterion def vali(self,", "models.model import Informer from utils.tools import EarlyStopping, adjust_learning_rate from utils.metrics", "folder_path = './results/' + setting +'/' if not os.path.exists(folder_path): os.makedirs(folder_path)", "= 'train') vali_data, vali_loader = self._get_data(flag = 'val') test_data, test_loader", "1, train_steps, train_loss, vali_loss, test_loss)) early_stopping(vali_loss, self.model, path) if early_stopping.early_stop:", "self.args.model=='informer': model = model_dict[self.args.model]( self.args.enc_in, self.args.dec_in, self.args.c_out, self.args.seq_len, self.args.label_len, self.args.pred_len,", "import time import warnings warnings.filterwarnings('ignore') class Exp_Informer_DAD(Exp_Basic): def __init__(self, args):", "self.model.eval() total_loss = [] for i, (batch_x,batch_y,batch_x_mark,batch_y_mark,batch_label) in enumerate(vali_loader): batch_x", "self.model def test(self, setting): test_data, test_loader = self._get_data(flag='test') self.model.eval() preds", "range(self.args.train_epochs): iter_count = 0 train_loss = [] self.model.train() for i,", "self.vali(vali_data, vali_loader, criterion) test_loss = 
self.vali(test_data, test_loader, criterion) print(\"Epoch: {0},", "batch_size=batch_size, shuffle=shuffle_flag, num_workers=args.num_workers, drop_last=drop_last) return data_set, data_loader def _select_optimizer(self): model_optim", "= DataLoader( data_set, batch_size=batch_size, shuffle=shuffle_flag, num_workers=args.num_workers, drop_last=drop_last) return data_set, data_loader", "shape:', preds.shape, trues.shape) # result save folder_path = './results/' +", "os import time import warnings warnings.filterwarnings('ignore') class Exp_Informer_DAD(Exp_Basic): def __init__(self,", "= self._select_optimizer() criterion = self._select_criterion() for epoch in range(self.args.train_epochs): iter_count", "self.model.eval() preds = [] trues = [] labels = []", "def _build_model(self): model_dict = { 'informer':Informer, } if self.args.model=='informer': model", "dec_inp = torch.cat([batch_y[:,:self.args.label_len,:], dec_inp], dim=1).double().to(self.device) # encoder - decoder outputs", "= True; batch_size = args.batch_size else: shuffle_flag = True; drop_last", "return criterion def vali(self, vali_data, vali_loader, criterion): self.model.eval() total_loss =", "+ 1, epoch + 1, loss.item())) speed = (time.time()-time_now)/iter_count left_time", "shape:', preds.shape, trues.shape) preds = preds.reshape(-1, preds.shape[-2], preds.shape[-1]) trues =", "= [] labels = [] with torch.no_grad(): for i, (batch_x,batch_y,batch_x_mark,batch_y_mark,batch_label)", "batch_y = batch_y[:,-self.args.pred_len:,:].to(self.device) pred = outputs.detach().cpu().numpy()#.squeeze() true = batch_y.detach().cpu().numpy()#.squeeze() batch_label", "os.path.exists(folder_path): os.makedirs(folder_path) mae, mse, rmse, mape, mspe = metric(preds, trues)", "= self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark) batch_y = batch_y[:,-self.args.pred_len:,:].to(self.device) pred =", "batch_y.detach().cpu().numpy()#.squeeze() batch_label = batch_label.long().detach().numpy() preds.append(pred) 
trues.append(true) labels.append(batch_label) preds = np.array(preds)", "self.args.d_model, self.args.n_heads, self.args.e_layers, self.args.d_layers, self.args.d_ff, self.args.dropout, self.args.attn, self.args.embed, self.args.data[:-1], self.args.activation,", "def _get_data(self, flag): args = self.args data_dict = { 'SMAP':NASA_Anomaly,", "encoder - decoder outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark) batch_y", "batch_y_mark) batch_y = batch_y[:,-self.args.pred_len:,:].to(self.device) pred = outputs.detach().cpu().numpy()#.squeeze() true = batch_y.detach().cpu().numpy()#.squeeze()", "= Data( root_path=args.root_path, data_path=args.data_path, flag=flag, size=[args.seq_len, args.label_len, args.pred_len], features=args.features, target=args.target", "= metric(preds, trues) print('mse:{}, mae:{}'.format(mse, mae)) np.save(folder_path+'metrics.npy', np.array([mae, mse, rmse,", "model_dict = { 'informer':Informer, } if self.args.model=='informer': model = model_dict[self.args.model](", "= False; drop_last = True; batch_size = args.batch_size else: shuffle_flag", "setting +'/' if not os.path.exists(folder_path): os.makedirs(folder_path) mae, mse, rmse, mape,", "setting): train_data, train_loader = self._get_data(flag = 'train') vali_data, vali_loader =", "(batch_x,batch_y,batch_x_mark,batch_y_mark,batch_label) in enumerate(test_loader): batch_x = batch_x.double().to(self.device) batch_y = batch_y.double() batch_x_mark", "if not os.path.exists(folder_path): os.makedirs(folder_path) mae, mse, rmse, mape, mspe =", "exp.exp_basic import Exp_Basic from models.model import Informer from utils.tools import", "preds.shape, trues.shape) # result save folder_path = './results/' + setting", "self.args data_dict = { 'SMAP':NASA_Anomaly, 'MSL':NASA_Anomaly, 'WADI':WADI, } Data =", "class Exp_Informer_DAD(Exp_Basic): def __init__(self, args): super(Exp_Informer_DAD, self).__init__(args) def _build_model(self): model_dict", "} Data = 
data_dict[self.args.data] if flag == 'test': shuffle_flag =", "decoder outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark) batch_y = batch_y[:,-self.args.pred_len:,:].to(self.device)", "test_loader = self._get_data(flag = 'test') path = './checkpoints/'+setting if not", "_build_model(self): model_dict = { 'informer':Informer, } if self.args.model=='informer': model =", "utils.tools import EarlyStopping, adjust_learning_rate from utils.metrics import metric from sklearn.metrics", "true = batch_y.detach().cpu() loss = criterion(pred, true) total_loss.append(loss) total_loss =", "from utils.metrics import metric from sklearn.metrics import classification_report import numpy", "self.args.d_layers, self.args.d_ff, self.args.dropout, self.args.attn, self.args.embed, self.args.data[:-1], self.args.activation, self.device ) return", "torch.cat([batch_y[:,:self.args.label_len,:], dec_inp], dim=1).double().to(self.device) # encoder - decoder outputs = self.model(batch_x,", "= np.array(trues) labels = np.array(labels) print('test shape:', preds.shape, trues.shape) preds", "criterion(pred, true) total_loss.append(loss) total_loss = np.average(total_loss) self.model.train() return total_loss def", "import torch.nn as nn from torch import optim from torch.utils.data", "total_loss = [] for i, (batch_x,batch_y,batch_x_mark,batch_y_mark,batch_label) in enumerate(vali_loader): batch_x =", "torch import torch.nn as nn from torch import optim from", "if early_stopping.early_stop: print(\"Early stopping\") break adjust_learning_rate(model_optim, epoch+1, self.args) best_model_path =", "EarlyStopping, adjust_learning_rate from utils.metrics import metric from sklearn.metrics import classification_report", "= [] trues = [] labels = [] with torch.no_grad():", "i, (batch_x,batch_y,batch_x_mark,batch_y_mark) in enumerate(train_loader): iter_count += 1 model_optim.zero_grad() batch_x =", "= np.array(preds) trues = np.array(trues) labels = np.array(labels) print('test 
shape:',", "preds.shape, trues.shape) preds = preds.reshape(-1, preds.shape[-2], preds.shape[-1]) trues = trues.reshape(-1,", "mae)) np.save(folder_path+'metrics.npy', np.array([mae, mse, rmse, mape, mspe])) np.save(folder_path+'pred.npy', preds) np.save(folder_path+'true.npy',", "batch_x_mark, dec_inp, batch_y_mark) batch_y = batch_y[:,-self.args.pred_len:,:].to(self.device) pred = outputs.detach().cpu().numpy()#.squeeze() true", "= self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark) batch_y = batch_y[:,-self.args.pred_len:,:].to(self.device) loss =", "True; batch_size = args.batch_size else: shuffle_flag = True; drop_last =", "= time.time() train_steps = len(train_loader) early_stopping = EarlyStopping(patience=self.args.patience, verbose=True) model_optim", "{0}, Steps: {1} | Train Loss: {2:.7f} Vali Loss: {3:.7f}", "= self._get_data(flag='test') self.model.eval() preds = [] trues = [] labels", ") print(flag, len(data_set)) data_loader = DataLoader( data_set, batch_size=batch_size, shuffle=shuffle_flag, num_workers=args.num_workers,", "test_data, test_loader = self._get_data(flag='test') self.model.eval() preds = [] trues =", "__init__(self, args): super(Exp_Informer_DAD, self).__init__(args) def _build_model(self): model_dict = { 'informer':Informer,", "import metric from sklearn.metrics import classification_report import numpy as np", "dec_inp, batch_y_mark) batch_y = batch_y[:,-self.args.pred_len:,:].to(self.device) pred = outputs.detach().cpu().numpy()#.squeeze() true =", "+= 1 model_optim.zero_grad() batch_x = batch_x.double().to(self.device) batch_y = batch_y.double() batch_x_mark", "= './results/' + setting +'/' if not os.path.exists(folder_path): os.makedirs(folder_path) mae,", "preds.shape[-2], preds.shape[-1]) trues = trues.reshape(-1, trues.shape[-2], trues.shape[-1]) labels = labels.reshape(-1,", "'informer':Informer, } if self.args.model=='informer': model = model_dict[self.args.model]( self.args.enc_in, self.args.dec_in, self.args.c_out,", 
"[] self.model.train() for i, (batch_x,batch_y,batch_x_mark,batch_y_mark) in enumerate(train_loader): iter_count += 1", "self.args.d_ff, self.args.dropout, self.args.attn, self.args.embed, self.args.data[:-1], self.args.activation, self.device ) return model.double()", "preds.reshape(-1, preds.shape[-2], preds.shape[-1]) trues = trues.reshape(-1, trues.shape[-2], trues.shape[-1]) labels =", "batch_y = batch_y[:,-self.args.pred_len:,:].to(self.device) loss = criterion(outputs, batch_y) train_loss.append(loss.item()) if (i+1)", "batch_x_mark.double().to(self.device) batch_y_mark = batch_y_mark.double().to(self.device) # decoder input dec_inp = torch.zeros_like(batch_y[:,-self.args.pred_len:,:]).double()", "self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark) batch_y = batch_y[:,-self.args.pred_len:,:].to(self.device) loss = criterion(outputs,", "best_model_path = path+'/'+'checkpoint.pth' self.model.load_state_dict(torch.load(best_model_path)) return self.model def test(self, setting): test_data,", "args.batch_size data_set = Data( root_path=args.root_path, data_path=args.data_path, flag=flag, size=[args.seq_len, args.label_len, args.pred_len],", "dec_inp = torch.zeros_like(batch_y[:,-self.args.pred_len:,:]).double() dec_inp = torch.cat([batch_y[:,:self.args.label_len,:], dec_inp], dim=1).double().to(self.device) # encoder", "= path+'/'+'checkpoint.pth' self.model.load_state_dict(torch.load(best_model_path)) return self.model def test(self, setting): test_data, test_loader", "batch_size = args.batch_size data_set = Data( root_path=args.root_path, data_path=args.data_path, flag=flag, size=[args.seq_len,", "- i) print('\\tspeed: {:.4f}s/iter; left time: {:.4f}s'.format(speed, left_time)) iter_count =", "= np.array(labels) print('test shape:', preds.shape, trues.shape) preds = preds.reshape(-1, preds.shape[-2],", "# decoder input dec_inp = torch.zeros_like(batch_y[:,-self.args.pred_len:,:]).double() dec_inp = torch.cat([batch_y[:,:self.args.label_len,:], dec_inp],", 
"self.args.c_out, self.args.seq_len, self.args.label_len, self.args.pred_len, self.args.factor, self.args.d_model, self.args.n_heads, self.args.e_layers, self.args.d_layers, self.args.d_ff,", "'WADI':WADI, } Data = data_dict[self.args.data] if flag == 'test': shuffle_flag", "True; batch_size = args.batch_size data_set = Data( root_path=args.root_path, data_path=args.data_path, flag=flag,", "loss = criterion(pred, true) total_loss.append(loss) total_loss = np.average(total_loss) self.model.train() return", "if flag == 'test': shuffle_flag = False; drop_last = True;", "| Train Loss: {2:.7f} Vali Loss: {3:.7f} Test Loss: {4:.7f}\".format(", "rmse, mape, mspe])) np.save(folder_path+'pred.npy', preds) np.save(folder_path+'true.npy', trues) np.save(folder_path+'label.npy', labels) return", "args.label_len, args.pred_len], features=args.features, target=args.target ) print(flag, len(data_set)) data_loader = DataLoader(", "torch.no_grad(): for i, (batch_x,batch_y,batch_x_mark,batch_y_mark,batch_label) in enumerate(test_loader): batch_x = batch_x.double().to(self.device) batch_y", "Vali Loss: {3:.7f} Test Loss: {4:.7f}\".format( epoch + 1, train_steps,", "[] trues = [] labels = [] with torch.no_grad(): for", "data_dict = { 'SMAP':NASA_Anomaly, 'MSL':NASA_Anomaly, 'WADI':WADI, } Data = data_dict[self.args.data]", "self.args.dec_in, self.args.c_out, self.args.seq_len, self.args.label_len, self.args.pred_len, self.args.factor, self.args.d_model, self.args.n_heads, self.args.e_layers, self.args.d_layers,", "+'/' if not os.path.exists(folder_path): os.makedirs(folder_path) mae, mse, rmse, mape, mspe", "lr=self.args.learning_rate) return model_optim def _select_criterion(self): criterion = nn.MSELoss() return criterion", "import DataLoader import os import time import warnings warnings.filterwarnings('ignore') class", "early_stopping.early_stop: print(\"Early stopping\") break adjust_learning_rate(model_optim, epoch+1, self.args) best_model_path = path+'/'+'checkpoint.pth'", 
"_select_optimizer(self): model_optim = optim.Adam(self.model.parameters(), lr=self.args.learning_rate) return model_optim def _select_criterion(self): criterion", "from models.model import Informer from utils.tools import EarlyStopping, adjust_learning_rate from", "trues = np.array(trues) labels = np.array(labels) print('test shape:', preds.shape, trues.shape)", "save folder_path = './results/' + setting +'/' if not os.path.exists(folder_path):", "vali_data, vali_loader, criterion): self.model.eval() total_loss = [] for i, (batch_x,batch_y,batch_x_mark,batch_y_mark,batch_label)", "self.args.factor, self.args.d_model, self.args.n_heads, self.args.e_layers, self.args.d_layers, self.args.d_ff, self.args.dropout, self.args.attn, self.args.embed, self.args.data[:-1],", "'val') test_data, test_loader = self._get_data(flag = 'test') path = './checkpoints/'+setting", "preds.append(pred) trues.append(true) labels.append(batch_label) preds = np.array(preds) trues = np.array(trues) labels", "0 train_loss = [] self.model.train() for i, (batch_x,batch_y,batch_x_mark,batch_y_mark) in enumerate(train_loader):", "os.path.exists(path): os.makedirs(path) time_now = time.time() train_steps = len(train_loader) early_stopping =", "import torch import torch.nn as nn from torch import optim", "batch_x_mark = batch_x_mark.double().to(self.device) batch_y_mark = batch_y_mark.double().to(self.device) # decoder input dec_inp", "np.save(folder_path+'metrics.npy', np.array([mae, mse, rmse, mape, mspe])) np.save(folder_path+'pred.npy', preds) np.save(folder_path+'true.npy', trues)", "batch_size = args.batch_size else: shuffle_flag = True; drop_last = True;", "drop_last = True; batch_size = args.batch_size data_set = Data( root_path=args.root_path,", "Data( root_path=args.root_path, data_path=args.data_path, flag=flag, size=[args.seq_len, args.label_len, args.pred_len], features=args.features, target=args.target )", "mse, rmse, mape, mspe = metric(preds, trues) print('mse:{}, 
mae:{}'.format(mse, mae))", "NASA_Anomaly, WADI ) from exp.exp_basic import Exp_Basic from models.model import", "= args.batch_size data_set = Data( root_path=args.root_path, data_path=args.data_path, flag=flag, size=[args.seq_len, args.label_len,", "= True; drop_last = True; batch_size = args.batch_size data_set =", "True; drop_last = True; batch_size = args.batch_size data_set = Data(", "= self._get_data(flag = 'test') path = './checkpoints/'+setting if not os.path.exists(path):", "criterion = self._select_criterion() for epoch in range(self.args.train_epochs): iter_count = 0", "iter_count = 0 time_now = time.time() loss.backward() model_optim.step() train_loss =", "== 'test': shuffle_flag = False; drop_last = True; batch_size =", "{:.4f}s'.format(speed, left_time)) iter_count = 0 time_now = time.time() loss.backward() model_optim.step()", "labels = labels.reshape(-1, labels.shape[-1]) print('test shape:', preds.shape, trues.shape) # result", "Informer from utils.tools import EarlyStopping, adjust_learning_rate from utils.metrics import metric", "data_path=args.data_path, flag=flag, size=[args.seq_len, args.label_len, args.pred_len], features=args.features, target=args.target ) print(flag, len(data_set))", "batch_y[:,-self.args.pred_len:,:].to(self.device) loss = criterion(outputs, batch_y) train_loss.append(loss.item()) if (i+1) % 100==0:", "{ 'SMAP':NASA_Anomaly, 'MSL':NASA_Anomaly, 'WADI':WADI, } Data = data_dict[self.args.data] if flag", "flag == 'test': shuffle_flag = False; drop_last = True; batch_size", "self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark) batch_y = batch_y[:,-self.args.pred_len:,:].to(self.device) pred = outputs.detach().cpu()", "enumerate(vali_loader): batch_x = batch_x.double().to(self.device) batch_y = batch_y.double() batch_x_mark = batch_x_mark.double().to(self.device)", "= 'val') test_data, test_loader = self._get_data(flag = 'test') path =", "{1} | Train Loss: {2:.7f} Vali Loss: {3:.7f} Test Loss:", "print(\"\\titers: {0}, 
epoch: {1} | loss: {2:.7f}\".format(i + 1, epoch", "Steps: {1} | Train Loss: {2:.7f} Vali Loss: {3:.7f} Test", "criterion def vali(self, vali_data, vali_loader, criterion): self.model.eval() total_loss = []", "= True; batch_size = args.batch_size data_set = Data( root_path=args.root_path, data_path=args.data_path,", "os.makedirs(path) time_now = time.time() train_steps = len(train_loader) early_stopping = EarlyStopping(patience=self.args.patience,", "= { 'SMAP':NASA_Anomaly, 'MSL':NASA_Anomaly, 'WADI':WADI, } Data = data_dict[self.args.data] if", "train_loss.append(loss.item()) if (i+1) % 100==0: print(\"\\titers: {0}, epoch: {1} |", "train_steps, train_loss, vali_loss, test_loss)) early_stopping(vali_loss, self.model, path) if early_stopping.early_stop: print(\"Early", "= batch_y[:,-self.args.pred_len:,:].to(self.device) loss = criterion(outputs, batch_y) train_loss.append(loss.item()) if (i+1) %", "true = batch_y.detach().cpu().numpy()#.squeeze() batch_label = batch_label.long().detach().numpy() preds.append(pred) trues.append(true) labels.append(batch_label) preds", "= args.batch_size else: shuffle_flag = True; drop_last = True; batch_size", "i, (batch_x,batch_y,batch_x_mark,batch_y_mark,batch_label) in enumerate(vali_loader): batch_x = batch_x.double().to(self.device) batch_y = batch_y.double()", "time.time() loss.backward() model_optim.step() train_loss = np.average(train_loss) vali_loss = self.vali(vali_data, vali_loader,", "batch_y_mark) batch_y = batch_y[:,-self.args.pred_len:,:].to(self.device) pred = outputs.detach().cpu() true = batch_y.detach().cpu()", "self.args.activation, self.device ) return model.double() def _get_data(self, flag): args =", "import ( NASA_Anomaly, WADI ) from exp.exp_basic import Exp_Basic from", "= batch_label.long().detach().numpy() preds.append(pred) trues.append(true) labels.append(batch_label) preds = np.array(preds) trues =", "optim from torch.utils.data import DataLoader import os import time import", "np.average(total_loss) 
self.model.train() return total_loss def train(self, setting): train_data, train_loader =", "vali_loader, criterion) test_loss = self.vali(test_data, test_loader, criterion) print(\"Epoch: {0}, Steps:", "in enumerate(train_loader): iter_count += 1 model_optim.zero_grad() batch_x = batch_x.double().to(self.device) batch_y", "'MSL':NASA_Anomaly, 'WADI':WADI, } Data = data_dict[self.args.data] if flag == 'test':", "return self.model def test(self, setting): test_data, test_loader = self._get_data(flag='test') self.model.eval()", "def __init__(self, args): super(Exp_Informer_DAD, self).__init__(args) def _build_model(self): model_dict = {", "torch import optim from torch.utils.data import DataLoader import os import", "print(flag, len(data_set)) data_loader = DataLoader( data_set, batch_size=batch_size, shuffle=shuffle_flag, num_workers=args.num_workers, drop_last=drop_last)", "from sklearn.metrics import classification_report import numpy as np import torch", "from exp.exp_basic import Exp_Basic from models.model import Informer from utils.tools", "data_set = Data( root_path=args.root_path, data_path=args.data_path, flag=flag, size=[args.seq_len, args.label_len, args.pred_len], features=args.features,", "true) total_loss.append(loss) total_loss = np.average(total_loss) self.model.train() return total_loss def train(self,", "sklearn.metrics import classification_report import numpy as np import torch import", "pred = outputs.detach().cpu() true = batch_y.detach().cpu() loss = criterion(pred, true)", "0 time_now = time.time() loss.backward() model_optim.step() train_loss = np.average(train_loss) vali_loss", "args.pred_len], features=args.features, target=args.target ) print(flag, len(data_set)) data_loader = DataLoader( data_set,", "labels.append(batch_label) preds = np.array(preds) trues = np.array(trues) labels = np.array(labels)", "args.batch_size else: shuffle_flag = True; drop_last = True; batch_size =", "mape, mspe = metric(preds, trues) print('mse:{}, 
mae:{}'.format(mse, mae)) np.save(folder_path+'metrics.npy', np.array([mae,", "vali_loader = self._get_data(flag = 'val') test_data, test_loader = self._get_data(flag =", "Test Loss: {4:.7f}\".format( epoch + 1, train_steps, train_loss, vali_loss, test_loss))", "= nn.MSELoss() return criterion def vali(self, vali_data, vali_loader, criterion): self.model.eval()", "self.args.attn, self.args.embed, self.args.data[:-1], self.args.activation, self.device ) return model.double() def _get_data(self,", "{2:.7f}\".format(i + 1, epoch + 1, loss.item())) speed = (time.time()-time_now)/iter_count", "self.args.embed, self.args.data[:-1], self.args.activation, self.device ) return model.double() def _get_data(self, flag):", "warnings.filterwarnings('ignore') class Exp_Informer_DAD(Exp_Basic): def __init__(self, args): super(Exp_Informer_DAD, self).__init__(args) def _build_model(self):", "for i, (batch_x,batch_y,batch_x_mark,batch_y_mark,batch_label) in enumerate(test_loader): batch_x = batch_x.double().to(self.device) batch_y =", "= torch.cat([batch_y[:,:self.args.label_len,:], dec_inp], dim=1).double().to(self.device) # encoder - decoder outputs =", "preds = np.array(preds) trues = np.array(trues) labels = np.array(labels) print('test", "input dec_inp = torch.zeros_like(batch_y[:,-self.args.pred_len:,:]).double() dec_inp = torch.cat([batch_y[:,:self.args.label_len,:], dec_inp], dim=1).double().to(self.device) #", "EarlyStopping(patience=self.args.patience, verbose=True) model_optim = self._select_optimizer() criterion = self._select_criterion() for epoch", "total_loss = np.average(total_loss) self.model.train() return total_loss def train(self, setting): train_data,", "model_optim = self._select_optimizer() criterion = self._select_criterion() for epoch in range(self.args.train_epochs):", "'train') vali_data, vali_loader = self._get_data(flag = 'val') test_data, test_loader =", "trues.shape[-1]) labels = labels.reshape(-1, labels.shape[-1]) print('test shape:', preds.shape, 
trues.shape) #", "test_loss = self.vali(test_data, test_loader, criterion) print(\"Epoch: {0}, Steps: {1} |", "model_optim = optim.Adam(self.model.parameters(), lr=self.args.learning_rate) return model_optim def _select_criterion(self): criterion =", "from torch.utils.data import DataLoader import os import time import warnings", "left_time)) iter_count = 0 time_now = time.time() loss.backward() model_optim.step() train_loss", "= batch_y.detach().cpu().numpy()#.squeeze() batch_label = batch_label.long().detach().numpy() preds.append(pred) trues.append(true) labels.append(batch_label) preds =", "return model.double() def _get_data(self, flag): args = self.args data_dict =", "model_optim.step() train_loss = np.average(train_loss) vali_loss = self.vali(vali_data, vali_loader, criterion) test_loss", "(batch_x,batch_y,batch_x_mark,batch_y_mark,batch_label) in enumerate(vali_loader): batch_x = batch_x.double().to(self.device) batch_y = batch_y.double() batch_x_mark", "torch.zeros_like(batch_y[:,-self.args.pred_len:,:]).double() dec_inp = torch.cat([batch_y[:,:self.args.label_len,:], dec_inp], dim=1).double().to(self.device) # encoder - decoder", "early_stopping = EarlyStopping(patience=self.args.patience, verbose=True) model_optim = self._select_optimizer() criterion = self._select_criterion()", "i, (batch_x,batch_y,batch_x_mark,batch_y_mark,batch_label) in enumerate(test_loader): batch_x = batch_x.double().to(self.device) batch_y = batch_y.double()", "= trues.reshape(-1, trues.shape[-2], trues.shape[-1]) labels = labels.reshape(-1, labels.shape[-1]) print('test shape:',", "+ setting +'/' if not os.path.exists(folder_path): os.makedirs(folder_path) mae, mse, rmse,", "drop_last=drop_last) return data_set, data_loader def _select_optimizer(self): model_optim = optim.Adam(self.model.parameters(), lr=self.args.learning_rate)", "loss: {2:.7f}\".format(i + 1, epoch + 1, loss.item())) speed =", "data_loader def _select_optimizer(self): model_optim = 
optim.Adam(self.model.parameters(), lr=self.args.learning_rate) return model_optim def", "epoch+1, self.args) best_model_path = path+'/'+'checkpoint.pth' self.model.load_state_dict(torch.load(best_model_path)) return self.model def test(self,", "= batch_y.detach().cpu() loss = criterion(pred, true) total_loss.append(loss) total_loss = np.average(total_loss)", "self.args.dropout, self.args.attn, self.args.embed, self.args.data[:-1], self.args.activation, self.device ) return model.double() def", "stopping\") break adjust_learning_rate(model_optim, epoch+1, self.args) best_model_path = path+'/'+'checkpoint.pth' self.model.load_state_dict(torch.load(best_model_path)) return", "{1} | loss: {2:.7f}\".format(i + 1, epoch + 1, loss.item()))", "= [] for i, (batch_x,batch_y,batch_x_mark,batch_y_mark,batch_label) in enumerate(vali_loader): batch_x = batch_x.double().to(self.device)", "time_now = time.time() loss.backward() model_optim.step() train_loss = np.average(train_loss) vali_loss =", "self).__init__(args) def _build_model(self): model_dict = { 'informer':Informer, } if self.args.model=='informer':", "= speed*((self.args.train_epochs - epoch)*train_steps - i) print('\\tspeed: {:.4f}s/iter; left time:", "shuffle_flag = False; drop_last = True; batch_size = args.batch_size else:", "mae, mse, rmse, mape, mspe = metric(preds, trues) print('mse:{}, mae:{}'.format(mse,", "data_loader = DataLoader( data_set, batch_size=batch_size, shuffle=shuffle_flag, num_workers=args.num_workers, drop_last=drop_last) return data_set,", "super(Exp_Informer_DAD, self).__init__(args) def _build_model(self): model_dict = { 'informer':Informer, } if", "self.args.label_len, self.args.pred_len, self.args.factor, self.args.d_model, self.args.n_heads, self.args.e_layers, self.args.d_layers, self.args.d_ff, self.args.dropout, self.args.attn,", "batch_y[:,-self.args.pred_len:,:].to(self.device) pred = outputs.detach().cpu().numpy()#.squeeze() true = batch_y.detach().cpu().numpy()#.squeeze() 
batch_label = batch_label.long().detach().numpy()", "path = './checkpoints/'+setting if not os.path.exists(path): os.makedirs(path) time_now = time.time()", "= EarlyStopping(patience=self.args.patience, verbose=True) model_optim = self._select_optimizer() criterion = self._select_criterion() for", "for i, (batch_x,batch_y,batch_x_mark,batch_y_mark,batch_label) in enumerate(vali_loader): batch_x = batch_x.double().to(self.device) batch_y =", "Loss: {2:.7f} Vali Loss: {3:.7f} Test Loss: {4:.7f}\".format( epoch +", "flag): args = self.args data_dict = { 'SMAP':NASA_Anomaly, 'MSL':NASA_Anomaly, 'WADI':WADI,", "Loss: {3:.7f} Test Loss: {4:.7f}\".format( epoch + 1, train_steps, train_loss,", "1, epoch + 1, loss.item())) speed = (time.time()-time_now)/iter_count left_time =", "time: {:.4f}s'.format(speed, left_time)) iter_count = 0 time_now = time.time() loss.backward()", "batch_y.double() batch_x_mark = batch_x_mark.double().to(self.device) batch_y_mark = batch_y_mark.double().to(self.device) # decoder input", "self.model.load_state_dict(torch.load(best_model_path)) return self.model def test(self, setting): test_data, test_loader = self._get_data(flag='test')", "def _select_optimizer(self): model_optim = optim.Adam(self.model.parameters(), lr=self.args.learning_rate) return model_optim def _select_criterion(self):", "= batch_y.double() batch_x_mark = batch_x_mark.double().to(self.device) batch_y_mark = batch_y_mark.double().to(self.device) # decoder", "Train Loss: {2:.7f} Vali Loss: {3:.7f} Test Loss: {4:.7f}\".format( epoch", "train_data, train_loader = self._get_data(flag = 'train') vali_data, vali_loader = self._get_data(flag", "warnings warnings.filterwarnings('ignore') class Exp_Informer_DAD(Exp_Basic): def __init__(self, args): super(Exp_Informer_DAD, self).__init__(args) def", "utils.metrics import metric from sklearn.metrics import classification_report import numpy as", "'./checkpoints/'+setting if not os.path.exists(path): os.makedirs(path) time_now = 
time.time() train_steps =", "speed = (time.time()-time_now)/iter_count left_time = speed*((self.args.train_epochs - epoch)*train_steps - i)", "data_set, data_loader def _select_optimizer(self): model_optim = optim.Adam(self.model.parameters(), lr=self.args.learning_rate) return model_optim", "self._get_data(flag = 'train') vali_data, vali_loader = self._get_data(flag = 'val') test_data,", "= data_dict[self.args.data] if flag == 'test': shuffle_flag = False; drop_last", "(batch_x,batch_y,batch_x_mark,batch_y_mark) in enumerate(train_loader): iter_count += 1 model_optim.zero_grad() batch_x = batch_x.double().to(self.device)", "decoder input dec_inp = torch.zeros_like(batch_y[:,-self.args.pred_len:,:]).double() dec_inp = torch.cat([batch_y[:,:self.args.label_len,:], dec_inp], dim=1).double().to(self.device)", "import numpy as np import torch import torch.nn as nn", "[] labels = [] with torch.no_grad(): for i, (batch_x,batch_y,batch_x_mark,batch_y_mark,batch_label) in", "epoch + 1, train_steps, train_loss, vali_loss, test_loss)) early_stopping(vali_loss, self.model, path)", "= self.vali(test_data, test_loader, criterion) print(\"Epoch: {0}, Steps: {1} | Train", "self.args.data[:-1], self.args.activation, self.device ) return model.double() def _get_data(self, flag): args", "trues.shape[-2], trues.shape[-1]) labels = labels.reshape(-1, labels.shape[-1]) print('test shape:', preds.shape, trues.shape)", "% 100==0: print(\"\\titers: {0}, epoch: {1} | loss: {2:.7f}\".format(i +", "- epoch)*train_steps - i) print('\\tspeed: {:.4f}s/iter; left time: {:.4f}s'.format(speed, left_time))", "flag=flag, size=[args.seq_len, args.label_len, args.pred_len], features=args.features, target=args.target ) print(flag, len(data_set)) data_loader", "root_path=args.root_path, data_path=args.data_path, flag=flag, size=[args.seq_len, args.label_len, args.pred_len], features=args.features, target=args.target ) print(flag,", "iter_count += 1 model_optim.zero_grad() batch_x = 
batch_x.double().to(self.device) batch_y = batch_y.double()", "{ 'informer':Informer, } if self.args.model=='informer': model = model_dict[self.args.model]( self.args.enc_in, self.args.dec_in,", "dec_inp, batch_y_mark) batch_y = batch_y[:,-self.args.pred_len:,:].to(self.device) pred = outputs.detach().cpu() true =", "batch_x.double().to(self.device) batch_y = batch_y.double() batch_x_mark = batch_x_mark.double().to(self.device) batch_y_mark = batch_y_mark.double().to(self.device)", "# result save folder_path = './results/' + setting +'/' if", "preds = [] trues = [] labels = [] with", "nn from torch import optim from torch.utils.data import DataLoader import", "= self.args data_dict = { 'SMAP':NASA_Anomaly, 'MSL':NASA_Anomaly, 'WADI':WADI, } Data", "False; drop_last = True; batch_size = args.batch_size else: shuffle_flag =", "= batch_x.double().to(self.device) batch_y = batch_y.double() batch_x_mark = batch_x_mark.double().to(self.device) batch_y_mark =", "labels = [] with torch.no_grad(): for i, (batch_x,batch_y,batch_x_mark,batch_y_mark,batch_label) in enumerate(test_loader):", "( NASA_Anomaly, WADI ) from exp.exp_basic import Exp_Basic from models.model", "args = self.args data_dict = { 'SMAP':NASA_Anomaly, 'MSL':NASA_Anomaly, 'WADI':WADI, }", "train_loss = [] self.model.train() for i, (batch_x,batch_y,batch_x_mark,batch_y_mark) in enumerate(train_loader): iter_count", "= outputs.detach().cpu() true = batch_y.detach().cpu() loss = criterion(pred, true) total_loss.append(loss)", "if (i+1) % 100==0: print(\"\\titers: {0}, epoch: {1} | loss:", ") return model.double() def _get_data(self, flag): args = self.args data_dict", "= (time.time()-time_now)/iter_count left_time = speed*((self.args.train_epochs - epoch)*train_steps - i) print('\\tspeed:", "batch_y) train_loss.append(loss.item()) if (i+1) % 100==0: print(\"\\titers: {0}, epoch: {1}", "= model_dict[self.args.model]( self.args.enc_in, self.args.dec_in, self.args.c_out, self.args.seq_len, self.args.label_len, 
self.args.pred_len, self.args.factor, self.args.d_model,", "dim=1).double().to(self.device) # encoder - decoder outputs = self.model(batch_x, batch_x_mark, dec_inp,", "criterion(outputs, batch_y) train_loss.append(loss.item()) if (i+1) % 100==0: print(\"\\titers: {0}, epoch:", "from utils.tools import EarlyStopping, adjust_learning_rate from utils.metrics import metric from", "args): super(Exp_Informer_DAD, self).__init__(args) def _build_model(self): model_dict = { 'informer':Informer, }", "return data_set, data_loader def _select_optimizer(self): model_optim = optim.Adam(self.model.parameters(), lr=self.args.learning_rate) return", "1 model_optim.zero_grad() batch_x = batch_x.double().to(self.device) batch_y = batch_y.double() batch_x_mark =", "size=[args.seq_len, args.label_len, args.pred_len], features=args.features, target=args.target ) print(flag, len(data_set)) data_loader =", "[] for i, (batch_x,batch_y,batch_x_mark,batch_y_mark,batch_label) in enumerate(vali_loader): batch_x = batch_x.double().to(self.device) batch_y", "self._get_data(flag='test') self.model.eval() preds = [] trues = [] labels =", "dec_inp], dim=1).double().to(self.device) # encoder - decoder outputs = self.model(batch_x, batch_x_mark,", "drop_last = True; batch_size = args.batch_size else: shuffle_flag = True;", "trues.shape) # result save folder_path = './results/' + setting +'/'", "= batch_x_mark.double().to(self.device) batch_y_mark = batch_y_mark.double().to(self.device) # decoder input dec_inp =", "model_dict[self.args.model]( self.args.enc_in, self.args.dec_in, self.args.c_out, self.args.seq_len, self.args.label_len, self.args.pred_len, self.args.factor, self.args.d_model, self.args.n_heads,", "self.model.train() return total_loss def train(self, setting): train_data, train_loader = self._get_data(flag", "left_time = speed*((self.args.train_epochs - epoch)*train_steps - i) print('\\tspeed: {:.4f}s/iter; left", "test_loss)) early_stopping(vali_loss, self.model, path) if 
early_stopping.early_stop: print(\"Early stopping\") break adjust_learning_rate(model_optim,", "[] with torch.no_grad(): for i, (batch_x,batch_y,batch_x_mark,batch_y_mark,batch_label) in enumerate(test_loader): batch_x =", "_get_data(self, flag): args = self.args data_dict = { 'SMAP':NASA_Anomaly, 'MSL':NASA_Anomaly,", "test(self, setting): test_data, test_loader = self._get_data(flag='test') self.model.eval() preds = []", "result save folder_path = './results/' + setting +'/' if not", "loss = criterion(outputs, batch_y) train_loss.append(loss.item()) if (i+1) % 100==0: print(\"\\titers:", "mae:{}'.format(mse, mae)) np.save(folder_path+'metrics.npy', np.array([mae, mse, rmse, mape, mspe])) np.save(folder_path+'pred.npy', preds)", "print(\"Epoch: {0}, Steps: {1} | Train Loss: {2:.7f} Vali Loss:", "+ 1, loss.item())) speed = (time.time()-time_now)/iter_count left_time = speed*((self.args.train_epochs -", "return model_optim def _select_criterion(self): criterion = nn.MSELoss() return criterion def", "train_loader = self._get_data(flag = 'train') vali_data, vali_loader = self._get_data(flag =", "(i+1) % 100==0: print(\"\\titers: {0}, epoch: {1} | loss: {2:.7f}\".format(i", "enumerate(test_loader): batch_x = batch_x.double().to(self.device) batch_y = batch_y.double() batch_x_mark = batch_x_mark.double().to(self.device)", "DataLoader import os import time import warnings warnings.filterwarnings('ignore') class Exp_Informer_DAD(Exp_Basic):", "outputs.detach().cpu() true = batch_y.detach().cpu() loss = criterion(pred, true) total_loss.append(loss) total_loss", "= self._get_data(flag = 'train') vali_data, vali_loader = self._get_data(flag = 'val')", "self._get_data(flag = 'test') path = './checkpoints/'+setting if not os.path.exists(path): os.makedirs(path)", "= self._select_criterion() for epoch in range(self.args.train_epochs): iter_count = 0 train_loss", "'SMAP':NASA_Anomaly, 'MSL':NASA_Anomaly, 'WADI':WADI, } Data = data_dict[self.args.data] if flag ==", "WADI ) from 
exp.exp_basic import Exp_Basic from models.model import Informer", "batch_y = batch_y[:,-self.args.pred_len:,:].to(self.device) pred = outputs.detach().cpu() true = batch_y.detach().cpu() loss", "import optim from torch.utils.data import DataLoader import os import time", "= np.average(train_loss) vali_loss = self.vali(vali_data, vali_loader, criterion) test_loss = self.vali(test_data,", "criterion) test_loss = self.vali(test_data, test_loader, criterion) print(\"Epoch: {0}, Steps: {1}", "np.array(labels) print('test shape:', preds.shape, trues.shape) preds = preds.reshape(-1, preds.shape[-2], preds.shape[-1])", "mspe = metric(preds, trues) print('mse:{}, mae:{}'.format(mse, mae)) np.save(folder_path+'metrics.npy', np.array([mae, mse,", "data.data_loader_dad import ( NASA_Anomaly, WADI ) from exp.exp_basic import Exp_Basic", "vali_loss = self.vali(vali_data, vali_loader, criterion) test_loss = self.vali(test_data, test_loader, criterion)", "path+'/'+'checkpoint.pth' self.model.load_state_dict(torch.load(best_model_path)) return self.model def test(self, setting): test_data, test_loader =", "with torch.no_grad(): for i, (batch_x,batch_y,batch_x_mark,batch_y_mark,batch_label) in enumerate(test_loader): batch_x = batch_x.double().to(self.device)", "batch_label.long().detach().numpy() preds.append(pred) trues.append(true) labels.append(batch_label) preds = np.array(preds) trues = np.array(trues)", "self.vali(test_data, test_loader, criterion) print(\"Epoch: {0}, Steps: {1} | Train Loss:", "numpy as np import torch import torch.nn as nn from", "classification_report import numpy as np import torch import torch.nn as", "if self.args.model=='informer': model = model_dict[self.args.model]( self.args.enc_in, self.args.dec_in, self.args.c_out, self.args.seq_len, self.args.label_len,", "shuffle_flag = True; drop_last = True; batch_size = args.batch_size data_set", "optim.Adam(self.model.parameters(), lr=self.args.learning_rate) return model_optim def _select_criterion(self): 
criterion = nn.MSELoss() return", "print('test shape:', preds.shape, trues.shape) preds = preds.reshape(-1, preds.shape[-2], preds.shape[-1]) trues", "= time.time() loss.backward() model_optim.step() train_loss = np.average(train_loss) vali_loss = self.vali(vali_data,", "if not os.path.exists(path): os.makedirs(path) time_now = time.time() train_steps = len(train_loader)", "outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark) batch_y = batch_y[:,-self.args.pred_len:,:].to(self.device) loss", "total_loss def train(self, setting): train_data, train_loader = self._get_data(flag = 'train')", "= [] with torch.no_grad(): for i, (batch_x,batch_y,batch_x_mark,batch_y_mark,batch_label) in enumerate(test_loader): batch_x", "batch_y = batch_y.double() batch_x_mark = batch_x_mark.double().to(self.device) batch_y_mark = batch_y_mark.double().to(self.device) #", "self._select_criterion() for epoch in range(self.args.train_epochs): iter_count = 0 train_loss =", "= self.vali(vali_data, vali_loader, criterion) test_loss = self.vali(test_data, test_loader, criterion) print(\"Epoch:", "import classification_report import numpy as np import torch import torch.nn", "def vali(self, vali_data, vali_loader, criterion): self.model.eval() total_loss = [] for", "self.args.enc_in, self.args.dec_in, self.args.c_out, self.args.seq_len, self.args.label_len, self.args.pred_len, self.args.factor, self.args.d_model, self.args.n_heads, self.args.e_layers,", "self._get_data(flag = 'val') test_data, test_loader = self._get_data(flag = 'test') path", "batch_x = batch_x.double().to(self.device) batch_y = batch_y.double() batch_x_mark = batch_x_mark.double().to(self.device) batch_y_mark", "def train(self, setting): train_data, train_loader = self._get_data(flag = 'train') vali_data,", "{0}, epoch: {1} | loss: {2:.7f}\".format(i + 1, epoch +", "features=args.features, target=args.target ) print(flag, len(data_set)) data_loader = DataLoader( data_set, batch_size=batch_size,", "left time: 
{:.4f}s'.format(speed, left_time)) iter_count = 0 time_now = time.time()", "{2:.7f} Vali Loss: {3:.7f} Test Loss: {4:.7f}\".format( epoch + 1,", "batch_label = batch_label.long().detach().numpy() preds.append(pred) trues.append(true) labels.append(batch_label) preds = np.array(preds) trues", "vali_loss, test_loss)) early_stopping(vali_loss, self.model, path) if early_stopping.early_stop: print(\"Early stopping\") break", "print('mse:{}, mae:{}'.format(mse, mae)) np.save(folder_path+'metrics.npy', np.array([mae, mse, rmse, mape, mspe])) np.save(folder_path+'pred.npy',", "(time.time()-time_now)/iter_count left_time = speed*((self.args.train_epochs - epoch)*train_steps - i) print('\\tspeed: {:.4f}s/iter;", "= labels.reshape(-1, labels.shape[-1]) print('test shape:', preds.shape, trues.shape) # result save", "+ 1, train_steps, train_loss, vali_loss, test_loss)) early_stopping(vali_loss, self.model, path) if", "speed*((self.args.train_epochs - epoch)*train_steps - i) print('\\tspeed: {:.4f}s/iter; left time: {:.4f}s'.format(speed,", "iter_count = 0 train_loss = [] self.model.train() for i, (batch_x,batch_y,batch_x_mark,batch_y_mark)", "rmse, mape, mspe = metric(preds, trues) print('mse:{}, mae:{}'.format(mse, mae)) np.save(folder_path+'metrics.npy',", "np import torch import torch.nn as nn from torch import", "= preds.reshape(-1, preds.shape[-2], preds.shape[-1]) trues = trues.reshape(-1, trues.shape[-2], trues.shape[-1]) labels", "train_steps = len(train_loader) early_stopping = EarlyStopping(patience=self.args.patience, verbose=True) model_optim = self._select_optimizer()", "- decoder outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark) batch_y =", "mse, rmse, mape, mspe])) np.save(folder_path+'pred.npy', preds) np.save(folder_path+'true.npy', trues) np.save(folder_path+'label.npy', labels)", "as nn from torch import optim from torch.utils.data import DataLoader", "DataLoader( data_set, batch_size=batch_size, shuffle=shuffle_flag, 
num_workers=args.num_workers, drop_last=drop_last) return data_set, data_loader def", "import warnings warnings.filterwarnings('ignore') class Exp_Informer_DAD(Exp_Basic): def __init__(self, args): super(Exp_Informer_DAD, self).__init__(args)", "time_now = time.time() train_steps = len(train_loader) early_stopping = EarlyStopping(patience=self.args.patience, verbose=True)", "def _select_criterion(self): criterion = nn.MSELoss() return criterion def vali(self, vali_data,", "len(data_set)) data_loader = DataLoader( data_set, batch_size=batch_size, shuffle=shuffle_flag, num_workers=args.num_workers, drop_last=drop_last) return", "batch_x_mark, dec_inp, batch_y_mark) batch_y = batch_y[:,-self.args.pred_len:,:].to(self.device) loss = criterion(outputs, batch_y)", "= './checkpoints/'+setting if not os.path.exists(path): os.makedirs(path) time_now = time.time() train_steps", "self.args.n_heads, self.args.e_layers, self.args.d_layers, self.args.d_ff, self.args.dropout, self.args.attn, self.args.embed, self.args.data[:-1], self.args.activation, self.device", "loss.item())) speed = (time.time()-time_now)/iter_count left_time = speed*((self.args.train_epochs - epoch)*train_steps -", "outputs.detach().cpu().numpy()#.squeeze() true = batch_y.detach().cpu().numpy()#.squeeze() batch_label = batch_label.long().detach().numpy() preds.append(pred) trues.append(true) labels.append(batch_label)", "test_loader = self._get_data(flag='test') self.model.eval() preds = [] trues = []", "epoch: {1} | loss: {2:.7f}\".format(i + 1, epoch + 1,", "as np import torch import torch.nn as nn from torch", "import Exp_Basic from models.model import Informer from utils.tools import EarlyStopping,", "= torch.zeros_like(batch_y[:,-self.args.pred_len:,:]).double() dec_inp = torch.cat([batch_y[:,:self.args.label_len,:], dec_inp], dim=1).double().to(self.device) # encoder -", "= criterion(pred, true) total_loss.append(loss) total_loss = np.average(total_loss) self.model.train() return total_loss", 
"batch_y_mark.double().to(self.device) # decoder input dec_inp = torch.zeros_like(batch_y[:,-self.args.pred_len:,:]).double() dec_inp = torch.cat([batch_y[:,:self.args.label_len,:],", "dec_inp, batch_y_mark) batch_y = batch_y[:,-self.args.pred_len:,:].to(self.device) loss = criterion(outputs, batch_y) train_loss.append(loss.item())", "data_dict[self.args.data] if flag == 'test': shuffle_flag = False; drop_last =", "target=args.target ) print(flag, len(data_set)) data_loader = DataLoader( data_set, batch_size=batch_size, shuffle=shuffle_flag,", "train(self, setting): train_data, train_loader = self._get_data(flag = 'train') vali_data, vali_loader", "preds.shape[-1]) trues = trues.reshape(-1, trues.shape[-2], trues.shape[-1]) labels = labels.reshape(-1, labels.shape[-1])", "verbose=True) model_optim = self._select_optimizer() criterion = self._select_criterion() for epoch in", "= 'test') path = './checkpoints/'+setting if not os.path.exists(path): os.makedirs(path) time_now", "{3:.7f} Test Loss: {4:.7f}\".format( epoch + 1, train_steps, train_loss, vali_loss,", "loss.backward() model_optim.step() train_loss = np.average(train_loss) vali_loss = self.vali(vali_data, vali_loader, criterion)", "print('test shape:', preds.shape, trues.shape) # result save folder_path = './results/'", "np.array([mae, mse, rmse, mape, mspe])) np.save(folder_path+'pred.npy', preds) np.save(folder_path+'true.npy', trues) np.save(folder_path+'label.npy',", "epoch + 1, loss.item())) speed = (time.time()-time_now)/iter_count left_time = speed*((self.args.train_epochs", "np.array(trues) labels = np.array(labels) print('test shape:', preds.shape, trues.shape) preds =", "= self._get_data(flag = 'val') test_data, test_loader = self._get_data(flag = 'test')", "os.makedirs(folder_path) mae, mse, rmse, mape, mspe = metric(preds, trues) print('mse:{},", "} if self.args.model=='informer': model = model_dict[self.args.model]( self.args.enc_in, self.args.dec_in, self.args.c_out, self.args.seq_len,", 
"metric(preds, trues) print('mse:{}, mae:{}'.format(mse, mae)) np.save(folder_path+'metrics.npy', np.array([mae, mse, rmse, mape,", "shuffle=shuffle_flag, num_workers=args.num_workers, drop_last=drop_last) return data_set, data_loader def _select_optimizer(self): model_optim =", "torch.nn as nn from torch import optim from torch.utils.data import", "self.args.seq_len, self.args.label_len, self.args.pred_len, self.args.factor, self.args.d_model, self.args.n_heads, self.args.e_layers, self.args.d_layers, self.args.d_ff, self.args.dropout,", "{4:.7f}\".format( epoch + 1, train_steps, train_loss, vali_loss, test_loss)) early_stopping(vali_loss, self.model,", "len(train_loader) early_stopping = EarlyStopping(patience=self.args.patience, verbose=True) model_optim = self._select_optimizer() criterion =", "= optim.Adam(self.model.parameters(), lr=self.args.learning_rate) return model_optim def _select_criterion(self): criterion = nn.MSELoss()", "vali_loader, criterion): self.model.eval() total_loss = [] for i, (batch_x,batch_y,batch_x_mark,batch_y_mark,batch_label) in", "# encoder - decoder outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)", "in enumerate(test_loader): batch_x = batch_x.double().to(self.device) batch_y = batch_y.double() batch_x_mark =", "trues.shape) preds = preds.reshape(-1, preds.shape[-2], preds.shape[-1]) trues = trues.reshape(-1, trues.shape[-2],", "for i, (batch_x,batch_y,batch_x_mark,batch_y_mark) in enumerate(train_loader): iter_count += 1 model_optim.zero_grad() batch_x", "Exp_Informer_DAD(Exp_Basic): def __init__(self, args): super(Exp_Informer_DAD, self).__init__(args) def _build_model(self): model_dict =", "pred = outputs.detach().cpu().numpy()#.squeeze() true = batch_y.detach().cpu().numpy()#.squeeze() batch_label = batch_label.long().detach().numpy() preds.append(pred)", "'./results/' + setting +'/' if not os.path.exists(folder_path): os.makedirs(folder_path) mae, mse,", "trues = trues.reshape(-1, trues.shape[-2], 
trues.shape[-1]) labels = labels.reshape(-1, labels.shape[-1]) print('test", "'test': shuffle_flag = False; drop_last = True; batch_size = args.batch_size", "in enumerate(vali_loader): batch_x = batch_x.double().to(self.device) batch_y = batch_y.double() batch_x_mark =", "batch_y[:,-self.args.pred_len:,:].to(self.device) pred = outputs.detach().cpu() true = batch_y.detach().cpu() loss = criterion(pred,", "trues.append(true) labels.append(batch_label) preds = np.array(preds) trues = np.array(trues) labels =", "criterion = nn.MSELoss() return criterion def vali(self, vali_data, vali_loader, criterion):", "not os.path.exists(path): os.makedirs(path) time_now = time.time() train_steps = len(train_loader) early_stopping", "= batch_y_mark.double().to(self.device) # decoder input dec_inp = torch.zeros_like(batch_y[:,-self.args.pred_len:,:]).double() dec_inp =", "batch_y_mark = batch_y_mark.double().to(self.device) # decoder input dec_inp = torch.zeros_like(batch_y[:,-self.args.pred_len:,:]).double() dec_inp", "self._select_optimizer() criterion = self._select_criterion() for epoch in range(self.args.train_epochs): iter_count =", "preds = preds.reshape(-1, preds.shape[-2], preds.shape[-1]) trues = trues.reshape(-1, trues.shape[-2], trues.shape[-1])", "= criterion(outputs, batch_y) train_loss.append(loss.item()) if (i+1) % 100==0: print(\"\\titers: {0},", "Exp_Basic from models.model import Informer from utils.tools import EarlyStopping, adjust_learning_rate", ") from exp.exp_basic import Exp_Basic from models.model import Informer from", "= batch_y[:,-self.args.pred_len:,:].to(self.device) pred = outputs.detach().cpu() true = batch_y.detach().cpu() loss =", "= batch_y[:,-self.args.pred_len:,:].to(self.device) pred = outputs.detach().cpu().numpy()#.squeeze() true = batch_y.detach().cpu().numpy()#.squeeze() batch_label =", "in range(self.args.train_epochs): iter_count = 0 train_loss = [] self.model.train() for", "time.time() train_steps = len(train_loader) early_stopping = 
EarlyStopping(patience=self.args.patience, verbose=True) model_optim =", "test_loader, criterion) print(\"Epoch: {0}, Steps: {1} | Train Loss: {2:.7f}", "total_loss.append(loss) total_loss = np.average(total_loss) self.model.train() return total_loss def train(self, setting):", "'test') path = './checkpoints/'+setting if not os.path.exists(path): os.makedirs(path) time_now =", "= len(train_loader) early_stopping = EarlyStopping(patience=self.args.patience, verbose=True) model_optim = self._select_optimizer() criterion", "else: shuffle_flag = True; drop_last = True; batch_size = args.batch_size", "print(\"Early stopping\") break adjust_learning_rate(model_optim, epoch+1, self.args) best_model_path = path+'/'+'checkpoint.pth' self.model.load_state_dict(torch.load(best_model_path))", "train_loss = np.average(train_loss) vali_loss = self.vali(vali_data, vali_loader, criterion) test_loss =", "torch.utils.data import DataLoader import os import time import warnings warnings.filterwarnings('ignore')", "vali_data, vali_loader = self._get_data(flag = 'val') test_data, test_loader = self._get_data(flag", "trues.reshape(-1, trues.shape[-2], trues.shape[-1]) labels = labels.reshape(-1, labels.shape[-1]) print('test shape:', preds.shape,", "self.args.pred_len, self.args.factor, self.args.d_model, self.args.n_heads, self.args.e_layers, self.args.d_layers, self.args.d_ff, self.args.dropout, self.args.attn, self.args.embed,", "nn.MSELoss() return criterion def vali(self, vali_data, vali_loader, criterion): self.model.eval() total_loss", "self.model.train() for i, (batch_x,batch_y,batch_x_mark,batch_y_mark) in enumerate(train_loader): iter_count += 1 model_optim.zero_grad()", "i) print('\\tspeed: {:.4f}s/iter; left time: {:.4f}s'.format(speed, left_time)) iter_count = 0", "data_set, batch_size=batch_size, shuffle=shuffle_flag, num_workers=args.num_workers, drop_last=drop_last) return data_set, data_loader def _select_optimizer(self):", "Data = data_dict[self.args.data] if flag == 
'test': shuffle_flag = False;", "{:.4f}s/iter; left time: {:.4f}s'.format(speed, left_time)) iter_count = 0 time_now =", "epoch in range(self.args.train_epochs): iter_count = 0 train_loss = [] self.model.train()", "break adjust_learning_rate(model_optim, epoch+1, self.args) best_model_path = path+'/'+'checkpoint.pth' self.model.load_state_dict(torch.load(best_model_path)) return self.model", "metric from sklearn.metrics import classification_report import numpy as np import", "for epoch in range(self.args.train_epochs): iter_count = 0 train_loss = []", "labels = np.array(labels) print('test shape:', preds.shape, trues.shape) preds = preds.reshape(-1,", "return total_loss def train(self, setting): train_data, train_loader = self._get_data(flag =", "model.double() def _get_data(self, flag): args = self.args data_dict = {", "= 0 time_now = time.time() loss.backward() model_optim.step() train_loss = np.average(train_loss)", "early_stopping(vali_loss, self.model, path) if early_stopping.early_stop: print(\"Early stopping\") break adjust_learning_rate(model_optim, epoch+1,", "vali(self, vali_data, vali_loader, criterion): self.model.eval() total_loss = [] for i,", "outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark) batch_y = batch_y[:,-self.args.pred_len:,:].to(self.device) pred", "labels.reshape(-1, labels.shape[-1]) print('test shape:', preds.shape, trues.shape) # result save folder_path", "self.args.e_layers, self.args.d_layers, self.args.d_ff, self.args.dropout, self.args.attn, self.args.embed, self.args.data[:-1], self.args.activation, self.device )", "criterion) print(\"Epoch: {0}, Steps: {1} | Train Loss: {2:.7f} Vali", "= 0 train_loss = [] self.model.train() for i, (batch_x,batch_y,batch_x_mark,batch_y_mark) in", "train_loss, vali_loss, test_loss)) early_stopping(vali_loss, self.model, path) if early_stopping.early_stop: print(\"Early stopping\")", "self.device ) return model.double() def _get_data(self, flag): args = self.args", "self.model, 
path) if early_stopping.early_stop: print(\"Early stopping\") break adjust_learning_rate(model_optim, epoch+1, self.args)", "trues) print('mse:{}, mae:{}'.format(mse, mae)) np.save(folder_path+'metrics.npy', np.array([mae, mse, rmse, mape, mspe]))", "100==0: print(\"\\titers: {0}, epoch: {1} | loss: {2:.7f}\".format(i + 1,", "1, loss.item())) speed = (time.time()-time_now)/iter_count left_time = speed*((self.args.train_epochs - epoch)*train_steps", "path) if early_stopping.early_stop: print(\"Early stopping\") break adjust_learning_rate(model_optim, epoch+1, self.args) best_model_path", "print('\\tspeed: {:.4f}s/iter; left time: {:.4f}s'.format(speed, left_time)) iter_count = 0 time_now", "setting): test_data, test_loader = self._get_data(flag='test') self.model.eval() preds = [] trues", "criterion): self.model.eval() total_loss = [] for i, (batch_x,batch_y,batch_x_mark,batch_y_mark,batch_label) in enumerate(vali_loader):", "from data.data_loader_dad import ( NASA_Anomaly, WADI ) from exp.exp_basic import", "from torch import optim from torch.utils.data import DataLoader import os", "num_workers=args.num_workers, drop_last=drop_last) return data_set, data_loader def _select_optimizer(self): model_optim = optim.Adam(self.model.parameters(),", "self.args) best_model_path = path+'/'+'checkpoint.pth' self.model.load_state_dict(torch.load(best_model_path)) return self.model def test(self, setting):", "batch_y_mark) batch_y = batch_y[:,-self.args.pred_len:,:].to(self.device) loss = criterion(outputs, batch_y) train_loss.append(loss.item()) if", "np.array(preds) trues = np.array(trues) labels = np.array(labels) print('test shape:', preds.shape,", "= outputs.detach().cpu().numpy()#.squeeze() true = batch_y.detach().cpu().numpy()#.squeeze() batch_label = batch_label.long().detach().numpy() preds.append(pred) trues.append(true)", "not os.path.exists(folder_path): os.makedirs(folder_path) mae, mse, rmse, mape, mspe = metric(preds,", "model = model_dict[self.args.model]( 
self.args.enc_in, self.args.dec_in, self.args.c_out, self.args.seq_len, self.args.label_len, self.args.pred_len, self.args.factor," ]
[ "transport. The message_string parameter is a string in the MySensors", "load_nodes_state(\"mysensors/distance_sensor_state.json\") @pytest.fixture def distance_sensor( gateway_nodes: dict[int, Sensor], distance_sensor_state: dict )", "message_string parameter is a string in the MySensors message format.", "gateway.sensors.update(nodes) persistence.schedule_save_sensors = AsyncMock( side_effect=mock_schedule_save_sensors ) # For some reason", "message with the transport. The message_string parameter is a string", "None]: \"\"\"Receive a message for the gateway.\"\"\" def receive_message_callback(message_string: str)", "gateway nodes.\"\"\" gateway_nodes.update(nodes) return nodes @pytest.fixture(name=\"gps_sensor_state\", scope=\"session\") def gps_sensor_state_fixture() ->", "update_gateway_nodes(gateway_nodes, distance_sensor_state) node = nodes[1] return node @pytest.fixture(name=\"temperature_sensor_state\", scope=\"session\") def", "patch( \"mysensors.gateway_serial.AsyncTransport\", autospec=True ) as transport_class, patch(\"mysensors.task.OTAFirmware\", autospec=True), patch( \"mysensors.task.load_fw\",", "gps_sensor_state: dict) -> Sensor: \"\"\"Load the gps sensor.\"\"\" nodes =", "used for integration set up.\"\"\" return serial_entry @pytest.fixture(name=\"integration\") async def", "the sound sensor state.\"\"\" return load_nodes_state(\"mysensors/sound_sensor_state.json\") @pytest.fixture def sound_sensor(gateway_nodes: dict[int,", "-> Sensor: \"\"\"Load the energy sensor.\"\"\" nodes = update_gateway_nodes(gateway_nodes, energy_sensor_state)", "config_entry: MockConfigEntry ) -> AsyncGenerator[MockConfigEntry, None]: \"\"\"Set up the mysensors", "receive_message_callback @pytest.fixture(name=\"gateway\") def gateway_fixture( transport: MagicMock, integration: MockConfigEntry ) ->", "-> dict[int, Sensor]: \"\"\"Return the gateway nodes dict.\"\"\" return {}", "-> MockConfigEntry: \"\"\"Provide the config entry used for integration set", 
"gateway.logic(message_string) return receive_message_callback @pytest.fixture(name=\"gateway\") def gateway_fixture( transport: MagicMock, integration: MockConfigEntry", "dict ) -> Sensor: \"\"\"Load the distance sensor.\"\"\" nodes =", "Callable, Generator import json from typing import Any from unittest.mock", "that accepts string messages.\"\"\" return transport.return_value.send @pytest.fixture(name=\"serial_entry\") async def serial_entry_fixture(hass:", "@pytest.fixture(name=\"config_entry\") def config_entry_fixture(serial_entry: MockConfigEntry) -> MockConfigEntry: \"\"\"Provide the config entry", "node.\"\"\" nodes = update_gateway_nodes(gateway_nodes, text_node_state) node = nodes[1] return node", "= transport.call_args[0][0] # node_id;child_id;command;ack;type;payload\\n gateway.logic(message_string) return receive_message_callback @pytest.fixture(name=\"gateway\") def gateway_fixture(", "temperature sensor.\"\"\" nodes = update_gateway_nodes(gateway_nodes, temperature_sensor_state) node = nodes[1] return", "gateway nodes dict.\"\"\" return {} @pytest.fixture(name=\"serial_transport\") async def serial_transport_fixture( gateway_nodes:", "return transport.return_value.send @pytest.fixture(name=\"serial_entry\") async def serial_entry_fixture(hass: HomeAssistant) -> MockConfigEntry: \"\"\"Create", "-> dict: \"\"\"Load the power sensor state.\"\"\" return load_nodes_state(\"mysensors/power_sensor_state.json\") @pytest.fixture", "transport.connect.side_effect = mock_connect @pytest.fixture(name=\"transport\") def transport_fixture(serial_transport: MagicMock) -> MagicMock: \"\"\"Return", "BaseSyncGateway from mysensors.persistence import MySensorsJSONDecoder from mysensors.sensor import Sensor import", "Sensor], text_node_state: dict) -> Sensor: \"\"\"Load the text child node.\"\"\"", "def mock_gateway_features( persistence: MagicMock, transport_class: MagicMock, nodes: dict[int, Sensor] )", "= MagicMock() persistence.save_sensors = MagicMock() async def 
mock_connect() -> None:", "node @pytest.fixture(name=\"sound_sensor_state\", scope=\"session\") def sound_sensor_state_fixture() -> dict: \"\"\"Load the sound", "\"\"\"Load nodes from via persistence.\"\"\" gateway = transport_class.call_args[0][0] gateway.sensors.update(nodes) persistence.schedule_save_sensors", "gateway.on_conn_made(gateway) transport = transport_class.return_value transport.connect_task = None transport.connect.side_effect = mock_connect", "# For some reason autospeccing does not recognize these methods.", "nodes = update_gateway_nodes(gateway_nodes, gps_sensor_state) node = nodes[1] return node @pytest.fixture(name=\"power_sensor_state\",", "update_gateway_nodes(gateway_nodes, energy_sensor_state) node = nodes[1] return node @pytest.fixture(name=\"sound_sensor_state\", scope=\"session\") def", "is_device.side_effect = lambda device: device yield is_device @pytest.fixture(name=\"gateway_nodes\") def gateway_nodes_fixture()", "def config_entry_fixture(serial_entry: MockConfigEntry) -> MockConfigEntry: \"\"\"Provide the config entry used", "= update_gateway_nodes(gateway_nodes, power_sensor_state) node = nodes[1] return node @pytest.fixture(name=\"energy_sensor_state\", scope=\"session\")", "Sensor: \"\"\"Load the temperature sensor.\"\"\" nodes = update_gateway_nodes(gateway_nodes, temperature_sensor_state) node", "node @pytest.fixture(name=\"distance_sensor_state\", scope=\"session\") def distance_sensor_state_fixture() -> dict: \"\"\"Load the distance", "mock_mqtt_fixture(hass: HomeAssistant) -> None: \"\"\"Mock the MQTT integration.\"\"\" hass.config.components.add(MQTT_DOMAIN) @pytest.fixture(name=\"is_serial_port\")", "from typing import Any from unittest.mock import AsyncMock, MagicMock, patch", "the power sensor state.\"\"\" return load_nodes_state(\"mysensors/power_sensor_state.json\") @pytest.fixture def power_sensor(gateway_nodes: dict[int,", "the mysensors integration with a config entry.\"\"\" config: dict[str, Any]", "dict: 
\"\"\"Update the gateway nodes.\"\"\" gateway_nodes.update(nodes) return nodes @pytest.fixture(name=\"gps_sensor_state\", scope=\"session\")", "Sensor: \"\"\"Load the gps sensor.\"\"\" nodes = update_gateway_nodes(gateway_nodes, gps_sensor_state) node", "gateway = transport_class.call_args[0][0] gateway.on_conn_made(gateway) transport = transport_class.return_value transport.connect_task = None", "dict[int, Sensor], nodes: dict[int, Sensor] ) -> dict: \"\"\"Update the", "device: device yield is_device @pytest.fixture(name=\"gateway_nodes\") def gateway_nodes_fixture() -> dict[int, Sensor]:", "AsyncGenerator, Callable, Generator import json from typing import Any from", "out device tracker known devices storage.\"\"\" devices = mock_device_tracker_conf return", "node = nodes[1] return node @pytest.fixture(name=\"energy_sensor_state\", scope=\"session\") def energy_sensor_state_fixture() ->", "the temperature sensor state.\"\"\" return load_nodes_state(\"mysensors/temperature_sensor_state.json\") @pytest.fixture def temperature_sensor( gateway_nodes:", "config) await hass.async_block_till_done() yield config_entry @pytest.fixture def receive_message( transport: MagicMock,", "text node state.\"\"\" return load_nodes_state(\"mysensors/text_node_state.json\") @pytest.fixture def text_node(gateway_nodes: dict[int, Sensor],", ") -> Sensor: \"\"\"Load the distance sensor.\"\"\" nodes = update_gateway_nodes(gateway_nodes,", "mock that accepts string messages.\"\"\" return transport.return_value.send @pytest.fixture(name=\"serial_entry\") async def", "node state.\"\"\" return load_nodes_state(\"mysensors/text_node_state.json\") @pytest.fixture def text_node(gateway_nodes: dict[int, Sensor], text_node_state:", "serial_transport_fixture( gateway_nodes: dict[int, Sensor], is_serial_port: MagicMock, ) -> AsyncGenerator[dict[int, Sensor],", "string in the MySensors message format. 
\"\"\" gateway = transport.call_args[0][0]", "persistence.schedule_save_sensors = AsyncMock( side_effect=mock_schedule_save_sensors ) # For some reason autospeccing", "\"\"\" gateway = transport.call_args[0][0] # node_id;child_id;command;ack;type;payload\\n gateway.logic(message_string) return receive_message_callback @pytest.fixture(name=\"gateway\")", "DEFAULT_BAUD_RATE from homeassistant.components.mysensors.const import ( CONF_BAUD_RATE, CONF_DEVICE, CONF_GATEWAY_TYPE, CONF_GATEWAY_TYPE_SERIAL, CONF_VERSION,", "autospec=True), patch( \"mysensors.task.load_fw\", autospec=True ), patch( \"mysensors.task.Persistence\", autospec=True ) as", ") as persistence_class: persistence = persistence_class.return_value mock_gateway_features(persistence, transport_class, gateway_nodes) yield", "accepts string messages.\"\"\" return transport.return_value.send @pytest.fixture(name=\"serial_entry\") async def serial_entry_fixture(hass: HomeAssistant)", "BaseSyncGateway: \"\"\"Return a setup gateway.\"\"\" return transport.call_args[0][0] def load_nodes_state(fixture_path: str)", "-> Sensor: \"\"\"Load the power sensor.\"\"\" nodes = update_gateway_nodes(gateway_nodes, power_sensor_state)", "gateway_nodes: dict[int, Sensor], energy_sensor_state: dict ) -> Sensor: \"\"\"Load the", "None]: \"\"\"Mock a serial transport.\"\"\" with patch( \"mysensors.gateway_serial.AsyncTransport\", autospec=True )", "gateway.\"\"\" return transport.call_args[0][0] def load_nodes_state(fixture_path: str) -> dict: \"\"\"Load mysensors", "for a serial gateway.\"\"\" entry = MockConfigEntry( domain=DOMAIN, data={ CONF_GATEWAY_TYPE:", "yield config_entry @pytest.fixture def receive_message( transport: MagicMock, integration: MockConfigEntry )", "load_nodes_state(\"mysensors/power_sensor_state.json\") @pytest.fixture def power_sensor(gateway_nodes: dict[int, Sensor], power_sensor_state: dict) -> Sensor:", "}, ) return entry @pytest.fixture(name=\"config_entry\") def config_entry_fixture(serial_entry: 
MockConfigEntry) -> MockConfigEntry:", "\"\"\"Load the text node state.\"\"\" return load_nodes_state(\"mysensors/text_node_state.json\") @pytest.fixture def text_node(gateway_nodes:", "entry = MockConfigEntry( domain=DOMAIN, data={ CONF_GATEWAY_TYPE: CONF_GATEWAY_TYPE_SERIAL, CONF_VERSION: \"2.3\", CONF_DEVICE:", "return load_nodes_state(\"mysensors/gps_sensor_state.json\") @pytest.fixture def gps_sensor(gateway_nodes: dict[int, Sensor], gps_sensor_state: dict) ->", "MySensorsJSONDecoder from mysensors.sensor import Sensor import pytest from homeassistant.components.device_tracker.legacy import", "homeassistant.components.mysensors.config_flow import DEFAULT_BAUD_RATE from homeassistant.components.mysensors.const import ( CONF_BAUD_RATE, CONF_DEVICE, CONF_GATEWAY_TYPE,", "gateway_nodes_fixture() -> dict[int, Sensor]: \"\"\"Return the gateway nodes dict.\"\"\" return", "@pytest.fixture(name=\"serial_transport\") async def serial_transport_fixture( gateway_nodes: dict[int, Sensor], is_serial_port: MagicMock, )", "dict[int, Sensor], temperature_sensor_state: dict ) -> Sensor: \"\"\"Load the temperature", "transport.connect_task = MagicMock() gateway = transport_class.call_args[0][0] gateway.on_conn_made(gateway) transport = transport_class.return_value", "update_gateway_nodes( gateway_nodes: dict[int, Sensor], nodes: dict[int, Sensor] ) -> dict:", "mysensors nodes fixture.\"\"\" return json.loads(load_fixture(fixture_path), cls=MySensorsJSONDecoder) def update_gateway_nodes( gateway_nodes: dict[int,", "sensor state.\"\"\" return load_nodes_state(\"mysensors/temperature_sensor_state.json\") @pytest.fixture def temperature_sensor( gateway_nodes: dict[int, Sensor],", "serial port check.\"\"\" with patch(\"homeassistant.components.mysensors.gateway.cv.isdevice\") as is_device: is_device.side_effect = lambda", "Sensor], energy_sensor_state: dict ) -> Sensor: \"\"\"Load the energy sensor.\"\"\"", "= mock_device_tracker_conf return devices @pytest.fixture(name=\"mqtt\") def 
mock_mqtt_fixture(hass: HomeAssistant) -> None:", "\"\"\"Create a config entry for a serial gateway.\"\"\" entry =", "autospeccing does not recognize these methods. persistence.safe_load_sensors = MagicMock() persistence.save_sensors", "gps_sensor_state_fixture() -> dict: \"\"\"Load the gps sensor state.\"\"\" return load_nodes_state(\"mysensors/gps_sensor_state.json\")", "Sensor], None]: \"\"\"Mock a serial transport.\"\"\" with patch( \"mysensors.gateway_serial.AsyncTransport\", autospec=True", "= nodes[1] return node @pytest.fixture(name=\"energy_sensor_state\", scope=\"session\") def energy_sensor_state_fixture() -> dict:", "gateway.\"\"\" def receive_message_callback(message_string: str) -> None: \"\"\"Receive a message with", "transport_class, patch(\"mysensors.task.OTAFirmware\", autospec=True), patch( \"mysensors.task.load_fw\", autospec=True ), patch( \"mysensors.task.Persistence\", autospec=True", "state.\"\"\" return load_nodes_state(\"mysensors/temperature_sensor_state.json\") @pytest.fixture def temperature_sensor( gateway_nodes: dict[int, Sensor], temperature_sensor_state:", "tests.common import MockConfigEntry, load_fixture @pytest.fixture(autouse=True) def device_tracker_storage(mock_device_tracker_conf: list[Device]) -> list[Device]:", "nodes = update_gateway_nodes(gateway_nodes, distance_sensor_state) node = nodes[1] return node @pytest.fixture(name=\"temperature_sensor_state\",", "\"\"\"Load the text child node.\"\"\" nodes = update_gateway_nodes(gateway_nodes, text_node_state) node", ") -> BaseSyncGateway: \"\"\"Return a setup gateway.\"\"\" return transport.call_args[0][0] def", "devices = mock_device_tracker_conf return devices @pytest.fixture(name=\"mqtt\") def mock_mqtt_fixture(hass: HomeAssistant) ->", "json from typing import Any from unittest.mock import AsyncMock, MagicMock,", "gateway.\"\"\" entry = MockConfigEntry( domain=DOMAIN, data={ CONF_GATEWAY_TYPE: CONF_GATEWAY_TYPE_SERIAL, CONF_VERSION: \"2.3\",", "Sensor], sound_sensor_state: 
dict) -> Sensor: \"\"\"Load the sound sensor.\"\"\" nodes", "DOMAIN, ) from homeassistant.core import HomeAssistant from homeassistant.setup import async_setup_component", "MockConfigEntry: \"\"\"Provide the config entry used for integration set up.\"\"\"", "dict.\"\"\" return {} @pytest.fixture(name=\"serial_transport\") async def serial_transport_fixture( gateway_nodes: dict[int, Sensor],", "return transport.call_args[0][0] def load_nodes_state(fixture_path: str) -> dict: \"\"\"Load mysensors nodes", "list[Device]: \"\"\"Mock out device tracker known devices storage.\"\"\" devices =", "cls=MySensorsJSONDecoder) def update_gateway_nodes( gateway_nodes: dict[int, Sensor], nodes: dict[int, Sensor] )", "dict: \"\"\"Load the temperature sensor state.\"\"\" return load_nodes_state(\"mysensors/temperature_sensor_state.json\") @pytest.fixture def", "def load_nodes_state(fixture_path: str) -> dict: \"\"\"Load mysensors nodes fixture.\"\"\" return", "node @pytest.fixture(name=\"text_node_state\", scope=\"session\") def text_node_state_fixture() -> dict: \"\"\"Load the text", "= update_gateway_nodes(gateway_nodes, gps_sensor_state) node = nodes[1] return node @pytest.fixture(name=\"power_sensor_state\", scope=\"session\")", "return node @pytest.fixture(name=\"power_sensor_state\", scope=\"session\") def power_sensor_state_fixture() -> dict: \"\"\"Load the", "distance sensor.\"\"\" nodes = update_gateway_nodes(gateway_nodes, distance_sensor_state) node = nodes[1] return", "distance sensor state.\"\"\" return load_nodes_state(\"mysensors/distance_sensor_state.json\") @pytest.fixture def distance_sensor( gateway_nodes: dict[int,", "str) -> None: \"\"\"Receive a message with the transport. 
The", "power_sensor(gateway_nodes: dict[int, Sensor], power_sensor_state: dict) -> Sensor: \"\"\"Load the power", "<filename>tests/components/mysensors/conftest.py \"\"\"Provide common mysensors fixtures.\"\"\" from __future__ import annotations from", "Sensor], distance_sensor_state: dict ) -> Sensor: \"\"\"Load the distance sensor.\"\"\"", "@pytest.fixture(name=\"gateway_nodes\") def gateway_nodes_fixture() -> dict[int, Sensor]: \"\"\"Return the gateway nodes", "import Device from homeassistant.components.mqtt import DOMAIN as MQTT_DOMAIN from homeassistant.components.mysensors.config_flow", "-> Generator[MagicMock, None, None]: \"\"\"Patch the serial port check.\"\"\" with", "None: \"\"\"Receive a message with the transport. The message_string parameter", "def mock_mqtt_fixture(hass: HomeAssistant) -> None: \"\"\"Mock the MQTT integration.\"\"\" hass.config.components.add(MQTT_DOMAIN)", "gps sensor state.\"\"\" return load_nodes_state(\"mysensors/gps_sensor_state.json\") @pytest.fixture def gps_sensor(gateway_nodes: dict[int, Sensor],", "mock_schedule_save_sensors() -> None: \"\"\"Load nodes from via persistence.\"\"\" gateway =", "integration_fixture( hass: HomeAssistant, transport: MagicMock, config_entry: MockConfigEntry ) -> AsyncGenerator[MockConfigEntry,", "import json from typing import Any from unittest.mock import AsyncMock,", "\"\"\"Load the sound sensor.\"\"\" nodes = update_gateway_nodes(gateway_nodes, sound_sensor_state) node =", "scope=\"session\") def distance_sensor_state_fixture() -> dict: \"\"\"Load the distance sensor state.\"\"\"", "-> Sensor: \"\"\"Load the sound sensor.\"\"\" nodes = update_gateway_nodes(gateway_nodes, sound_sensor_state)", "( CONF_BAUD_RATE, CONF_DEVICE, CONF_GATEWAY_TYPE, CONF_GATEWAY_TYPE_SERIAL, CONF_VERSION, DOMAIN, ) from homeassistant.core", "gateway_nodes: dict[int, Sensor], distance_sensor_state: dict ) -> Sensor: \"\"\"Load the", "receive_message( transport: MagicMock, integration: MockConfigEntry ) -> 
Callable[[str], None]: \"\"\"Receive", "update_gateway_nodes(gateway_nodes, gps_sensor_state) node = nodes[1] return node @pytest.fixture(name=\"power_sensor_state\", scope=\"session\") def", "integration.\"\"\" hass.config.components.add(MQTT_DOMAIN) @pytest.fixture(name=\"is_serial_port\") def is_serial_port_fixture() -> Generator[MagicMock, None, None]: \"\"\"Patch", "gateway_nodes) yield transport_class def mock_gateway_features( persistence: MagicMock, transport_class: MagicMock, nodes:", "the distance sensor state.\"\"\" return load_nodes_state(\"mysensors/distance_sensor_state.json\") @pytest.fixture def distance_sensor( gateway_nodes:", "= update_gateway_nodes(gateway_nodes, distance_sensor_state) node = nodes[1] return node @pytest.fixture(name=\"temperature_sensor_state\", scope=\"session\")", "@pytest.fixture(name=\"gps_sensor_state\", scope=\"session\") def gps_sensor_state_fixture() -> dict: \"\"\"Load the gps sensor", "sound_sensor_state: dict) -> Sensor: \"\"\"Load the sound sensor.\"\"\" nodes =", "as is_device: is_device.side_effect = lambda device: device yield is_device @pytest.fixture(name=\"gateway_nodes\")", "load_fixture @pytest.fixture(autouse=True) def device_tracker_storage(mock_device_tracker_conf: list[Device]) -> list[Device]: \"\"\"Mock out device", "sensor.\"\"\" nodes = update_gateway_nodes(gateway_nodes, energy_sensor_state) node = nodes[1] return node", "# node_id;child_id;command;ack;type;payload\\n gateway.logic(message_string) return receive_message_callback @pytest.fixture(name=\"gateway\") def gateway_fixture( transport: MagicMock,", "MockConfigEntry: \"\"\"Create a config entry for a serial gateway.\"\"\" entry", "\"\"\"Load the gps sensor state.\"\"\" return load_nodes_state(\"mysensors/gps_sensor_state.json\") @pytest.fixture def gps_sensor(gateway_nodes:", "mock_device_tracker_conf return devices @pytest.fixture(name=\"mqtt\") def mock_mqtt_fixture(hass: HomeAssistant) -> None: \"\"\"Mock", "None]: \"\"\"Patch the serial 
port check.\"\"\" with patch(\"homeassistant.components.mysensors.gateway.cv.isdevice\") as is_device:", "-> AsyncGenerator[MockConfigEntry, None]: \"\"\"Set up the mysensors integration with a", "= MockConfigEntry( domain=DOMAIN, data={ CONF_GATEWAY_TYPE: CONF_GATEWAY_TYPE_SERIAL, CONF_VERSION: \"2.3\", CONF_DEVICE: \"/test/device\",", "Sensor], temperature_sensor_state: dict ) -> Sensor: \"\"\"Load the temperature sensor.\"\"\"", "is_serial_port: MagicMock, ) -> AsyncGenerator[dict[int, Sensor], None]: \"\"\"Mock a serial", "\"\"\"Mock the start method.\"\"\" transport.connect_task = MagicMock() gateway = transport_class.call_args[0][0]", "DEFAULT_BAUD_RATE, }, ) return entry @pytest.fixture(name=\"config_entry\") def config_entry_fixture(serial_entry: MockConfigEntry) ->", "MockConfigEntry ) -> Callable[[str], None]: \"\"\"Receive a message for the", "def energy_sensor_state_fixture() -> dict: \"\"\"Load the energy sensor state.\"\"\" return", "import pytest from homeassistant.components.device_tracker.legacy import Device from homeassistant.components.mqtt import DOMAIN", "def device_tracker_storage(mock_device_tracker_conf: list[Device]) -> list[Device]: \"\"\"Mock out device tracker known", "-> AsyncGenerator[dict[int, Sensor], None]: \"\"\"Mock a serial transport.\"\"\" with patch(", "sound sensor state.\"\"\" return load_nodes_state(\"mysensors/sound_sensor_state.json\") @pytest.fixture def sound_sensor(gateway_nodes: dict[int, Sensor],", "def transport_fixture(serial_transport: MagicMock) -> MagicMock: \"\"\"Return the default mocked transport.\"\"\"", "node = nodes[1] return node @pytest.fixture(name=\"text_node_state\", scope=\"session\") def text_node_state_fixture() ->", "\"\"\"Mock a serial transport.\"\"\" with patch( \"mysensors.gateway_serial.AsyncTransport\", autospec=True ) as", "def is_serial_port_fixture() -> Generator[MagicMock, None, None]: \"\"\"Patch the serial port", "Sensor], power_sensor_state: dict) -> Sensor: \"\"\"Load the power 
sensor.\"\"\" nodes", "entry @pytest.fixture(name=\"config_entry\") def config_entry_fixture(serial_entry: MockConfigEntry) -> MockConfigEntry: \"\"\"Provide the config", "json.loads(load_fixture(fixture_path), cls=MySensorsJSONDecoder) def update_gateway_nodes( gateway_nodes: dict[int, Sensor], nodes: dict[int, Sensor]", "), patch( \"mysensors.task.Persistence\", autospec=True ) as persistence_class: persistence = persistence_class.return_value", ") return entry @pytest.fixture(name=\"config_entry\") def config_entry_fixture(serial_entry: MockConfigEntry) -> MockConfigEntry: \"\"\"Provide", "-> Sensor: \"\"\"Load the distance sensor.\"\"\" nodes = update_gateway_nodes(gateway_nodes, distance_sensor_state)", "from collections.abc import AsyncGenerator, Callable, Generator import json from typing", "sound_sensor_state_fixture() -> dict: \"\"\"Load the sound sensor state.\"\"\" return load_nodes_state(\"mysensors/sound_sensor_state.json\")", "MagicMock, config_entry: MockConfigEntry ) -> AsyncGenerator[MockConfigEntry, None]: \"\"\"Set up the", "yield transport_class def mock_gateway_features( persistence: MagicMock, transport_class: MagicMock, nodes: dict[int,", "data={ CONF_GATEWAY_TYPE: CONF_GATEWAY_TYPE_SERIAL, CONF_VERSION: \"2.3\", CONF_DEVICE: \"/test/device\", CONF_BAUD_RATE: DEFAULT_BAUD_RATE, },", "import DEFAULT_BAUD_RATE from homeassistant.components.mysensors.const import ( CONF_BAUD_RATE, CONF_DEVICE, CONF_GATEWAY_TYPE, CONF_GATEWAY_TYPE_SERIAL,", "power_sensor_state: dict) -> Sensor: \"\"\"Load the power sensor.\"\"\" nodes =", "some reason autospeccing does not recognize these methods. persistence.safe_load_sensors =", "str) -> dict: \"\"\"Load mysensors nodes fixture.\"\"\" return json.loads(load_fixture(fixture_path), cls=MySensorsJSONDecoder)", "text_node_state_fixture() -> dict: \"\"\"Load the text node state.\"\"\" return load_nodes_state(\"mysensors/text_node_state.json\")", "methods. 
persistence.safe_load_sensors = MagicMock() persistence.save_sensors = MagicMock() async def mock_connect()", "-> Sensor: \"\"\"Load the text child node.\"\"\" nodes = update_gateway_nodes(gateway_nodes,", "return node @pytest.fixture(name=\"distance_sensor_state\", scope=\"session\") def distance_sensor_state_fixture() -> dict: \"\"\"Load the", "tracker known devices storage.\"\"\" devices = mock_device_tracker_conf return devices @pytest.fixture(name=\"mqtt\")", "HomeAssistant, transport: MagicMock, config_entry: MockConfigEntry ) -> AsyncGenerator[MockConfigEntry, None]: \"\"\"Set", "transport.\"\"\" with patch( \"mysensors.gateway_serial.AsyncTransport\", autospec=True ) as transport_class, patch(\"mysensors.task.OTAFirmware\", autospec=True),", "\"\"\"Update the gateway nodes.\"\"\" gateway_nodes.update(nodes) return nodes @pytest.fixture(name=\"gps_sensor_state\", scope=\"session\") def", "nodes = update_gateway_nodes(gateway_nodes, energy_sensor_state) node = nodes[1] return node @pytest.fixture(name=\"sound_sensor_state\",", "typing import Any from unittest.mock import AsyncMock, MagicMock, patch from", "@pytest.fixture(name=\"transport\") def transport_fixture(serial_transport: MagicMock) -> MagicMock: \"\"\"Return the default mocked", "\"\"\"Load the energy sensor state.\"\"\" return load_nodes_state(\"mysensors/energy_sensor_state.json\") @pytest.fixture def energy_sensor(", "from mysensors import BaseSyncGateway from mysensors.persistence import MySensorsJSONDecoder from mysensors.sensor", "return nodes @pytest.fixture(name=\"gps_sensor_state\", scope=\"session\") def gps_sensor_state_fixture() -> dict: \"\"\"Load the", "async_setup_component(hass, DOMAIN, config) await hass.async_block_till_done() yield config_entry @pytest.fixture def receive_message(", "@pytest.fixture(name=\"mqtt\") def mock_mqtt_fixture(hass: HomeAssistant) -> None: \"\"\"Mock the MQTT integration.\"\"\"", "@pytest.fixture(name=\"integration\") async def integration_fixture( hass: 
HomeAssistant, transport: MagicMock, config_entry: MockConfigEntry", "\"\"\"Load the power sensor state.\"\"\" return load_nodes_state(\"mysensors/power_sensor_state.json\") @pytest.fixture def power_sensor(gateway_nodes:", "MagicMock) -> MagicMock: \"\"\"Return the transport mock that accepts string", "with a config entry.\"\"\" config: dict[str, Any] = {} config_entry.add_to_hass(hass)", "a config entry.\"\"\" config: dict[str, Any] = {} config_entry.add_to_hass(hass) with", "in the MySensors message format. \"\"\" gateway = transport.call_args[0][0] #", "\"mysensors.task.Persistence\", autospec=True ) as persistence_class: persistence = persistence_class.return_value mock_gateway_features(persistence, transport_class,", ") -> Callable[[str], None]: \"\"\"Receive a message for the gateway.\"\"\"", "port check.\"\"\" with patch(\"homeassistant.components.mysensors.gateway.cv.isdevice\") as is_device: is_device.side_effect = lambda device:", "nodes[1] return node @pytest.fixture(name=\"distance_sensor_state\", scope=\"session\") def distance_sensor_state_fixture() -> dict: \"\"\"Load", "serial_entry_fixture(hass: HomeAssistant) -> MockConfigEntry: \"\"\"Create a config entry for a", "config_entry @pytest.fixture def receive_message( transport: MagicMock, integration: MockConfigEntry ) ->", "-> dict: \"\"\"Load mysensors nodes fixture.\"\"\" return json.loads(load_fixture(fixture_path), cls=MySensorsJSONDecoder) def", "CONF_VERSION: \"2.3\", CONF_DEVICE: \"/test/device\", CONF_BAUD_RATE: DEFAULT_BAUD_RATE, }, ) return entry", "from homeassistant.setup import async_setup_component from tests.common import MockConfigEntry, load_fixture @pytest.fixture(autouse=True)", "power_sensor_state_fixture() -> dict: \"\"\"Load the power sensor state.\"\"\" return load_nodes_state(\"mysensors/power_sensor_state.json\")", "CONF_DEVICE, CONF_GATEWAY_TYPE, CONF_GATEWAY_TYPE_SERIAL, CONF_VERSION, DOMAIN, ) from homeassistant.core import HomeAssistant", "energy_sensor( gateway_nodes: 
dict[int, Sensor], energy_sensor_state: dict ) -> Sensor: \"\"\"Load", "\"\"\"Load the distance sensor state.\"\"\" return load_nodes_state(\"mysensors/distance_sensor_state.json\") @pytest.fixture def distance_sensor(", "patch( \"mysensors.task.load_fw\", autospec=True ), patch( \"mysensors.task.Persistence\", autospec=True ) as persistence_class:", "persistence.\"\"\" gateway = transport_class.call_args[0][0] gateway.sensors.update(nodes) persistence.schedule_save_sensors = AsyncMock( side_effect=mock_schedule_save_sensors )", "patch(\"homeassistant.components.mysensors.device.UPDATE_DELAY\", new=0): await async_setup_component(hass, DOMAIN, config) await hass.async_block_till_done() yield config_entry", "from homeassistant.components.mysensors.config_flow import DEFAULT_BAUD_RATE from homeassistant.components.mysensors.const import ( CONF_BAUD_RATE, CONF_DEVICE,", "sensor state.\"\"\" return load_nodes_state(\"mysensors/gps_sensor_state.json\") @pytest.fixture def gps_sensor(gateway_nodes: dict[int, Sensor], gps_sensor_state:", "Generator[MagicMock, None, None]: \"\"\"Patch the serial port check.\"\"\" with patch(\"homeassistant.components.mysensors.gateway.cv.isdevice\")", "return json.loads(load_fixture(fixture_path), cls=MySensorsJSONDecoder) def update_gateway_nodes( gateway_nodes: dict[int, Sensor], nodes: dict[int,", "from unittest.mock import AsyncMock, MagicMock, patch from mysensors import BaseSyncGateway", "persistence.save_sensors = MagicMock() async def mock_connect() -> None: \"\"\"Mock the", "dict[int, Sensor], is_serial_port: MagicMock, ) -> AsyncGenerator[dict[int, Sensor], None]: \"\"\"Mock", "scope=\"session\") def text_node_state_fixture() -> dict: \"\"\"Load the text node state.\"\"\"", "MagicMock: \"\"\"Return the transport mock that accepts string messages.\"\"\" return", "the MQTT integration.\"\"\" hass.config.components.add(MQTT_DOMAIN) @pytest.fixture(name=\"is_serial_port\") def is_serial_port_fixture() -> Generator[MagicMock, None,", 
"dict: \"\"\"Load mysensors nodes fixture.\"\"\" return json.loads(load_fixture(fixture_path), cls=MySensorsJSONDecoder) def update_gateway_nodes(", "= {} config_entry.add_to_hass(hass) with patch(\"homeassistant.components.mysensors.device.UPDATE_DELAY\", new=0): await async_setup_component(hass, DOMAIN, config)", "@pytest.fixture def sound_sensor(gateway_nodes: dict[int, Sensor], sound_sensor_state: dict) -> Sensor: \"\"\"Load", "config entry for a serial gateway.\"\"\" entry = MockConfigEntry( domain=DOMAIN,", "CONF_VERSION, DOMAIN, ) from homeassistant.core import HomeAssistant from homeassistant.setup import", "load_nodes_state(fixture_path: str) -> dict: \"\"\"Load mysensors nodes fixture.\"\"\" return json.loads(load_fixture(fixture_path),", "patch from mysensors import BaseSyncGateway from mysensors.persistence import MySensorsJSONDecoder from", "\"mysensors.task.load_fw\", autospec=True ), patch( \"mysensors.task.Persistence\", autospec=True ) as persistence_class: persistence", "gps sensor.\"\"\" nodes = update_gateway_nodes(gateway_nodes, gps_sensor_state) node = nodes[1] return", "domain=DOMAIN, data={ CONF_GATEWAY_TYPE: CONF_GATEWAY_TYPE_SERIAL, CONF_VERSION: \"2.3\", CONF_DEVICE: \"/test/device\", CONF_BAUD_RATE: DEFAULT_BAUD_RATE,", "def text_node(gateway_nodes: dict[int, Sensor], text_node_state: dict) -> Sensor: \"\"\"Load the", "= nodes[1] return node @pytest.fixture(name=\"temperature_sensor_state\", scope=\"session\") def temperature_sensor_state_fixture() -> dict:", "sensor state.\"\"\" return load_nodes_state(\"mysensors/power_sensor_state.json\") @pytest.fixture def power_sensor(gateway_nodes: dict[int, Sensor], power_sensor_state:", "= update_gateway_nodes(gateway_nodes, energy_sensor_state) node = nodes[1] return node @pytest.fixture(name=\"sound_sensor_state\", scope=\"session\")", ") -> dict: \"\"\"Update the gateway nodes.\"\"\" gateway_nodes.update(nodes) return nodes", "persistence_class: persistence = persistence_class.return_value 
mock_gateway_features(persistence, transport_class, gateway_nodes) yield transport_class def", "start method.\"\"\" transport.connect_task = MagicMock() gateway = transport_class.call_args[0][0] gateway.on_conn_made(gateway) transport", "scope=\"session\") def gps_sensor_state_fixture() -> dict: \"\"\"Load the gps sensor state.\"\"\"", "= persistence_class.return_value mock_gateway_features(persistence, transport_class, gateway_nodes) yield transport_class def mock_gateway_features( persistence:", "def receive_message_callback(message_string: str) -> None: \"\"\"Receive a message with the", "CONF_GATEWAY_TYPE_SERIAL, CONF_VERSION, DOMAIN, ) from homeassistant.core import HomeAssistant from homeassistant.setup", "None transport.connect.side_effect = mock_connect @pytest.fixture(name=\"transport\") def transport_fixture(serial_transport: MagicMock) -> MagicMock:", "a serial transport.\"\"\" with patch( \"mysensors.gateway_serial.AsyncTransport\", autospec=True ) as transport_class,", "dict[int, Sensor], text_node_state: dict) -> Sensor: \"\"\"Load the text child", "\"\"\"Load the gps sensor.\"\"\" nodes = update_gateway_nodes(gateway_nodes, gps_sensor_state) node =", ") -> AsyncGenerator[dict[int, Sensor], None]: \"\"\"Mock a serial transport.\"\"\" with", "@pytest.fixture def receive_message( transport: MagicMock, integration: MockConfigEntry ) -> Callable[[str],", "MagicMock() persistence.save_sensors = MagicMock() async def mock_connect() -> None: \"\"\"Mock", "def sound_sensor_state_fixture() -> dict: \"\"\"Load the sound sensor state.\"\"\" return", "import async_setup_component from tests.common import MockConfigEntry, load_fixture @pytest.fixture(autouse=True) def device_tracker_storage(mock_device_tracker_conf:", "-> None: \"\"\"Load nodes from via persistence.\"\"\" gateway = transport_class.call_args[0][0]", "CONF_BAUD_RATE: DEFAULT_BAUD_RATE, }, ) return entry @pytest.fixture(name=\"config_entry\") def config_entry_fixture(serial_entry: MockConfigEntry)", 
"nodes from via persistence.\"\"\" gateway = transport_class.call_args[0][0] gateway.sensors.update(nodes) persistence.schedule_save_sensors =", "MockConfigEntry ) -> BaseSyncGateway: \"\"\"Return a setup gateway.\"\"\" return transport.call_args[0][0]", "return serial_entry @pytest.fixture(name=\"integration\") async def integration_fixture( hass: HomeAssistant, transport: MagicMock,", "dict: \"\"\"Load the energy sensor state.\"\"\" return load_nodes_state(\"mysensors/energy_sensor_state.json\") @pytest.fixture def", "update_gateway_nodes(gateway_nodes, temperature_sensor_state) node = nodes[1] return node @pytest.fixture(name=\"text_node_state\", scope=\"session\") def", "import MockConfigEntry, load_fixture @pytest.fixture(autouse=True) def device_tracker_storage(mock_device_tracker_conf: list[Device]) -> list[Device]: \"\"\"Mock", "\"\"\"Mock the MQTT integration.\"\"\" hass.config.components.add(MQTT_DOMAIN) @pytest.fixture(name=\"is_serial_port\") def is_serial_port_fixture() -> Generator[MagicMock,", "{} @pytest.fixture(name=\"serial_transport\") async def serial_transport_fixture( gateway_nodes: dict[int, Sensor], is_serial_port: MagicMock,", "@pytest.fixture(name=\"distance_sensor_state\", scope=\"session\") def distance_sensor_state_fixture() -> dict: \"\"\"Load the distance sensor", "sensor state.\"\"\" return load_nodes_state(\"mysensors/energy_sensor_state.json\") @pytest.fixture def energy_sensor( gateway_nodes: dict[int, Sensor],", "list[Device]) -> list[Device]: \"\"\"Mock out device tracker known devices storage.\"\"\"", "mock_gateway_features(persistence, transport_class, gateway_nodes) yield transport_class def mock_gateway_features( persistence: MagicMock, transport_class:", "the text node state.\"\"\" return load_nodes_state(\"mysensors/text_node_state.json\") @pytest.fixture def text_node(gateway_nodes: dict[int,", "the gps sensor.\"\"\" nodes = update_gateway_nodes(gateway_nodes, gps_sensor_state) node = nodes[1]", "dict[int, Sensor], 
energy_sensor_state: dict ) -> Sensor: \"\"\"Load the energy", "await hass.async_block_till_done() yield config_entry @pytest.fixture def receive_message( transport: MagicMock, integration:", "scope=\"session\") def sound_sensor_state_fixture() -> dict: \"\"\"Load the sound sensor state.\"\"\"", "\"\"\"Return a setup gateway.\"\"\" return transport.call_args[0][0] def load_nodes_state(fixture_path: str) ->", "nodes = update_gateway_nodes(gateway_nodes, sound_sensor_state) node = nodes[1] return node @pytest.fixture(name=\"distance_sensor_state\",", "homeassistant.components.mqtt import DOMAIN as MQTT_DOMAIN from homeassistant.components.mysensors.config_flow import DEFAULT_BAUD_RATE from", "patch(\"mysensors.task.OTAFirmware\", autospec=True), patch( \"mysensors.task.load_fw\", autospec=True ), patch( \"mysensors.task.Persistence\", autospec=True )", "dict[int, Sensor]: \"\"\"Return the gateway nodes dict.\"\"\" return {} @pytest.fixture(name=\"serial_transport\")", "temperature_sensor_state_fixture() -> dict: \"\"\"Load the temperature sensor state.\"\"\" return load_nodes_state(\"mysensors/temperature_sensor_state.json\")", "node @pytest.fixture(name=\"power_sensor_state\", scope=\"session\") def power_sensor_state_fixture() -> dict: \"\"\"Load the power", "temperature sensor state.\"\"\" return load_nodes_state(\"mysensors/temperature_sensor_state.json\") @pytest.fixture def temperature_sensor( gateway_nodes: dict[int,", "MockConfigEntry) -> MockConfigEntry: \"\"\"Provide the config entry used for integration", "a setup gateway.\"\"\" return transport.call_args[0][0] def load_nodes_state(fixture_path: str) -> dict:", "autospec=True ) as transport_class, patch(\"mysensors.task.OTAFirmware\", autospec=True), patch( \"mysensors.task.load_fw\", autospec=True ),", "= update_gateway_nodes(gateway_nodes, sound_sensor_state) node = nodes[1] return node @pytest.fixture(name=\"distance_sensor_state\", scope=\"session\")", "return node 
@pytest.fixture(name=\"temperature_sensor_state\", scope=\"session\") def temperature_sensor_state_fixture() -> dict: \"\"\"Load the", "state.\"\"\" return load_nodes_state(\"mysensors/sound_sensor_state.json\") @pytest.fixture def sound_sensor(gateway_nodes: dict[int, Sensor], sound_sensor_state: dict)", "node = nodes[1] return node @pytest.fixture(name=\"sound_sensor_state\", scope=\"session\") def sound_sensor_state_fixture() ->", "MockConfigEntry ) -> AsyncGenerator[MockConfigEntry, None]: \"\"\"Set up the mysensors integration", "dict[int, Sensor], gps_sensor_state: dict) -> Sensor: \"\"\"Load the gps sensor.\"\"\"", "-> Callable[[str], None]: \"\"\"Receive a message for the gateway.\"\"\" def", "gateway = transport.call_args[0][0] # node_id;child_id;command;ack;type;payload\\n gateway.logic(message_string) return receive_message_callback @pytest.fixture(name=\"gateway\") def", "HomeAssistant) -> None: \"\"\"Mock the MQTT integration.\"\"\" hass.config.components.add(MQTT_DOMAIN) @pytest.fixture(name=\"is_serial_port\") def", "\"\"\"Return the gateway nodes dict.\"\"\" return {} @pytest.fixture(name=\"serial_transport\") async def", "message format. 
\"\"\" gateway = transport.call_args[0][0] # node_id;child_id;command;ack;type;payload\\n gateway.logic(message_string) return", "from mysensors.persistence import MySensorsJSONDecoder from mysensors.sensor import Sensor import pytest", "setup gateway.\"\"\" return transport.call_args[0][0] def load_nodes_state(fixture_path: str) -> dict: \"\"\"Load", "nodes dict.\"\"\" return {} @pytest.fixture(name=\"serial_transport\") async def serial_transport_fixture( gateway_nodes: dict[int,", "return entry @pytest.fixture(name=\"config_entry\") def config_entry_fixture(serial_entry: MockConfigEntry) -> MockConfigEntry: \"\"\"Provide the", "@pytest.fixture def energy_sensor( gateway_nodes: dict[int, Sensor], energy_sensor_state: dict ) ->", "AsyncMock( side_effect=mock_schedule_save_sensors ) # For some reason autospeccing does not", "\"\"\"Receive a message for the gateway.\"\"\" def receive_message_callback(message_string: str) ->", "\"\"\"Load mysensors nodes fixture.\"\"\" return json.loads(load_fixture(fixture_path), cls=MySensorsJSONDecoder) def update_gateway_nodes( gateway_nodes:", "from homeassistant.components.mqtt import DOMAIN as MQTT_DOMAIN from homeassistant.components.mysensors.config_flow import DEFAULT_BAUD_RATE", "@pytest.fixture(name=\"serial_entry\") async def serial_entry_fixture(hass: HomeAssistant) -> MockConfigEntry: \"\"\"Create a config", "return node @pytest.fixture(name=\"text_node_state\", scope=\"session\") def text_node_state_fixture() -> dict: \"\"\"Load the", "MockConfigEntry, load_fixture @pytest.fixture(autouse=True) def device_tracker_storage(mock_device_tracker_conf: list[Device]) -> list[Device]: \"\"\"Mock out", "gateway_fixture( transport: MagicMock, integration: MockConfigEntry ) -> BaseSyncGateway: \"\"\"Return a", "pytest from homeassistant.components.device_tracker.legacy import Device from homeassistant.components.mqtt import DOMAIN as", "gateway = transport_class.call_args[0][0] gateway.sensors.update(nodes) 
persistence.schedule_save_sensors = AsyncMock( side_effect=mock_schedule_save_sensors ) #", "text child node.\"\"\" nodes = update_gateway_nodes(gateway_nodes, text_node_state) node = nodes[1]", "config entry used for integration set up.\"\"\" return serial_entry @pytest.fixture(name=\"integration\")", "@pytest.fixture def power_sensor(gateway_nodes: dict[int, Sensor], power_sensor_state: dict) -> Sensor: \"\"\"Load", "the transport mock that accepts string messages.\"\"\" return transport.return_value.send @pytest.fixture(name=\"serial_entry\")", "Callable[[str], None]: \"\"\"Receive a message for the gateway.\"\"\" def receive_message_callback(message_string:", "storage.\"\"\" devices = mock_device_tracker_conf return devices @pytest.fixture(name=\"mqtt\") def mock_mqtt_fixture(hass: HomeAssistant)", "config entry.\"\"\" config: dict[str, Any] = {} config_entry.add_to_hass(hass) with patch(\"homeassistant.components.mysensors.device.UPDATE_DELAY\",", "gps_sensor(gateway_nodes: dict[int, Sensor], gps_sensor_state: dict) -> Sensor: \"\"\"Load the gps", "@pytest.fixture(name=\"power_sensor_state\", scope=\"session\") def power_sensor_state_fixture() -> dict: \"\"\"Load the power sensor", ") -> Sensor: \"\"\"Load the energy sensor.\"\"\" nodes = update_gateway_nodes(gateway_nodes,", "import AsyncGenerator, Callable, Generator import json from typing import Any", "homeassistant.components.mysensors.const import ( CONF_BAUD_RATE, CONF_DEVICE, CONF_GATEWAY_TYPE, CONF_GATEWAY_TYPE_SERIAL, CONF_VERSION, DOMAIN, )", "-> None: \"\"\"Mock the start method.\"\"\" transport.connect_task = MagicMock() gateway", "the config entry used for integration set up.\"\"\" return serial_entry", "-> dict: \"\"\"Load the sound sensor state.\"\"\" return load_nodes_state(\"mysensors/sound_sensor_state.json\") @pytest.fixture", "None: \"\"\"Mock the start method.\"\"\" transport.connect_task = MagicMock() gateway =", "transport_write(transport: MagicMock) -> MagicMock: \"\"\"Return the 
transport mock that accepts", "nodes fixture.\"\"\" return json.loads(load_fixture(fixture_path), cls=MySensorsJSONDecoder) def update_gateway_nodes( gateway_nodes: dict[int, Sensor],", "string messages.\"\"\" return transport.return_value.send @pytest.fixture(name=\"serial_entry\") async def serial_entry_fixture(hass: HomeAssistant) ->", "return {} @pytest.fixture(name=\"serial_transport\") async def serial_transport_fixture( gateway_nodes: dict[int, Sensor], is_serial_port:", "energy sensor state.\"\"\" return load_nodes_state(\"mysensors/energy_sensor_state.json\") @pytest.fixture def energy_sensor( gateway_nodes: dict[int,", "a serial gateway.\"\"\" entry = MockConfigEntry( domain=DOMAIN, data={ CONF_GATEWAY_TYPE: CONF_GATEWAY_TYPE_SERIAL,", "None]: \"\"\"Set up the mysensors integration with a config entry.\"\"\"", "return serial_transport @pytest.fixture def transport_write(transport: MagicMock) -> MagicMock: \"\"\"Return the", "MQTT integration.\"\"\" hass.config.components.add(MQTT_DOMAIN) @pytest.fixture(name=\"is_serial_port\") def is_serial_port_fixture() -> Generator[MagicMock, None, None]:", "a string in the MySensors message format. \"\"\" gateway =", "recognize these methods. 
persistence.safe_load_sensors = MagicMock() persistence.save_sensors = MagicMock() async", "the distance sensor.\"\"\" nodes = update_gateway_nodes(gateway_nodes, distance_sensor_state) node = nodes[1]", "mysensors import BaseSyncGateway from mysensors.persistence import MySensorsJSONDecoder from mysensors.sensor import", "None, None]: \"\"\"Patch the serial port check.\"\"\" with patch(\"homeassistant.components.mysensors.gateway.cv.isdevice\") as", "def serial_transport_fixture( gateway_nodes: dict[int, Sensor], is_serial_port: MagicMock, ) -> AsyncGenerator[dict[int,", "the gateway nodes.\"\"\" gateway_nodes.update(nodes) return nodes @pytest.fixture(name=\"gps_sensor_state\", scope=\"session\") def gps_sensor_state_fixture()", "the default mocked transport.\"\"\" return serial_transport @pytest.fixture def transport_write(transport: MagicMock)", "power_sensor_state) node = nodes[1] return node @pytest.fixture(name=\"energy_sensor_state\", scope=\"session\") def energy_sensor_state_fixture()", "async_setup_component from tests.common import MockConfigEntry, load_fixture @pytest.fixture(autouse=True) def device_tracker_storage(mock_device_tracker_conf: list[Device])", "device_tracker_storage(mock_device_tracker_conf: list[Device]) -> list[Device]: \"\"\"Mock out device tracker known devices", "MagicMock() async def mock_connect() -> None: \"\"\"Mock the start method.\"\"\"", "dict: \"\"\"Load the sound sensor state.\"\"\" return load_nodes_state(\"mysensors/sound_sensor_state.json\") @pytest.fixture def", "dict ) -> Sensor: \"\"\"Load the temperature sensor.\"\"\" nodes =", "dict: \"\"\"Load the gps sensor state.\"\"\" return load_nodes_state(\"mysensors/gps_sensor_state.json\") @pytest.fixture def", "def temperature_sensor_state_fixture() -> dict: \"\"\"Load the temperature sensor state.\"\"\" return", "autospec=True ), patch( \"mysensors.task.Persistence\", autospec=True ) as persistence_class: persistence =", "dict) -> Sensor: \"\"\"Load the text child 
node.\"\"\" nodes =", "-> dict: \"\"\"Load the energy sensor state.\"\"\" return load_nodes_state(\"mysensors/energy_sensor_state.json\") @pytest.fixture", "as transport_class, patch(\"mysensors.task.OTAFirmware\", autospec=True), patch( \"mysensors.task.load_fw\", autospec=True ), patch( \"mysensors.task.Persistence\",", "state.\"\"\" return load_nodes_state(\"mysensors/gps_sensor_state.json\") @pytest.fixture def gps_sensor(gateway_nodes: dict[int, Sensor], gps_sensor_state: dict)", "Sensor: \"\"\"Load the sound sensor.\"\"\" nodes = update_gateway_nodes(gateway_nodes, sound_sensor_state) node", "transport mock that accepts string messages.\"\"\" return transport.return_value.send @pytest.fixture(name=\"serial_entry\") async", "features.\"\"\" async def mock_schedule_save_sensors() -> None: \"\"\"Load nodes from via", "state.\"\"\" return load_nodes_state(\"mysensors/distance_sensor_state.json\") @pytest.fixture def distance_sensor( gateway_nodes: dict[int, Sensor], distance_sensor_state:", "nodes: dict[int, Sensor] ) -> None: \"\"\"Mock the gateway features.\"\"\"", "these methods. 
persistence.safe_load_sensors = MagicMock() persistence.save_sensors = MagicMock() async def", "= transport_class.return_value transport.connect_task = None transport.connect.side_effect = mock_connect @pytest.fixture(name=\"transport\") def", "transport: MagicMock, config_entry: MockConfigEntry ) -> AsyncGenerator[MockConfigEntry, None]: \"\"\"Set up", "node = nodes[1] return node @pytest.fixture(name=\"power_sensor_state\", scope=\"session\") def power_sensor_state_fixture() ->", "distance_sensor_state: dict ) -> Sensor: \"\"\"Load the distance sensor.\"\"\" nodes", "gateway_nodes: dict[int, Sensor], is_serial_port: MagicMock, ) -> AsyncGenerator[dict[int, Sensor], None]:", "transport_class, gateway_nodes) yield transport_class def mock_gateway_features( persistence: MagicMock, transport_class: MagicMock,", "= mock_connect @pytest.fixture(name=\"transport\") def transport_fixture(serial_transport: MagicMock) -> MagicMock: \"\"\"Return the", "the gps sensor state.\"\"\" return load_nodes_state(\"mysensors/gps_sensor_state.json\") @pytest.fixture def gps_sensor(gateway_nodes: dict[int,", "energy_sensor_state) node = nodes[1] return node @pytest.fixture(name=\"sound_sensor_state\", scope=\"session\") def sound_sensor_state_fixture()", "The message_string parameter is a string in the MySensors message", "sensor state.\"\"\" return load_nodes_state(\"mysensors/sound_sensor_state.json\") @pytest.fixture def sound_sensor(gateway_nodes: dict[int, Sensor], sound_sensor_state:", "up.\"\"\" return serial_entry @pytest.fixture(name=\"integration\") async def integration_fixture( hass: HomeAssistant, transport:", "integration with a config entry.\"\"\" config: dict[str, Any] = {}", "MagicMock() gateway = transport_class.call_args[0][0] gateway.on_conn_made(gateway) transport = transport_class.return_value transport.connect_task =", "\"\"\"Provide the config entry used for integration set up.\"\"\" return", "= lambda device: device yield is_device 
@pytest.fixture(name=\"gateway_nodes\") def gateway_nodes_fixture() ->", "For some reason autospeccing does not recognize these methods. persistence.safe_load_sensors", "persistence.safe_load_sensors = MagicMock() persistence.save_sensors = MagicMock() async def mock_connect() ->", "from homeassistant.components.mysensors.const import ( CONF_BAUD_RATE, CONF_DEVICE, CONF_GATEWAY_TYPE, CONF_GATEWAY_TYPE_SERIAL, CONF_VERSION, DOMAIN,", "\"\"\"Return the default mocked transport.\"\"\" return serial_transport @pytest.fixture def transport_write(transport:", "dict) -> Sensor: \"\"\"Load the gps sensor.\"\"\" nodes = update_gateway_nodes(gateway_nodes,", "def temperature_sensor( gateway_nodes: dict[int, Sensor], temperature_sensor_state: dict ) -> Sensor:", "CONF_GATEWAY_TYPE_SERIAL, CONF_VERSION: \"2.3\", CONF_DEVICE: \"/test/device\", CONF_BAUD_RATE: DEFAULT_BAUD_RATE, }, ) return", "def mock_connect() -> None: \"\"\"Mock the start method.\"\"\" transport.connect_task =", "sound sensor.\"\"\" nodes = update_gateway_nodes(gateway_nodes, sound_sensor_state) node = nodes[1] return", "node = nodes[1] return node @pytest.fixture(name=\"distance_sensor_state\", scope=\"session\") def distance_sensor_state_fixture() ->", "format. 
\"\"\" gateway = transport.call_args[0][0] # node_id;child_id;command;ack;type;payload\\n gateway.logic(message_string) return receive_message_callback", "HomeAssistant) -> MockConfigEntry: \"\"\"Create a config entry for a serial", "@pytest.fixture def text_node(gateway_nodes: dict[int, Sensor], text_node_state: dict) -> Sensor: \"\"\"Load", "\"\"\"Mock out device tracker known devices storage.\"\"\" devices = mock_device_tracker_conf", "config_entry.add_to_hass(hass) with patch(\"homeassistant.components.mysensors.device.UPDATE_DELAY\", new=0): await async_setup_component(hass, DOMAIN, config) await hass.async_block_till_done()", "energy_sensor_state: dict ) -> Sensor: \"\"\"Load the energy sensor.\"\"\" nodes", "async def integration_fixture( hass: HomeAssistant, transport: MagicMock, config_entry: MockConfigEntry )", "temperature_sensor_state) node = nodes[1] return node @pytest.fixture(name=\"text_node_state\", scope=\"session\") def text_node_state_fixture()", "does not recognize these methods. 
persistence.safe_load_sensors = MagicMock() persistence.save_sensors =", "default mocked transport.\"\"\" return serial_transport @pytest.fixture def transport_write(transport: MagicMock) ->", "@pytest.fixture(name=\"sound_sensor_state\", scope=\"session\") def sound_sensor_state_fixture() -> dict: \"\"\"Load the sound sensor", "text_node(gateway_nodes: dict[int, Sensor], text_node_state: dict) -> Sensor: \"\"\"Load the text", "await async_setup_component(hass, DOMAIN, config) await hass.async_block_till_done() yield config_entry @pytest.fixture def", "CONF_BAUD_RATE, CONF_DEVICE, CONF_GATEWAY_TYPE, CONF_GATEWAY_TYPE_SERIAL, CONF_VERSION, DOMAIN, ) from homeassistant.core import", "update_gateway_nodes(gateway_nodes, sound_sensor_state) node = nodes[1] return node @pytest.fixture(name=\"distance_sensor_state\", scope=\"session\") def", "collections.abc import AsyncGenerator, Callable, Generator import json from typing import", "power sensor state.\"\"\" return load_nodes_state(\"mysensors/power_sensor_state.json\") @pytest.fixture def power_sensor(gateway_nodes: dict[int, Sensor],", "not recognize these methods. 
persistence.safe_load_sensors = MagicMock() persistence.save_sensors = MagicMock()", "MagicMock, integration: MockConfigEntry ) -> BaseSyncGateway: \"\"\"Return a setup gateway.\"\"\"", "dict[int, Sensor] ) -> dict: \"\"\"Update the gateway nodes.\"\"\" gateway_nodes.update(nodes)", "from tests.common import MockConfigEntry, load_fixture @pytest.fixture(autouse=True) def device_tracker_storage(mock_device_tracker_conf: list[Device]) ->", "{} config_entry.add_to_hass(hass) with patch(\"homeassistant.components.mysensors.device.UPDATE_DELAY\", new=0): await async_setup_component(hass, DOMAIN, config) await", "for integration set up.\"\"\" return serial_entry @pytest.fixture(name=\"integration\") async def integration_fixture(", "autospec=True ) as persistence_class: persistence = persistence_class.return_value mock_gateway_features(persistence, transport_class, gateway_nodes)", "AsyncGenerator[dict[int, Sensor], None]: \"\"\"Mock a serial transport.\"\"\" with patch( \"mysensors.gateway_serial.AsyncTransport\",", "gateway_nodes: dict[int, Sensor], temperature_sensor_state: dict ) -> Sensor: \"\"\"Load the", "nodes.\"\"\" gateway_nodes.update(nodes) return nodes @pytest.fixture(name=\"gps_sensor_state\", scope=\"session\") def gps_sensor_state_fixture() -> dict:", "transport.call_args[0][0] # node_id;child_id;command;ack;type;payload\\n gateway.logic(message_string) return receive_message_callback @pytest.fixture(name=\"gateway\") def gateway_fixture( transport:", "MockConfigEntry( domain=DOMAIN, data={ CONF_GATEWAY_TYPE: CONF_GATEWAY_TYPE_SERIAL, CONF_VERSION: \"2.3\", CONF_DEVICE: \"/test/device\", CONF_BAUD_RATE:", "return receive_message_callback @pytest.fixture(name=\"gateway\") def gateway_fixture( transport: MagicMock, integration: MockConfigEntry )", "new=0): await async_setup_component(hass, DOMAIN, config) await hass.async_block_till_done() yield config_entry @pytest.fixture", "def gps_sensor_state_fixture() -> dict: \"\"\"Load the gps sensor state.\"\"\" 
return", "return node @pytest.fixture(name=\"energy_sensor_state\", scope=\"session\") def energy_sensor_state_fixture() -> dict: \"\"\"Load the", "temperature_sensor_state: dict ) -> Sensor: \"\"\"Load the temperature sensor.\"\"\" nodes", "from via persistence.\"\"\" gateway = transport_class.call_args[0][0] gateway.sensors.update(nodes) persistence.schedule_save_sensors = AsyncMock(", "persistence_class.return_value mock_gateway_features(persistence, transport_class, gateway_nodes) yield transport_class def mock_gateway_features( persistence: MagicMock,", "mock_connect @pytest.fixture(name=\"transport\") def transport_fixture(serial_transport: MagicMock) -> MagicMock: \"\"\"Return the default", "Sensor: \"\"\"Load the power sensor.\"\"\" nodes = update_gateway_nodes(gateway_nodes, power_sensor_state) node", "nodes = update_gateway_nodes(gateway_nodes, temperature_sensor_state) node = nodes[1] return node @pytest.fixture(name=\"text_node_state\",", "Sensor]: \"\"\"Return the gateway nodes dict.\"\"\" return {} @pytest.fixture(name=\"serial_transport\") async", "async def serial_entry_fixture(hass: HomeAssistant) -> MockConfigEntry: \"\"\"Create a config entry", "scope=\"session\") def power_sensor_state_fixture() -> dict: \"\"\"Load the power sensor state.\"\"\"", "for the gateway.\"\"\" def receive_message_callback(message_string: str) -> None: \"\"\"Receive a", "nodes[1] return node @pytest.fixture(name=\"power_sensor_state\", scope=\"session\") def power_sensor_state_fixture() -> dict: \"\"\"Load", "gps_sensor_state) node = nodes[1] return node @pytest.fixture(name=\"power_sensor_state\", scope=\"session\") def power_sensor_state_fixture()", "check.\"\"\" with patch(\"homeassistant.components.mysensors.gateway.cv.isdevice\") as is_device: is_device.side_effect = lambda device: device", "-> MagicMock: \"\"\"Return the default mocked transport.\"\"\" return serial_transport @pytest.fixture", "scope=\"session\") def energy_sensor_state_fixture() -> dict: 
\"\"\"Load the energy sensor state.\"\"\"", "load_nodes_state(\"mysensors/text_node_state.json\") @pytest.fixture def text_node(gateway_nodes: dict[int, Sensor], text_node_state: dict) -> Sensor:", "MySensors message format. \"\"\" gateway = transport.call_args[0][0] # node_id;child_id;command;ack;type;payload\\n gateway.logic(message_string)", "homeassistant.setup import async_setup_component from tests.common import MockConfigEntry, load_fixture @pytest.fixture(autouse=True) def", "dict) -> Sensor: \"\"\"Load the power sensor.\"\"\" nodes = update_gateway_nodes(gateway_nodes,", "the temperature sensor.\"\"\" nodes = update_gateway_nodes(gateway_nodes, temperature_sensor_state) node = nodes[1]", "mysensors.sensor import Sensor import pytest from homeassistant.components.device_tracker.legacy import Device from", "entry used for integration set up.\"\"\" return serial_entry @pytest.fixture(name=\"integration\") async", "mocked transport.\"\"\" return serial_transport @pytest.fixture def transport_write(transport: MagicMock) -> MagicMock:", "@pytest.fixture(name=\"is_serial_port\") def is_serial_port_fixture() -> Generator[MagicMock, None, None]: \"\"\"Patch the serial", "lambda device: device yield is_device @pytest.fixture(name=\"gateway_nodes\") def gateway_nodes_fixture() -> dict[int,", "sensor.\"\"\" nodes = update_gateway_nodes(gateway_nodes, power_sensor_state) node = nodes[1] return node", "hass.config.components.add(MQTT_DOMAIN) @pytest.fixture(name=\"is_serial_port\") def is_serial_port_fixture() -> Generator[MagicMock, None, None]: \"\"\"Patch the", "def integration_fixture( hass: HomeAssistant, transport: MagicMock, config_entry: MockConfigEntry ) ->", "= nodes[1] return node @pytest.fixture(name=\"text_node_state\", scope=\"session\") def text_node_state_fixture() -> dict:", "Sensor] ) -> dict: \"\"\"Update the gateway nodes.\"\"\" gateway_nodes.update(nodes) return", "def update_gateway_nodes( gateway_nodes: dict[int, Sensor], nodes: dict[int, Sensor] 
) ->", "import annotations from collections.abc import AsyncGenerator, Callable, Generator import json", "def serial_entry_fixture(hass: HomeAssistant) -> MockConfigEntry: \"\"\"Create a config entry for", "-> dict: \"\"\"Load the text node state.\"\"\" return load_nodes_state(\"mysensors/text_node_state.json\") @pytest.fixture", "import BaseSyncGateway from mysensors.persistence import MySensorsJSONDecoder from mysensors.sensor import Sensor", "the gateway.\"\"\" def receive_message_callback(message_string: str) -> None: \"\"\"Receive a message", "the sound sensor.\"\"\" nodes = update_gateway_nodes(gateway_nodes, sound_sensor_state) node = nodes[1]", "unittest.mock import AsyncMock, MagicMock, patch from mysensors import BaseSyncGateway from", "transport_class def mock_gateway_features( persistence: MagicMock, transport_class: MagicMock, nodes: dict[int, Sensor]", "serial_entry @pytest.fixture(name=\"integration\") async def integration_fixture( hass: HomeAssistant, transport: MagicMock, config_entry:", "def distance_sensor_state_fixture() -> dict: \"\"\"Load the distance sensor state.\"\"\" return", "state.\"\"\" return load_nodes_state(\"mysensors/text_node_state.json\") @pytest.fixture def text_node(gateway_nodes: dict[int, Sensor], text_node_state: dict)", "hass: HomeAssistant, transport: MagicMock, config_entry: MockConfigEntry ) -> AsyncGenerator[MockConfigEntry, None]:", "distance_sensor_state) node = nodes[1] return node @pytest.fixture(name=\"temperature_sensor_state\", scope=\"session\") def temperature_sensor_state_fixture()", "transport: MagicMock, integration: MockConfigEntry ) -> BaseSyncGateway: \"\"\"Return a setup", "MagicMock, nodes: dict[int, Sensor] ) -> None: \"\"\"Mock the gateway", "fixtures.\"\"\" from __future__ import annotations from collections.abc import AsyncGenerator, Callable,", "the text child node.\"\"\" nodes = update_gateway_nodes(gateway_nodes, text_node_state) node =", "-> dict: \"\"\"Load the temperature sensor state.\"\"\" 
return load_nodes_state(\"mysensors/temperature_sensor_state.json\") @pytest.fixture", "the gateway nodes dict.\"\"\" return {} @pytest.fixture(name=\"serial_transport\") async def serial_transport_fixture(", "dict: \"\"\"Load the text node state.\"\"\" return load_nodes_state(\"mysensors/text_node_state.json\") @pytest.fixture def", "fixture.\"\"\" return json.loads(load_fixture(fixture_path), cls=MySensorsJSONDecoder) def update_gateway_nodes( gateway_nodes: dict[int, Sensor], nodes:", "-> dict: \"\"\"Update the gateway nodes.\"\"\" gateway_nodes.update(nodes) return nodes @pytest.fixture(name=\"gps_sensor_state\",", "DOMAIN, config) await hass.async_block_till_done() yield config_entry @pytest.fixture def receive_message( transport:", "-> Sensor: \"\"\"Load the temperature sensor.\"\"\" nodes = update_gateway_nodes(gateway_nodes, temperature_sensor_state)", "config: dict[str, Any] = {} config_entry.add_to_hass(hass) with patch(\"homeassistant.components.mysensors.device.UPDATE_DELAY\", new=0): await", "the MySensors message format. 
\"\"\" gateway = transport.call_args[0][0] # node_id;child_id;command;ack;type;payload\\n", "return devices @pytest.fixture(name=\"mqtt\") def mock_mqtt_fixture(hass: HomeAssistant) -> None: \"\"\"Mock the", ") from homeassistant.core import HomeAssistant from homeassistant.setup import async_setup_component from", "message for the gateway.\"\"\" def receive_message_callback(message_string: str) -> None: \"\"\"Receive", "None: \"\"\"Mock the gateway features.\"\"\" async def mock_schedule_save_sensors() -> None:", "Sensor: \"\"\"Load the text child node.\"\"\" nodes = update_gateway_nodes(gateway_nodes, text_node_state)", "\"mysensors.gateway_serial.AsyncTransport\", autospec=True ) as transport_class, patch(\"mysensors.task.OTAFirmware\", autospec=True), patch( \"mysensors.task.load_fw\", autospec=True", "dict[int, Sensor] ) -> None: \"\"\"Mock the gateway features.\"\"\" async", "receive_message_callback(message_string: str) -> None: \"\"\"Receive a message with the transport.", "return load_nodes_state(\"mysensors/power_sensor_state.json\") @pytest.fixture def power_sensor(gateway_nodes: dict[int, Sensor], power_sensor_state: dict) ->", "serial transport.\"\"\" with patch( \"mysensors.gateway_serial.AsyncTransport\", autospec=True ) as transport_class, patch(\"mysensors.task.OTAFirmware\",", "def transport_write(transport: MagicMock) -> MagicMock: \"\"\"Return the transport mock that", "Sensor], gps_sensor_state: dict) -> Sensor: \"\"\"Load the gps sensor.\"\"\" nodes", "Any from unittest.mock import AsyncMock, MagicMock, patch from mysensors import", "-> None: \"\"\"Mock the gateway features.\"\"\" async def mock_schedule_save_sensors() ->", "is_device @pytest.fixture(name=\"gateway_nodes\") def gateway_nodes_fixture() -> dict[int, Sensor]: \"\"\"Return the gateway", "nodes[1] return node @pytest.fixture(name=\"energy_sensor_state\", scope=\"session\") def energy_sensor_state_fixture() -> dict: \"\"\"Load", "nodes: dict[int, Sensor] ) -> dict: \"\"\"Update the 
gateway nodes.\"\"\"", "\"\"\"Load the sound sensor state.\"\"\" return load_nodes_state(\"mysensors/sound_sensor_state.json\") @pytest.fixture def sound_sensor(gateway_nodes:", "devices storage.\"\"\" devices = mock_device_tracker_conf return devices @pytest.fixture(name=\"mqtt\") def mock_mqtt_fixture(hass:", "sensor.\"\"\" nodes = update_gateway_nodes(gateway_nodes, temperature_sensor_state) node = nodes[1] return node", "dict[str, Any] = {} config_entry.add_to_hass(hass) with patch(\"homeassistant.components.mysensors.device.UPDATE_DELAY\", new=0): await async_setup_component(hass,", "load_nodes_state(\"mysensors/sound_sensor_state.json\") @pytest.fixture def sound_sensor(gateway_nodes: dict[int, Sensor], sound_sensor_state: dict) -> Sensor:", "integration: MockConfigEntry ) -> Callable[[str], None]: \"\"\"Receive a message for", "\"\"\"Load the energy sensor.\"\"\" nodes = update_gateway_nodes(gateway_nodes, energy_sensor_state) node =", "is_device: is_device.side_effect = lambda device: device yield is_device @pytest.fixture(name=\"gateway_nodes\") def", "a message with the transport. 
The message_string parameter is a", "as MQTT_DOMAIN from homeassistant.components.mysensors.config_flow import DEFAULT_BAUD_RATE from homeassistant.components.mysensors.const import (", "AsyncMock, MagicMock, patch from mysensors import BaseSyncGateway from mysensors.persistence import", "node @pytest.fixture(name=\"temperature_sensor_state\", scope=\"session\") def temperature_sensor_state_fixture() -> dict: \"\"\"Load the temperature", "-> BaseSyncGateway: \"\"\"Return a setup gateway.\"\"\" return transport.call_args[0][0] def load_nodes_state(fixture_path:", "device yield is_device @pytest.fixture(name=\"gateway_nodes\") def gateway_nodes_fixture() -> dict[int, Sensor]: \"\"\"Return", "dict ) -> Sensor: \"\"\"Load the energy sensor.\"\"\" nodes =", "with patch(\"homeassistant.components.mysensors.device.UPDATE_DELAY\", new=0): await async_setup_component(hass, DOMAIN, config) await hass.async_block_till_done() yield", "method.\"\"\" transport.connect_task = MagicMock() gateway = transport_class.call_args[0][0] gateway.on_conn_made(gateway) transport =", "entry.\"\"\" config: dict[str, Any] = {} config_entry.add_to_hass(hass) with patch(\"homeassistant.components.mysensors.device.UPDATE_DELAY\", new=0):", "common mysensors fixtures.\"\"\" from __future__ import annotations from collections.abc import", ") as transport_class, patch(\"mysensors.task.OTAFirmware\", autospec=True), patch( \"mysensors.task.load_fw\", autospec=True ), patch(", "set up.\"\"\" return serial_entry @pytest.fixture(name=\"integration\") async def integration_fixture( hass: HomeAssistant,", "integration: MockConfigEntry ) -> BaseSyncGateway: \"\"\"Return a setup gateway.\"\"\" return", "node_id;child_id;command;ack;type;payload\\n gateway.logic(message_string) return receive_message_callback @pytest.fixture(name=\"gateway\") def gateway_fixture( transport: MagicMock, integration:", "load_nodes_state(\"mysensors/temperature_sensor_state.json\") @pytest.fixture def temperature_sensor( 
gateway_nodes: dict[int, Sensor], temperature_sensor_state: dict )", "energy_sensor_state_fixture() -> dict: \"\"\"Load the energy sensor state.\"\"\" return load_nodes_state(\"mysensors/energy_sensor_state.json\")", "messages.\"\"\" return transport.return_value.send @pytest.fixture(name=\"serial_entry\") async def serial_entry_fixture(hass: HomeAssistant) -> MockConfigEntry:", "via persistence.\"\"\" gateway = transport_class.call_args[0][0] gateway.sensors.update(nodes) persistence.schedule_save_sensors = AsyncMock( side_effect=mock_schedule_save_sensors", "def sound_sensor(gateway_nodes: dict[int, Sensor], sound_sensor_state: dict) -> Sensor: \"\"\"Load the", "nodes[1] return node @pytest.fixture(name=\"temperature_sensor_state\", scope=\"session\") def temperature_sensor_state_fixture() -> dict: \"\"\"Load", "CONF_GATEWAY_TYPE, CONF_GATEWAY_TYPE_SERIAL, CONF_VERSION, DOMAIN, ) from homeassistant.core import HomeAssistant from", "\"/test/device\", CONF_BAUD_RATE: DEFAULT_BAUD_RATE, }, ) return entry @pytest.fixture(name=\"config_entry\") def config_entry_fixture(serial_entry:", "\"\"\"Load the temperature sensor state.\"\"\" return load_nodes_state(\"mysensors/temperature_sensor_state.json\") @pytest.fixture def temperature_sensor(", "@pytest.fixture def gps_sensor(gateway_nodes: dict[int, Sensor], gps_sensor_state: dict) -> Sensor: \"\"\"Load", "device tracker known devices storage.\"\"\" devices = mock_device_tracker_conf return devices", "@pytest.fixture(name=\"text_node_state\", scope=\"session\") def text_node_state_fixture() -> dict: \"\"\"Load the text node", "@pytest.fixture(name=\"temperature_sensor_state\", scope=\"session\") def temperature_sensor_state_fixture() -> dict: \"\"\"Load the temperature sensor", "Generator import json from typing import Any from unittest.mock import", "def power_sensor_state_fixture() -> dict: \"\"\"Load the power sensor state.\"\"\" return", "MQTT_DOMAIN from homeassistant.components.mysensors.config_flow import 
DEFAULT_BAUD_RATE from homeassistant.components.mysensors.const import ( CONF_BAUD_RATE,", "__future__ import annotations from collections.abc import AsyncGenerator, Callable, Generator import", "import Sensor import pytest from homeassistant.components.device_tracker.legacy import Device from homeassistant.components.mqtt", "reason autospeccing does not recognize these methods. persistence.safe_load_sensors = MagicMock()", "AsyncGenerator[MockConfigEntry, None]: \"\"\"Set up the mysensors integration with a config", "return load_nodes_state(\"mysensors/sound_sensor_state.json\") @pytest.fixture def sound_sensor(gateway_nodes: dict[int, Sensor], sound_sensor_state: dict) ->", "return node @pytest.fixture(name=\"sound_sensor_state\", scope=\"session\") def sound_sensor_state_fixture() -> dict: \"\"\"Load the", "power sensor.\"\"\" nodes = update_gateway_nodes(gateway_nodes, power_sensor_state) node = nodes[1] return", "HomeAssistant from homeassistant.setup import async_setup_component from tests.common import MockConfigEntry, load_fixture", "-> None: \"\"\"Mock the MQTT integration.\"\"\" hass.config.components.add(MQTT_DOMAIN) @pytest.fixture(name=\"is_serial_port\") def is_serial_port_fixture()", "yield is_device @pytest.fixture(name=\"gateway_nodes\") def gateway_nodes_fixture() -> dict[int, Sensor]: \"\"\"Return the", "import ( CONF_BAUD_RATE, CONF_DEVICE, CONF_GATEWAY_TYPE, CONF_GATEWAY_TYPE_SERIAL, CONF_VERSION, DOMAIN, ) from", "MagicMock: \"\"\"Return the default mocked transport.\"\"\" return serial_transport @pytest.fixture def", "Any] = {} config_entry.add_to_hass(hass) with patch(\"homeassistant.components.mysensors.device.UPDATE_DELAY\", new=0): await async_setup_component(hass, DOMAIN,", "load_nodes_state(\"mysensors/gps_sensor_state.json\") @pytest.fixture def gps_sensor(gateway_nodes: dict[int, Sensor], gps_sensor_state: dict) -> Sensor:", "a config entry for a serial gateway.\"\"\" entry = MockConfigEntry(", "transport.return_value.send 
@pytest.fixture(name=\"serial_entry\") async def serial_entry_fixture(hass: HomeAssistant) -> MockConfigEntry: \"\"\"Create a", "\"\"\"Load the power sensor.\"\"\" nodes = update_gateway_nodes(gateway_nodes, power_sensor_state) node =", "mock_gateway_features( persistence: MagicMock, transport_class: MagicMock, nodes: dict[int, Sensor] ) ->", "from homeassistant.components.device_tracker.legacy import Device from homeassistant.components.mqtt import DOMAIN as MQTT_DOMAIN", "homeassistant.core import HomeAssistant from homeassistant.setup import async_setup_component from tests.common import", "= MagicMock() gateway = transport_class.call_args[0][0] gateway.on_conn_made(gateway) transport = transport_class.return_value transport.connect_task", "from homeassistant.core import HomeAssistant from homeassistant.setup import async_setup_component from tests.common", "= transport_class.call_args[0][0] gateway.on_conn_made(gateway) transport = transport_class.return_value transport.connect_task = None transport.connect.side_effect", "CONF_GATEWAY_TYPE: CONF_GATEWAY_TYPE_SERIAL, CONF_VERSION: \"2.3\", CONF_DEVICE: \"/test/device\", CONF_BAUD_RATE: DEFAULT_BAUD_RATE, }, )", ") -> None: \"\"\"Mock the gateway features.\"\"\" async def mock_schedule_save_sensors()", "the energy sensor state.\"\"\" return load_nodes_state(\"mysensors/energy_sensor_state.json\") @pytest.fixture def energy_sensor( gateway_nodes:", "is_serial_port_fixture() -> Generator[MagicMock, None, None]: \"\"\"Patch the serial port check.\"\"\"", "MagicMock, transport_class: MagicMock, nodes: dict[int, Sensor] ) -> None: \"\"\"Mock", "= nodes[1] return node @pytest.fixture(name=\"sound_sensor_state\", scope=\"session\") def sound_sensor_state_fixture() -> dict:", "-> dict: \"\"\"Load the distance sensor state.\"\"\" return load_nodes_state(\"mysensors/distance_sensor_state.json\") @pytest.fixture", ") # For some reason autospeccing does not recognize these", "-> dict: \"\"\"Load the gps sensor state.\"\"\" 
return load_nodes_state(\"mysensors/gps_sensor_state.json\") @pytest.fixture", "patch( \"mysensors.task.Persistence\", autospec=True ) as persistence_class: persistence = persistence_class.return_value mock_gateway_features(persistence,", "temperature_sensor( gateway_nodes: dict[int, Sensor], temperature_sensor_state: dict ) -> Sensor: \"\"\"Load", "\"2.3\", CONF_DEVICE: \"/test/device\", CONF_BAUD_RATE: DEFAULT_BAUD_RATE, }, ) return entry @pytest.fixture(name=\"config_entry\")", "MagicMock, integration: MockConfigEntry ) -> Callable[[str], None]: \"\"\"Receive a message", "None: \"\"\"Mock the MQTT integration.\"\"\" hass.config.components.add(MQTT_DOMAIN) @pytest.fixture(name=\"is_serial_port\") def is_serial_port_fixture() ->", "gateway features.\"\"\" async def mock_schedule_save_sensors() -> None: \"\"\"Load nodes from", "gateway_nodes: dict[int, Sensor], nodes: dict[int, Sensor] ) -> dict: \"\"\"Update", "gateway_nodes.update(nodes) return nodes @pytest.fixture(name=\"gps_sensor_state\", scope=\"session\") def gps_sensor_state_fixture() -> dict: \"\"\"Load", "def mock_schedule_save_sensors() -> None: \"\"\"Load nodes from via persistence.\"\"\" gateway", "known devices storage.\"\"\" devices = mock_device_tracker_conf return devices @pytest.fixture(name=\"mqtt\") def", "Sensor], is_serial_port: MagicMock, ) -> AsyncGenerator[dict[int, Sensor], None]: \"\"\"Mock a", "def energy_sensor( gateway_nodes: dict[int, Sensor], energy_sensor_state: dict ) -> Sensor:", "transport_class: MagicMock, nodes: dict[int, Sensor] ) -> None: \"\"\"Mock the", "the serial port check.\"\"\" with patch(\"homeassistant.components.mysensors.gateway.cv.isdevice\") as is_device: is_device.side_effect =", "return load_nodes_state(\"mysensors/text_node_state.json\") @pytest.fixture def text_node(gateway_nodes: dict[int, Sensor], text_node_state: dict) ->", "-> list[Device]: \"\"\"Mock out device tracker known devices storage.\"\"\" devices", "transport_class.call_args[0][0] 
gateway.sensors.update(nodes) persistence.schedule_save_sensors = AsyncMock( side_effect=mock_schedule_save_sensors ) # For some", "sensor.\"\"\" nodes = update_gateway_nodes(gateway_nodes, distance_sensor_state) node = nodes[1] return node", "\"\"\"Receive a message with the transport. The message_string parameter is", "node @pytest.fixture(name=\"energy_sensor_state\", scope=\"session\") def energy_sensor_state_fixture() -> dict: \"\"\"Load the energy", "a message for the gateway.\"\"\" def receive_message_callback(message_string: str) -> None:", "\"\"\"Mock the gateway features.\"\"\" async def mock_schedule_save_sensors() -> None: \"\"\"Load", "dict: \"\"\"Load the power sensor state.\"\"\" return load_nodes_state(\"mysensors/power_sensor_state.json\") @pytest.fixture def", "def distance_sensor( gateway_nodes: dict[int, Sensor], distance_sensor_state: dict ) -> Sensor:", "import DOMAIN as MQTT_DOMAIN from homeassistant.components.mysensors.config_flow import DEFAULT_BAUD_RATE from homeassistant.components.mysensors.const", "as persistence_class: persistence = persistence_class.return_value mock_gateway_features(persistence, transport_class, gateway_nodes) yield transport_class", "node = nodes[1] return node @pytest.fixture(name=\"temperature_sensor_state\", scope=\"session\") def temperature_sensor_state_fixture() ->", "parameter is a string in the MySensors message format. 
\"\"\"", "= MagicMock() async def mock_connect() -> None: \"\"\"Mock the start", "@pytest.fixture def distance_sensor( gateway_nodes: dict[int, Sensor], distance_sensor_state: dict ) ->", "transport: MagicMock, integration: MockConfigEntry ) -> Callable[[str], None]: \"\"\"Receive a", "= nodes[1] return node @pytest.fixture(name=\"distance_sensor_state\", scope=\"session\") def distance_sensor_state_fixture() -> dict:", "= None transport.connect.side_effect = mock_connect @pytest.fixture(name=\"transport\") def transport_fixture(serial_transport: MagicMock) ->", "MagicMock, patch from mysensors import BaseSyncGateway from mysensors.persistence import MySensorsJSONDecoder", "DOMAIN as MQTT_DOMAIN from homeassistant.components.mysensors.config_flow import DEFAULT_BAUD_RATE from homeassistant.components.mysensors.const import", "def gateway_fixture( transport: MagicMock, integration: MockConfigEntry ) -> BaseSyncGateway: \"\"\"Return", "homeassistant.components.device_tracker.legacy import Device from homeassistant.components.mqtt import DOMAIN as MQTT_DOMAIN from", "@pytest.fixture(name=\"energy_sensor_state\", scope=\"session\") def energy_sensor_state_fixture() -> dict: \"\"\"Load the energy sensor", "update_gateway_nodes(gateway_nodes, power_sensor_state) node = nodes[1] return node @pytest.fixture(name=\"energy_sensor_state\", scope=\"session\") def", "transport = transport_class.return_value transport.connect_task = None transport.connect.side_effect = mock_connect @pytest.fixture(name=\"transport\")", "transport_class.return_value transport.connect_task = None transport.connect.side_effect = mock_connect @pytest.fixture(name=\"transport\") def transport_fixture(serial_transport:", "dict[int, Sensor], power_sensor_state: dict) -> Sensor: \"\"\"Load the power sensor.\"\"\"", "-> Sensor: \"\"\"Load the gps sensor.\"\"\" nodes = update_gateway_nodes(gateway_nodes, gps_sensor_state)", "the power sensor.\"\"\" nodes = update_gateway_nodes(gateway_nodes, 
power_sensor_state) node = nodes[1]", "is a string in the MySensors message format. \"\"\" gateway", "-> None: \"\"\"Receive a message with the transport. The message_string", "sound_sensor_state) node = nodes[1] return node @pytest.fixture(name=\"distance_sensor_state\", scope=\"session\") def distance_sensor_state_fixture()", "@pytest.fixture def transport_write(transport: MagicMock) -> MagicMock: \"\"\"Return the transport mock", "\"\"\"Load the temperature sensor.\"\"\" nodes = update_gateway_nodes(gateway_nodes, temperature_sensor_state) node =", "sound_sensor(gateway_nodes: dict[int, Sensor], sound_sensor_state: dict) -> Sensor: \"\"\"Load the sound", "@pytest.fixture def temperature_sensor( gateway_nodes: dict[int, Sensor], temperature_sensor_state: dict ) ->", "persistence = persistence_class.return_value mock_gateway_features(persistence, transport_class, gateway_nodes) yield transport_class def mock_gateway_features(", "Sensor: \"\"\"Load the distance sensor.\"\"\" nodes = update_gateway_nodes(gateway_nodes, distance_sensor_state) node", "distance_sensor_state_fixture() -> dict: \"\"\"Load the distance sensor state.\"\"\" return load_nodes_state(\"mysensors/distance_sensor_state.json\")", "def text_node_state_fixture() -> dict: \"\"\"Load the text node state.\"\"\" return", "def gateway_nodes_fixture() -> dict[int, Sensor]: \"\"\"Return the gateway nodes dict.\"\"\"", "None: \"\"\"Load nodes from via persistence.\"\"\" gateway = transport_class.call_args[0][0] gateway.sensors.update(nodes)", "= update_gateway_nodes(gateway_nodes, temperature_sensor_state) node = nodes[1] return node @pytest.fixture(name=\"text_node_state\", scope=\"session\")", "def receive_message( transport: MagicMock, integration: MockConfigEntry ) -> Callable[[str], None]:", "child node.\"\"\" nodes = update_gateway_nodes(gateway_nodes, text_node_state) node = nodes[1] return", "import HomeAssistant from homeassistant.setup import async_setup_component from tests.common import 
MockConfigEntry,", "the energy sensor.\"\"\" nodes = update_gateway_nodes(gateway_nodes, energy_sensor_state) node = nodes[1]", "async def mock_schedule_save_sensors() -> None: \"\"\"Load nodes from via persistence.\"\"\"", "the gateway features.\"\"\" async def mock_schedule_save_sensors() -> None: \"\"\"Load nodes", "transport_class.call_args[0][0] gateway.on_conn_made(gateway) transport = transport_class.return_value transport.connect_task = None transport.connect.side_effect =", "= AsyncMock( side_effect=mock_schedule_save_sensors ) # For some reason autospeccing does", "Sensor import pytest from homeassistant.components.device_tracker.legacy import Device from homeassistant.components.mqtt import", "def power_sensor(gateway_nodes: dict[int, Sensor], power_sensor_state: dict) -> Sensor: \"\"\"Load the", "return load_nodes_state(\"mysensors/distance_sensor_state.json\") @pytest.fixture def distance_sensor( gateway_nodes: dict[int, Sensor], distance_sensor_state: dict", "text_node_state: dict) -> Sensor: \"\"\"Load the text child node.\"\"\" nodes", "serial_transport @pytest.fixture def transport_write(transport: MagicMock) -> MagicMock: \"\"\"Return the transport", "\"\"\"Set up the mysensors integration with a config entry.\"\"\" config:", "-> MockConfigEntry: \"\"\"Create a config entry for a serial gateway.\"\"\"", "@pytest.fixture(autouse=True) def device_tracker_storage(mock_device_tracker_conf: list[Device]) -> list[Device]: \"\"\"Mock out device tracker", "state.\"\"\" return load_nodes_state(\"mysensors/power_sensor_state.json\") @pytest.fixture def power_sensor(gateway_nodes: dict[int, Sensor], power_sensor_state: dict)", "dict: \"\"\"Load the distance sensor state.\"\"\" return load_nodes_state(\"mysensors/distance_sensor_state.json\") @pytest.fixture def", "state.\"\"\" return load_nodes_state(\"mysensors/energy_sensor_state.json\") @pytest.fixture def energy_sensor( gateway_nodes: dict[int, Sensor], energy_sensor_state:", "\"\"\"Patch the serial port 
check.\"\"\" with patch(\"homeassistant.components.mysensors.gateway.cv.isdevice\") as is_device: is_device.side_effect", "CONF_DEVICE: \"/test/device\", CONF_BAUD_RATE: DEFAULT_BAUD_RATE, }, ) return entry @pytest.fixture(name=\"config_entry\") def", "transport.connect_task = None transport.connect.side_effect = mock_connect @pytest.fixture(name=\"transport\") def transport_fixture(serial_transport: MagicMock)", "mysensors fixtures.\"\"\" from __future__ import annotations from collections.abc import AsyncGenerator,", "return load_nodes_state(\"mysensors/temperature_sensor_state.json\") @pytest.fixture def temperature_sensor( gateway_nodes: dict[int, Sensor], temperature_sensor_state: dict", "return load_nodes_state(\"mysensors/energy_sensor_state.json\") @pytest.fixture def energy_sensor( gateway_nodes: dict[int, Sensor], energy_sensor_state: dict", "patch(\"homeassistant.components.mysensors.gateway.cv.isdevice\") as is_device: is_device.side_effect = lambda device: device yield is_device", "the transport. 
The message_string parameter is a string in the", "nodes[1] return node @pytest.fixture(name=\"sound_sensor_state\", scope=\"session\") def sound_sensor_state_fixture() -> dict: \"\"\"Load", "scope=\"session\") def temperature_sensor_state_fixture() -> dict: \"\"\"Load the temperature sensor state.\"\"\"", "up the mysensors integration with a config entry.\"\"\" config: dict[str,", "-> MagicMock: \"\"\"Return the transport mock that accepts string messages.\"\"\"", "with patch( \"mysensors.gateway_serial.AsyncTransport\", autospec=True ) as transport_class, patch(\"mysensors.task.OTAFirmware\", autospec=True), patch(", "mock_connect() -> None: \"\"\"Mock the start method.\"\"\" transport.connect_task = MagicMock()", "def gps_sensor(gateway_nodes: dict[int, Sensor], gps_sensor_state: dict) -> Sensor: \"\"\"Load the", "annotations from collections.abc import AsyncGenerator, Callable, Generator import json from", "dict[int, Sensor], sound_sensor_state: dict) -> Sensor: \"\"\"Load the sound sensor.\"\"\"", "async def mock_connect() -> None: \"\"\"Mock the start method.\"\"\" transport.connect_task", "Sensor: \"\"\"Load the energy sensor.\"\"\" nodes = update_gateway_nodes(gateway_nodes, energy_sensor_state) node", "mysensors integration with a config entry.\"\"\" config: dict[str, Any] =", "mysensors.persistence import MySensorsJSONDecoder from mysensors.sensor import Sensor import pytest from", "with patch(\"homeassistant.components.mysensors.gateway.cv.isdevice\") as is_device: is_device.side_effect = lambda device: device yield", ") -> Sensor: \"\"\"Load the temperature sensor.\"\"\" nodes = update_gateway_nodes(gateway_nodes,", "nodes @pytest.fixture(name=\"gps_sensor_state\", scope=\"session\") def gps_sensor_state_fixture() -> dict: \"\"\"Load the gps", "dict[int, Sensor], distance_sensor_state: dict ) -> Sensor: \"\"\"Load the distance", "= transport_class.call_args[0][0] gateway.sensors.update(nodes) persistence.schedule_save_sensors = AsyncMock( 
side_effect=mock_schedule_save_sensors ) # For", "from __future__ import annotations from collections.abc import AsyncGenerator, Callable, Generator", "MagicMock, ) -> AsyncGenerator[dict[int, Sensor], None]: \"\"\"Mock a serial transport.\"\"\"", "@pytest.fixture(name=\"gateway\") def gateway_fixture( transport: MagicMock, integration: MockConfigEntry ) -> BaseSyncGateway:", "transport.\"\"\" return serial_transport @pytest.fixture def transport_write(transport: MagicMock) -> MagicMock: \"\"\"Return", "nodes = update_gateway_nodes(gateway_nodes, power_sensor_state) node = nodes[1] return node @pytest.fixture(name=\"energy_sensor_state\",", "persistence: MagicMock, transport_class: MagicMock, nodes: dict[int, Sensor] ) -> None:", "sensor state.\"\"\" return load_nodes_state(\"mysensors/distance_sensor_state.json\") @pytest.fixture def distance_sensor( gateway_nodes: dict[int, Sensor],", "config_entry_fixture(serial_entry: MockConfigEntry) -> MockConfigEntry: \"\"\"Provide the config entry used for", "import MySensorsJSONDecoder from mysensors.sensor import Sensor import pytest from homeassistant.components.device_tracker.legacy", "dict) -> Sensor: \"\"\"Load the sound sensor.\"\"\" nodes = update_gateway_nodes(gateway_nodes,", "serial gateway.\"\"\" entry = MockConfigEntry( domain=DOMAIN, data={ CONF_GATEWAY_TYPE: CONF_GATEWAY_TYPE_SERIAL, CONF_VERSION:", "distance_sensor( gateway_nodes: dict[int, Sensor], distance_sensor_state: dict ) -> Sensor: \"\"\"Load", "from mysensors.sensor import Sensor import pytest from homeassistant.components.device_tracker.legacy import Device", "transport_fixture(serial_transport: MagicMock) -> MagicMock: \"\"\"Return the default mocked transport.\"\"\" return", "energy sensor.\"\"\" nodes = update_gateway_nodes(gateway_nodes, energy_sensor_state) node = nodes[1] return", "async def serial_transport_fixture( gateway_nodes: dict[int, Sensor], is_serial_port: MagicMock, ) ->", "MagicMock) -> MagicMock: \"\"\"Return the default mocked 
transport.\"\"\" return serial_transport", "load_nodes_state(\"mysensors/energy_sensor_state.json\") @pytest.fixture def energy_sensor( gateway_nodes: dict[int, Sensor], energy_sensor_state: dict )", "integration set up.\"\"\" return serial_entry @pytest.fixture(name=\"integration\") async def integration_fixture( hass:", "import AsyncMock, MagicMock, patch from mysensors import BaseSyncGateway from mysensors.persistence", "hass.async_block_till_done() yield config_entry @pytest.fixture def receive_message( transport: MagicMock, integration: MockConfigEntry", "the start method.\"\"\" transport.connect_task = MagicMock() gateway = transport_class.call_args[0][0] gateway.on_conn_made(gateway)", "\"\"\"Return the transport mock that accepts string messages.\"\"\" return transport.return_value.send", "entry for a serial gateway.\"\"\" entry = MockConfigEntry( domain=DOMAIN, data={", "transport.call_args[0][0] def load_nodes_state(fixture_path: str) -> dict: \"\"\"Load mysensors nodes fixture.\"\"\"", "devices @pytest.fixture(name=\"mqtt\") def mock_mqtt_fixture(hass: HomeAssistant) -> None: \"\"\"Mock the MQTT", "Sensor] ) -> None: \"\"\"Mock the gateway features.\"\"\" async def", "\"\"\"Provide common mysensors fixtures.\"\"\" from __future__ import annotations from collections.abc", "import Any from unittest.mock import AsyncMock, MagicMock, patch from mysensors", "Sensor], nodes: dict[int, Sensor] ) -> dict: \"\"\"Update the gateway", "side_effect=mock_schedule_save_sensors ) # For some reason autospeccing does not recognize", "= nodes[1] return node @pytest.fixture(name=\"power_sensor_state\", scope=\"session\") def power_sensor_state_fixture() -> dict:", "Device from homeassistant.components.mqtt import DOMAIN as MQTT_DOMAIN from homeassistant.components.mysensors.config_flow import", "nodes[1] return node @pytest.fixture(name=\"text_node_state\", scope=\"session\") def text_node_state_fixture() -> dict: \"\"\"Load", ") -> AsyncGenerator[MockConfigEntry, None]: 
\"\"\"Set up the mysensors integration with", "\"\"\"Load the distance sensor.\"\"\" nodes = update_gateway_nodes(gateway_nodes, distance_sensor_state) node =", "sensor.\"\"\" nodes = update_gateway_nodes(gateway_nodes, gps_sensor_state) node = nodes[1] return node", "with the transport. The message_string parameter is a string in", "sensor.\"\"\" nodes = update_gateway_nodes(gateway_nodes, sound_sensor_state) node = nodes[1] return node" ]
[ "return INTPoint.getIntersectPoint(linea,lineb) def sortLines(lines): #moved to Edges.py return Edges.sortLines(lines) def", "point[0] sumy += w * point[1] return (sumx/count,sumy/count) def devideIntoPoints(Points):", "in range(0,lens,1): Cluster[i] = [] for point in Points: if", "count == K: break for i in range(0,step,1): devideIntoPoints(points) for", "ret2 = [] for item in votes: if item[0] ==", "if p1 > count: K -= 1 ret.append(ret1) if p2", "str(ans[0][0]) + ',' + str(ans[0][1]) + ')(' + str(ans[1][0]) +", "Cluster[minpos].append(point) def KMean(points,K = 3,step = 50): global Cluster global", "num += 1 K = 3 ret = [] count", "sumx = 0.0 sumy = 0.0 for point in points:", "def voteForPoint(lines): #moved to INTPoint.py global votes global voters votes,voters", "lens minpos = now Cluster[minpos].append(point) def KMean(points,K = 3,step =", "cv.imwrite(outputname + 'linedetect.jpg',edges) cv.imwrite(outputname + 'answer.jpg',edges2) fd = open(outputname +", "= [] voters = {} image = cv.imread(inputname) edges =", "minlens: minlens = lens minpos = now Cluster[minpos].append(point) def KMean(points,K", "VPoints = [] for i in range(0,lenofvotes,1): votesFinal[votes2[i][0]] = votes2[i][1]", "* point[0] sumy += w * point[1] return (sumx/count,sumy/count) def", "in range(0,K,1): Centers[i] = getGraPoint(Cluster[i]) def getFinal(points): count = 0.0", "sortLines(lines2) getVPoints2(lines2) VPoints = removeSame(VPoints) voteForPoint(lines2) votes2 = sorted(votes.iteritems(),key=lambda votes:votes[1],reverse=True)", "voters votes,voters = INTPoint.voteForPoint(lines,VPoints) return def getGraPoint(points): count = 1.0", "import Edges import INTPoint eps = 1e-7 votes = {}", "= 255 color[i] = 0 break for point in clu:", "VPoints VPoints = INTPoint.getVPoints2(lines,arange) return VPoints def getVPoints(num = 16):", "Edges.py return Edges.getLineABC(line) def getCirAnch(a,b): #moved to Edges.py return Edges.getCirAnch(a,b)", "= point[1] count += 1 if count == K: 
break", "import copy import math import Edges import INTPoint eps =", "Cluster = [] voters = {} image = cv.imread(inputname) edges", "= [] for item in votes: if item[0] == 'p'", "ret2 = ret1 p1 = votes[item] ret1 = item else:", "def mergeLines(lines): #moved to Edges.py return Edges.mergeLines(lines) def getLineABC(line): #moved", "or votes2[i][0][0] == 'p': votesFinal[votes2[i][0]] = votes2[i][1] VPoints.append(votes2[i][0]) votes =", "def deal(inputname,outputname): global votes global Groups global VPoints global Centers", "edges = cv.cvtColor(edges,cv.COLOR_GRAY2BGR) edges2 = copy.deepcopy(edges) for item in lines:", "votes2 = sorted(votes.iteritems(),key=lambda votes:votes[1],reverse=True) lenofvotes = min(len(votes2),max(5,int(len(votes2) * 0.2))) votesFinal", "Centers[count][0] = point[0] Centers[count][1] = point[1] count += 1 if", "i in range(0,K,1): ret.append(Centers[i]) return ret def deal(inputname,outputname): global votes", "item in lines: if item[0] == 'N': continue cv.line(edges,(item[0],item[1]),(item[2],item[3]),(0,0,255),2) for", "getLinesLength(line): #moved to INTPoint.py return INTPoint.getLinesLength(line) def getMidPoint(line): #moved to", "def shouldMerge(line1,line2): #moved to Edges.py return Edges.shouldMerge(line1,line2) def mergeLines(lines): #moved", "ret1 p1 = votes[item] ret1 = item else: if votes[item]", "lines2 = sortLines(lines2) getVPoints2(lines2) VPoints = removeSame(VPoints) voteForPoint(lines2) votes2 =", "open(outputname + 'answer.txt','w') fd.write('(' + str(ans[0][0]) + ',' + str(ans[0][1])", "lens = len(Groups[i]) for j in range(0,lens,1): for k in", "item else: if votes[item] > p2: p2 = votes[item] ret2", "= sorted(votes.iteritems(),key=lambda votes:votes[1],reverse=True) lenofvotes = min(len(votes2),max(5,int(len(votes2) * 0.2))) votesFinal =", "= INTPoint.voteForPoint(lines,VPoints) return def getGraPoint(points): count = 1.0 sumx =", "to Edges.py return Edges.getCirAnch(a,b) def getCrossPoint(linea,lineb): #moved to 
INTPoint.py return", "outOfSize(pos,edges): #moved to Edges.py return Edges.outOfSize(pos,edges) def extenLine(line,edges): #moved to", "= open(outputname + 'answer.txt','w') fd.write('(' + str(ans[0][0]) + ',' +", "1.0 sumx = 0.0 sumy = 0.0 for point in", "votesFinal[votes2[i][0]] = votes2[i][1] VPoints.append(votes2[i][0]) votes = votesFinal ans = getFinal(VPoints)", "votes2[i][1] VPoints.append(votes2[i][0]) votes = votesFinal ans = getFinal(VPoints) print ans", "in range(0,step,1): devideIntoPoints(points) for i in range(0,K,1): Centers[i] = getGraPoint(Cluster[i])", "p2 = votes[item] ret2 = item else: count += votes[item]", "clu: if point[0] > 0 and point[1] > 0: if", "cen in Centers: now += 1 lens = getLinesLength((point[0],point[1],cen[0],cen[1])) if", "VPoints.append(votes2[i][0]) votes = votesFinal ans = getFinal(VPoints) print ans edges", "import math import Edges import INTPoint eps = 1e-7 votes", "'N': continue cv.line(edges,(item[0],item[1]),(item[2],item[3]),(0,0,255),2) for item in lines2: cv.line(edges2,(item[0],item[1]),(item[2],item[3]),(0,0,255),2) color =", "num * 0.1 if p1 > count: K -= 1", "getEdges(image) cv.imwrite(outputname + 'edges.jpg',edges) lines = getLines(edges) lines2 = copy.deepcopy(lines)", "255 color[i] = 0 break for point in clu: if", "for j in range(0,lens,1): for k in range(j+1,lens,1): VPoints.append(getCrossPoint(Groups[i][j],Groups[i][k])) def", "if count == K: break for i in range(0,step,1): devideIntoPoints(points)", "Edges.py return Edges.outOfSize(pos,edges) def extenLine(line,edges): #moved to Edges.py return Edges.extenLine(line,edges)", "+ str(ans[2][0]) + ',' + str(ans[2][1]) + ')') fd.close deal(\"data/1.jpg\",'1')", "votes2[i][0][0] == 'p': votesFinal[votes2[i][0]] = votes2[i][1] VPoints.append(votes2[i][0]) votes = votesFinal", "Centers = [] Cluster = [] voters = {} image", "def extenLines(lines,edges): #moved to Edges.py return Edges.extenLines(lines,edges) def shouldMerge(line1,line2): #moved", "INTPoint.py 
global votes global voters votes,voters = INTPoint.voteForPoint(lines,VPoints) return def", "< edges.shape[0]: if votes[point] == 0: continue cv.line(edges2,(int(point[0]),int(point[1])),(int(point[0]),int(point[1])),(color[1],color[2],color[3]),10) for point", "votes2[i][0][0] == 'h' or votes2[i][0][0] == 'v' or votes2[i][0][0] ==", "Edges.getCirAnch(a,b) def getCrossPoint(linea,lineb): #moved to INTPoint.py return INTPoint.getIntersectPoint(linea,lineb) def sortLines(lines):", "= INTPoint.getVPoints2(lines,arange) return VPoints def getVPoints(num = 16): #this function", "count += 1 if count == K: break for i", "eps = 1e-7 votes = {} Groups = [] VPoints", "cv2 as cv import numpy as np import copy import", "KMean(points,K) for i in range(0,K,1): ret.append(Centers[i]) return ret def deal(inputname,outputname):", "= 0 break for point in clu: if point[0] >", "#moved to INTPoint.py return INTPoint.getIntersectPoint(linea,lineb) def sortLines(lines): #moved to Edges.py", "return def getGraPoint(points): count = 1.0 sumx = 0.0 sumy", "INTPoint.py return INTPoint.getLinesLength(line) def getMidPoint(line): #moved to INTPoint.py return INTPoint.getMidPoint(line)", "'answer.txt','w') fd.write('(' + str(ans[0][0]) + ',' + str(ans[0][1]) + ')('", "'h' and votes[point] != 0: Centers[count][0] = point[0] Centers[count][1] =", "or item[0] == 'h' or item[0] == 'v': if votes[item]", "0.2617): #moved to INTPoint.py global VPoints VPoints = INTPoint.getVPoints2(lines,arange) return", "[] voters = {} def getEdges(image): #moved to Edges.py return", "np import copy import math import Edges import INTPoint eps", "voteForPoint(lines): #moved to INTPoint.py global votes global voters votes,voters =", "i in range(lenofvotes,len(votes2),1): if votes2[i][0][0] == 'h' or votes2[i][0][0] ==", "in range(0,K,1): Cluster.append([]) Centers.append([0,0]) count = 0 for point in", "and point[0] != 'v' and point[0] != 'h' and votes[point]", "to Edges.py return Edges.getLineABC(line) def 
getCirAnch(a,b): #moved to Edges.py return", "lines = getLines(edges) lines2 = copy.deepcopy(lines) lines2 = extenLines(lines2,edges) lines2", "0 and point[1] > 0: if point[0] < edges.shape[1] and", "votes global voters votes,voters = INTPoint.voteForPoint(lines,VPoints) return def getGraPoint(points): count", "== 'p' or item[0] == 'h' or item[0] == 'v':", "')(' + str(ans[1][0]) + ',' + str(ans[1][1]) + ')(' +", "[] Centers = [] if K == 1: step =", "def outOfSize(pos,edges): #moved to Edges.py return Edges.outOfSize(pos,edges) def extenLine(line,edges): #moved", "cv.line(edges2,(int(point[0]),int(point[1])),(int(point[0]),int(point[1])),(color[1],color[2],color[3]),10) for point in ans: if point[0] > 0 and", "= removeSame(VPoints) voteForPoint(lines2) votes2 = sorted(votes.iteritems(),key=lambda votes:votes[1],reverse=True) lenofvotes = min(len(votes2),max(5,int(len(votes2)", "votes2[i][0][0] == 'v' or votes2[i][0][0] == 'p': votesFinal[votes2[i][0]] = votes2[i][1]", "to Edges.py return Edges.shouldMerge(line1,line2) def mergeLines(lines): #moved to Edges.py return", "to Edges.py return Edges.outOfSize(pos,edges) def extenLine(line,edges): #moved to Edges.py return", "return (sumx/count,sumy/count) def devideIntoPoints(Points): global Cluster lens = len(Cluster) for", "[] Cluster = [] voters = {} def getEdges(image): #moved", "points: w = votes[point] count += w sumx += w", "for point in Points: if point[0] == 'p' or point[0]", "getGraPoint(points): count = 1.0 sumx = 0.0 sumy = 0.0", "< minlens: minlens = lens minpos = now Cluster[minpos].append(point) def", "INTPoint.getMidPoint(line) def getArch(line,point): #moved to INTPoint.py return INTPoint.getArch(line,point) def voteForPoint(lines):", "= len(Cluster) for i in range(0,lens,1): Cluster[i] = [] for", "item[0] == 'p' or item[0] == 'h' or item[0] ==", "and point[1] < edges.shape[0]: cv.line(edges2,(int(point[0]),int(point[1])),(int(point[0]),int(point[1])),(255,255,255),10) cv.imwrite(outputname + 
'linedetect.jpg',edges) cv.imwrite(outputname +", "0.0 for point in points: w = votes[point] count +=", "cv.line(edges,(item[0],item[1]),(item[2],item[3]),(0,0,255),2) for item in lines2: cv.line(edges2,(item[0],item[1]),(item[2],item[3]),(0,0,255),2) color = [255,0,0,0] for", "now = -1 for cen in Centers: now += 1", "1 if count == K: break for i in range(0,step,1):", "+= 1 lens = getLinesLength((point[0],point[1],cen[0],cen[1])) if lens < minlens: minlens", "Centers = [] Cluster = [] voters = {} def", "Edges.py return Edges.mergeLines(lines) def getLineABC(line): #moved to Edges.py return Edges.getLineABC(line)", "Edges.py return Edges.extenLine(line,edges) def extenLines(lines,edges): #moved to Edges.py return Edges.extenLines(lines,edges)", "== 'N': continue cv.line(edges,(item[0],item[1]),(item[2],item[3]),(0,0,255),2) for item in lines2: cv.line(edges2,(item[0],item[1]),(item[2],item[3]),(0,0,255),2) color", "def devideIntoPoints(Points): global Cluster lens = len(Cluster) for i in", "INTPoint.voteForPoint(lines,VPoints) return def getGraPoint(points): count = 1.0 sumx = 0.0", "for item in votes: if item[0] == 'p' or item[0]", "Cluster global voters votes = {} Groups = [] VPoints", "+= votes[item] num += 1 K = 3 ret =", "for point in points: w = votes[point] count += w", "lens = getLinesLength((point[0],point[1],cen[0],cen[1])) if lens < minlens: minlens = lens", "getVPoints2(lines2) VPoints = removeSame(VPoints) voteForPoint(lines2) votes2 = sorted(votes.iteritems(),key=lambda votes:votes[1],reverse=True) lenofvotes", "= min(len(votes2),max(5,int(len(votes2) * 0.2))) votesFinal = {} VPoints = []", "'v': if votes[item] > p1: p2 = p1 ret2 =", "point[0] > 0 and point[1] > 0: if point[0] <", "0 now = -1 for cen in Centers: now +=", "VPoints = [] Centers = [] Cluster = [] voters", "sumy += w * point[1] return (sumx/count,sumy/count) def devideIntoPoints(Points): global", "= getEdges(image) cv.imwrite(outputname + 'edges.jpg',edges) lines = getLines(edges) lines2 
=", "'h' or point[0] == 'v': continue if votes[point] == 0:", "cv.imwrite(outputname + 'edges.jpg',edges) lines = getLines(edges) lines2 = copy.deepcopy(lines) lines2", "if point[0] < edges.shape[1] and point[1] < edges.shape[0]: if votes[point]", "Groups = [] VPoints = [] Centers = [] Cluster", "speed for i in range(0,num + 1,1): lens = len(Groups[i])", "w sumx += w * point[0] sumy += w *", "!= 'h' and votes[point] != 0: Centers[count][0] = point[0] Centers[count][1]", "#moved to Edges.py return Edges.getLineABC(line) def getCirAnch(a,b): #moved to Edges.py", "len(Groups[i]) for j in range(0,lens,1): for k in range(j+1,lens,1): VPoints.append(getCrossPoint(Groups[i][j],Groups[i][k]))", "ret1 = item else: if votes[item] > p2: p2 =", "== 'v': if votes[item] > p1: p2 = p1 ret2", "== 0: continue cv.line(edges2,(int(point[0]),int(point[1])),(int(point[0]),int(point[1])),(color[1],color[2],color[3]),10) for point in ans: if point[0]", "= copy.deepcopy(lines) lines2 = extenLines(lines2,edges) lines2 = mergeLines(lines2) #devideIntoGroups(lines2,3) lines2", "votes[point] == 0: continue minlens = 1e15 minpos = 0", "min(len(votes2),max(5,int(len(votes2) * 0.2))) votesFinal = {} VPoints = [] for", "+ ')(' + str(ans[2][0]) + ',' + str(ans[2][1]) + ')')", "p1: p2 = p1 ret2 = ret1 p1 = votes[item]", "range(0,K,1): ret.append(Centers[i]) return ret def deal(inputname,outputname): global votes global Groups", "function is fallen into disuse because of the low speed", "{} Groups = [] VPoints = [] Centers = []", "= [] Centers = [] Cluster = [] voters =", "INTPoint.py global VPoints VPoints = INTPoint.getVPoints2(lines,arange) return VPoints def getVPoints(num", "Centers[count][1] = point[1] count += 1 if count == K:", "for i in range(lenofvotes,len(votes2),1): if votes2[i][0][0] == 'h' or votes2[i][0][0]", "getEdges(image): #moved to Edges.py return Edges.getEdges(image) def getLines(edges): #moved to", "== 'h' or votes2[i][0][0] == 'v' or votes2[i][0][0] == 'p':", "point in ans: 
if point[0] > 0 and point[1] >", "Centers = [] if K == 1: step = 1", "copy.deepcopy(lines) lines2 = extenLines(lines2,edges) lines2 = mergeLines(lines2) #devideIntoGroups(lines2,3) lines2 =", "def getVPoints(num = 16): #this function is fallen into disuse", "point in points: w = votes[point] count += w sumx", "num = 0 p1 = 0.0 ret1 = [] p2", "1 ret.append(ret1) if p2 > count: K -= 1 ret.append(ret2)", "if votes[item] > p1: p2 = p1 ret2 = ret1", "continue minlens = 1e15 minpos = 0 now = -1", "+ 'answer.txt','w') fd.write('(' + str(ans[0][0]) + ',' + str(ans[0][1]) +", "getVPoints2(lines,arange = 0.2617): #moved to INTPoint.py global VPoints VPoints =", "str(ans[1][0]) + ',' + str(ans[1][1]) + ')(' + str(ans[2][0]) +", "devideIntoPoints(points) for i in range(0,K,1): Centers[i] = getGraPoint(Cluster[i]) def getFinal(points):", "[] Centers = [] Cluster = [] voters = {}", "to INTPoint.py return INTPoint.getArch(line,point) def voteForPoint(lines): #moved to INTPoint.py global", "p1 ret2 = ret1 p1 = votes[item] ret1 = item", "return ret def deal(inputname,outputname): global votes global Groups global VPoints", "Edges import INTPoint eps = 1e-7 votes = {} Groups", "count += w sumx += w * point[0] sumy +=", "to INTPoint.py return INTPoint.getIntersectPoint(linea,lineb) def sortLines(lines): #moved to Edges.py return", "range(0,4,1): if color[i] == 255: color[i+1] = 255 color[i] =", "== 'p': votesFinal[votes2[i][0]] = votes2[i][1] VPoints.append(votes2[i][0]) votes = votesFinal ans", "Cluster.append([]) Centers.append([0,0]) count = 0 for point in points: if", "now += 1 lens = getLinesLength((point[0],point[1],cen[0],cen[1])) if lens < minlens:", "0.0 ret2 = [] for item in votes: if item[0]", "= 1.0 sumx = 0.0 sumy = 0.0 for point", "1: step = 1 for i in range(0,K,1): Cluster.append([]) Centers.append([0,0])", "1 K = 3 ret = [] count = count", "!= 'v' and point[0] != 'h' and votes[point] != 0:", "to Edges.py return Edges.getLines(edges) def checkRound(pos,edges): #moved 
to Edges.py return", "'answer.jpg',edges2) fd = open(outputname + 'answer.txt','w') fd.write('(' + str(ans[0][0]) +", "point[0] == 'v': continue if votes[point] == 0: continue minlens", "= 50): global Cluster global Centers Cluster = [] Centers", "else: if votes[item] > p2: p2 = votes[item] ret2 =", "[] count = count / num * 0.1 if p1", "= getLines(edges) lines2 = copy.deepcopy(lines) lines2 = extenLines(lines2,edges) lines2 =", "fd = open(outputname + 'answer.txt','w') fd.write('(' + str(ans[0][0]) + ','", "point[0] Centers[count][1] = point[1] count += 1 if count ==", "in range(0,lens,1): for k in range(j+1,lens,1): VPoints.append(getCrossPoint(Groups[i][j],Groups[i][k])) def removeSame(list): #moved", "the low speed for i in range(0,num + 1,1): lens", "to INTPoint.py return INTPoint.getLinesLength(line) def getMidPoint(line): #moved to INTPoint.py return", "+= w sumx += w * point[0] sumy += w", "Edges.getLineABC(line) def getCirAnch(a,b): #moved to Edges.py return Edges.getCirAnch(a,b) def getCrossPoint(linea,lineb):", "!= 0: Centers[count][0] = point[0] Centers[count][1] = point[1] count +=", "* 0.1 if p1 > count: K -= 1 ret.append(ret1)", "math import Edges import INTPoint eps = 1e-7 votes =", "to Edges.py return Edges.extenLine(line,edges) def extenLines(lines,edges): #moved to Edges.py return", "* 0.2))) votesFinal = {} VPoints = [] for i", "VPoints = removeSame(VPoints) voteForPoint(lines2) votes2 = sorted(votes.iteritems(),key=lambda votes:votes[1],reverse=True) lenofvotes =", "point in clu: if point[0] > 0 and point[1] >", "INTPoint.py return INTPoint.removeSame(list) def getLinesLength(line): #moved to INTPoint.py return INTPoint.getLinesLength(line)", "return Edges.sortLines(lines) def getVPoints2(lines,arange = 0.2617): #moved to INTPoint.py global", "image = cv.imread(inputname) edges = getEdges(image) cv.imwrite(outputname + 'edges.jpg',edges) lines", "#moved to INTPoint.py return INTPoint.getMidPoint(line) def getArch(line,point): #moved to 
INTPoint.py", "point[0] != 'h' and votes[point] != 0: Centers[count][0] = point[0]", "getFinal(points): count = 0.0 num = 0 p1 = 0.0", "'edges.jpg',edges) lines = getLines(edges) lines2 = copy.deepcopy(lines) lines2 = extenLines(lines2,edges)", "cv import numpy as np import copy import math import", "= [] Cluster = [] voters = {} def getEdges(image):", "VPoints def getVPoints(num = 16): #this function is fallen into", "disuse because of the low speed for i in range(0,num", "'p' or point[0] == 'h' or point[0] == 'v': continue", "minlens = lens minpos = now Cluster[minpos].append(point) def KMean(points,K =", "= votes[item] ret2 = item else: count += votes[item] num", "Centers.append([0,0]) count = 0 for point in points: if point[0]", "sumx += w * point[0] sumy += w * point[1]", "to Edges.py return Edges.mergeLines(lines) def getLineABC(line): #moved to Edges.py return", "in range(0,K,1): ret.append(Centers[i]) return ret def deal(inputname,outputname): global votes global", "def KMean(points,K = 3,step = 50): global Cluster global Centers", "for point in points: if point[0] != 'p' and point[0]", "= 0.0 sumy = 0.0 for point in points: w", "K -= 1 ret.append(ret2) KMean(points,K) for i in range(0,K,1): ret.append(Centers[i])", "ret.append(Centers[i]) return ret def deal(inputname,outputname): global votes global Groups global", "i in range(0,K,1): Cluster.append([]) Centers.append([0,0]) count = 0 for point", "extenLine(line,edges): #moved to Edges.py return Edges.extenLine(line,edges) def extenLines(lines,edges): #moved to", "1 lens = getLinesLength((point[0],point[1],cen[0],cen[1])) if lens < minlens: minlens =", "range(0,step,1): devideIntoPoints(points) for i in range(0,K,1): Centers[i] = getGraPoint(Cluster[i]) def", "for i in range(0,K,1): Centers[i] = getGraPoint(Cluster[i]) def getFinal(points): count", "= getLinesLength((point[0],point[1],cen[0],cen[1])) if lens < minlens: minlens = lens minpos", "= {} image = cv.imread(inputname) edges = getEdges(image) 
cv.imwrite(outputname +", "= 1e-7 votes = {} Groups = [] VPoints =", "= {} VPoints = [] for i in range(0,lenofvotes,1): votesFinal[votes2[i][0]]", "K = 3 ret = [] count = count /", "',' + str(ans[0][1]) + ')(' + str(ans[1][0]) + ',' +", "= {} Groups = [] VPoints = [] Centers =", "Edges.shouldMerge(line1,line2) def mergeLines(lines): #moved to Edges.py return Edges.mergeLines(lines) def getLineABC(line):", "j in range(0,lens,1): for k in range(j+1,lens,1): VPoints.append(getCrossPoint(Groups[i][j],Groups[i][k])) def removeSame(list):", "for item in lines: if item[0] == 'N': continue cv.line(edges,(item[0],item[1]),(item[2],item[3]),(0,0,255),2)", "def removeSame(list): #moved to INTPoint.py return INTPoint.removeSame(list) def getLinesLength(line): #moved", "minpos = now Cluster[minpos].append(point) def KMean(points,K = 3,step = 50):", "[255,0,0,0] for clu in Cluster: for i in range(0,4,1): if", "clu in Cluster: for i in range(0,4,1): if color[i] ==", "to Edges.py return Edges.sortLines(lines) def getVPoints2(lines,arange = 0.2617): #moved to", "return Edges.getLines(edges) def checkRound(pos,edges): #moved to Edges.py return Edges.checkRound(pos,edges) def", "global votes global Groups global VPoints global Centers global Cluster", "mergeLines(lines): #moved to Edges.py return Edges.mergeLines(lines) def getLineABC(line): #moved to", "== 'v' or votes2[i][0][0] == 'p': votesFinal[votes2[i][0]] = votes2[i][1] VPoints.append(votes2[i][0])", "= 0.0 for point in points: w = votes[point] count", "if votes[point] == 0: continue cv.line(edges2,(int(point[0]),int(point[1])),(int(point[0]),int(point[1])),(color[1],color[2],color[3]),10) for point in ans:", "getLines(edges): #moved to Edges.py return Edges.getLines(edges) def checkRound(pos,edges): #moved to", "Edges.py return Edges.sortLines(lines) def getVPoints2(lines,arange = 0.2617): #moved to INTPoint.py", "count = 0 for point in points: if point[0] !=", "for i in range(0,lens,1): Cluster[i] = [] for point in", "p1 = 
votes[item] ret1 = item else: if votes[item] >", "edges.shape[1] and point[1] < edges.shape[0]: cv.line(edges2,(int(point[0]),int(point[1])),(int(point[0]),int(point[1])),(255,255,255),10) cv.imwrite(outputname + 'linedetect.jpg',edges) cv.imwrite(outputname", "INTPoint.py return INTPoint.getIntersectPoint(linea,lineb) def sortLines(lines): #moved to Edges.py return Edges.sortLines(lines)", "import numpy as np import copy import math import Edges", "0.0 sumy = 0.0 for point in points: w =", "lines: if item[0] == 'N': continue cv.line(edges,(item[0],item[1]),(item[2],item[3]),(0,0,255),2) for item in", "[] Cluster = [] voters = {} image = cv.imread(inputname)", "global Centers Cluster = [] Centers = [] if K", "cv.imread(inputname) edges = getEdges(image) cv.imwrite(outputname + 'edges.jpg',edges) lines = getLines(edges)", "ret2 = item else: count += votes[item] num += 1", "= [] for i in range(0,lenofvotes,1): votesFinal[votes2[i][0]] = votes2[i][1] VPoints.append(votes2[i][0])", "1,1): lens = len(Groups[i]) for j in range(0,lens,1): for k", "{} image = cv.imread(inputname) edges = getEdges(image) cv.imwrite(outputname + 'edges.jpg',edges)", "range(0,K,1): Cluster.append([]) Centers.append([0,0]) count = 0 for point in points:", "global votes global voters votes,voters = INTPoint.voteForPoint(lines,VPoints) return def getGraPoint(points):", "continue cv.line(edges,(item[0],item[1]),(item[2],item[3]),(0,0,255),2) for item in lines2: cv.line(edges2,(item[0],item[1]),(item[2],item[3]),(0,0,255),2) color = [255,0,0,0]", "#moved to INTPoint.py global VPoints VPoints = INTPoint.getVPoints2(lines,arange) return VPoints", "3,step = 50): global Cluster global Centers Cluster = []", "item in votes: if item[0] == 'p' or item[0] ==", "ret def deal(inputname,outputname): global votes global Groups global VPoints global", "votes[point] == 0: continue cv.line(edges2,(int(point[0]),int(point[1])),(int(point[0]),int(point[1])),(color[1],color[2],color[3]),10) for point in ans: if", 
"0 p1 = 0.0 ret1 = [] p2 = 0.0", "def getEdges(image): #moved to Edges.py return Edges.getEdges(image) def getLines(edges): #moved", "i in range(0,lens,1): Cluster[i] = [] for point in Points:", "= votes[point] count += w sumx += w * point[0]", "{} VPoints = [] for i in range(0,lenofvotes,1): votesFinal[votes2[i][0]] =", "= len(Groups[i]) for j in range(0,lens,1): for k in range(j+1,lens,1):", "0: continue minlens = 1e15 minpos = 0 now =", "0 for point in points: if point[0] != 'p' and", "INTPoint.getVPoints2(lines,arange) return VPoints def getVPoints(num = 16): #this function is", "[] VPoints = [] Centers = [] Cluster = []", "count = 1.0 sumx = 0.0 sumy = 0.0 for", "point in Points: if point[0] == 'p' or point[0] ==", "= point[0] Centers[count][1] = point[1] count += 1 if count", "255: color[i+1] = 255 color[i] = 0 break for point", "point[1] count += 1 if count == K: break for", "votes global Groups global VPoints global Centers global Cluster global", "to INTPoint.py return INTPoint.getMidPoint(line) def getArch(line,point): #moved to INTPoint.py return", "= [] for point in Points: if point[0] == 'p'", "-= 1 ret.append(ret2) KMean(points,K) for i in range(0,K,1): ret.append(Centers[i]) return", "= [] Cluster = [] voters = {} image =", "[] voters = {} image = cv.imread(inputname) edges = getEdges(image)", "== 'h' or item[0] == 'v': if votes[item] > p1:", "'p' or item[0] == 'h' or item[0] == 'v': if", "= -1 for cen in Centers: now += 1 lens", "or votes2[i][0][0] == 'v' or votes2[i][0][0] == 'p': votesFinal[votes2[i][0]] =", "= 0.0 ret1 = [] p2 = 0.0 ret2 =", "for i in range(0,lenofvotes,1): votesFinal[votes2[i][0]] = votes2[i][1] VPoints.append(votes2[i][0]) for i", "= [] VPoints = [] Centers = [] Cluster =", "def getArch(line,point): #moved to INTPoint.py return INTPoint.getArch(line,point) def voteForPoint(lines): #moved", "deal(inputname,outputname): global votes global Groups global VPoints global Centers global", "i in range(0,num + 1,1): lens = 
len(Groups[i]) for j", "== K: break for i in range(0,step,1): devideIntoPoints(points) for i", "voteForPoint(lines2) votes2 = sorted(votes.iteritems(),key=lambda votes:votes[1],reverse=True) lenofvotes = min(len(votes2),max(5,int(len(votes2) * 0.2)))", "range(lenofvotes,len(votes2),1): if votes2[i][0][0] == 'h' or votes2[i][0][0] == 'v' or", "global Cluster lens = len(Cluster) for i in range(0,lens,1): Cluster[i]", "return INTPoint.getArch(line,point) def voteForPoint(lines): #moved to INTPoint.py global votes global", "Edges.outOfSize(pos,edges) def extenLine(line,edges): #moved to Edges.py return Edges.extenLine(line,edges) def extenLines(lines,edges):", "Edges.sortLines(lines) def getVPoints2(lines,arange = 0.2617): #moved to INTPoint.py global VPoints", "continue cv.line(edges2,(int(point[0]),int(point[1])),(int(point[0]),int(point[1])),(color[1],color[2],color[3]),10) for point in ans: if point[0] > 0", "into disuse because of the low speed for i in", "item else: count += votes[item] num += 1 K =", "{} def getEdges(image): #moved to Edges.py return Edges.getEdges(image) def getLines(edges):", "point[0] != 'p' and point[0] != 'v' and point[0] !=", "shouldMerge(line1,line2): #moved to Edges.py return Edges.shouldMerge(line1,line2) def mergeLines(lines): #moved to", "in Cluster: for i in range(0,4,1): if color[i] == 255:", "'linedetect.jpg',edges) cv.imwrite(outputname + 'answer.jpg',edges2) fd = open(outputname + 'answer.txt','w') fd.write('('", "def sortLines(lines): #moved to Edges.py return Edges.sortLines(lines) def getVPoints2(lines,arange =", "sortLines(lines): #moved to Edges.py return Edges.sortLines(lines) def getVPoints2(lines,arange = 0.2617):", "#moved to Edges.py return Edges.getLines(edges) def checkRound(pos,edges): #moved to Edges.py", "def extenLine(line,edges): #moved to Edges.py return Edges.extenLine(line,edges) def extenLines(lines,edges): #moved", "in range(0,lenofvotes,1): votesFinal[votes2[i][0]] = votes2[i][1] 
VPoints.append(votes2[i][0]) for i in range(lenofvotes,len(votes2),1):", "global voters votes = {} Groups = [] VPoints =", "return Edges.checkRound(pos,edges) def outOfSize(pos,edges): #moved to Edges.py return Edges.outOfSize(pos,edges) def", "import INTPoint eps = 1e-7 votes = {} Groups =", "= 1e15 minpos = 0 now = -1 for cen", "return Edges.outOfSize(pos,edges) def extenLine(line,edges): #moved to Edges.py return Edges.extenLine(line,edges) def", "in ans: if point[0] > 0 and point[1] > 0:", "= p1 ret2 = ret1 p1 = votes[item] ret1 =", "checkRound(pos,edges): #moved to Edges.py return Edges.checkRound(pos,edges) def outOfSize(pos,edges): #moved to", "> p2: p2 = votes[item] ret2 = item else: count", "p2: p2 = votes[item] ret2 = item else: count +=", "removeSame(list): #moved to INTPoint.py return INTPoint.removeSame(list) def getLinesLength(line): #moved to", "item[0] == 'N': continue cv.line(edges,(item[0],item[1]),(item[2],item[3]),(0,0,255),2) for item in lines2: cv.line(edges2,(item[0],item[1]),(item[2],item[3]),(0,0,255),2)", "color[i] = 0 break for point in clu: if point[0]", "VPoints = INTPoint.getVPoints2(lines,arange) return VPoints def getVPoints(num = 16): #this", "range(0,K,1): Centers[i] = getGraPoint(Cluster[i]) def getFinal(points): count = 0.0 num", "mergeLines(lines2) #devideIntoGroups(lines2,3) lines2 = sortLines(lines2) getVPoints2(lines2) VPoints = removeSame(VPoints) voteForPoint(lines2)", "Centers[i] = getGraPoint(Cluster[i]) def getFinal(points): count = 0.0 num =", "lines2 = mergeLines(lines2) #devideIntoGroups(lines2,3) lines2 = sortLines(lines2) getVPoints2(lines2) VPoints =", "+ ',' + str(ans[0][1]) + ')(' + str(ans[1][0]) + ','", "#moved to Edges.py return Edges.shouldMerge(line1,line2) def mergeLines(lines): #moved to Edges.py", "0.0 num = 0 p1 = 0.0 ret1 = []", "Edges.py return Edges.extenLines(lines,edges) def shouldMerge(line1,line2): #moved to Edges.py return Edges.shouldMerge(line1,line2)", "color[i+1] = 255 color[i] = 0 break for 
point in", "= 3,step = 50): global Cluster global Centers Cluster =", "> 0: if point[0] < edges.shape[1] and point[1] < edges.shape[0]:", "Cluster global Centers Cluster = [] Centers = [] if", "global VPoints global Centers global Cluster global voters votes =", "#moved to Edges.py return Edges.getCirAnch(a,b) def getCrossPoint(linea,lineb): #moved to INTPoint.py", "item in lines2: cv.line(edges2,(item[0],item[1]),(item[2],item[3]),(0,0,255),2) color = [255,0,0,0] for clu in", "= votes2[i][1] VPoints.append(votes2[i][0]) votes = votesFinal ans = getFinal(VPoints) print", "INTPoint.py return INTPoint.getMidPoint(line) def getArch(line,point): #moved to INTPoint.py return INTPoint.getArch(line,point)", "< edges.shape[0]: cv.line(edges2,(int(point[0]),int(point[1])),(int(point[0]),int(point[1])),(255,255,255),10) cv.imwrite(outputname + 'linedetect.jpg',edges) cv.imwrite(outputname + 'answer.jpg',edges2) fd", "+ 'linedetect.jpg',edges) cv.imwrite(outputname + 'answer.jpg',edges2) fd = open(outputname + 'answer.txt','w')", "global VPoints VPoints = INTPoint.getVPoints2(lines,arange) return VPoints def getVPoints(num =", "votes2[i][1] VPoints.append(votes2[i][0]) for i in range(lenofvotes,len(votes2),1): if votes2[i][0][0] == 'h'", "for item in lines2: cv.line(edges2,(item[0],item[1]),(item[2],item[3]),(0,0,255),2) color = [255,0,0,0] for clu", "in range(0,4,1): if color[i] == 255: color[i+1] = 255 color[i]", "voters = {} def getEdges(image): #moved to Edges.py return Edges.getEdges(image)", "for point in ans: if point[0] > 0 and point[1]", "+ ',' + str(ans[1][1]) + ')(' + str(ans[2][0]) + ','", "is fallen into disuse because of the low speed for", "if votes2[i][0][0] == 'h' or votes2[i][0][0] == 'v' or votes2[i][0][0]", "< edges.shape[1] and point[1] < edges.shape[0]: cv.line(edges2,(int(point[0]),int(point[1])),(int(point[0]),int(point[1])),(255,255,255),10) cv.imwrite(outputname + 'linedetect.jpg',edges)", "lines2: 
cv.line(edges2,(item[0],item[1]),(item[2],item[3]),(0,0,255),2) color = [255,0,0,0] for clu in Cluster: for", "#moved to Edges.py return Edges.outOfSize(pos,edges) def extenLine(line,edges): #moved to Edges.py", "== 'v': continue if votes[point] == 0: continue minlens =", "for k in range(j+1,lens,1): VPoints.append(getCrossPoint(Groups[i][j],Groups[i][k])) def removeSame(list): #moved to INTPoint.py", "return Edges.mergeLines(lines) def getLineABC(line): #moved to Edges.py return Edges.getLineABC(line) def", "= 0 now = -1 for cen in Centers: now", "#moved to INTPoint.py return INTPoint.removeSame(list) def getLinesLength(line): #moved to INTPoint.py", "w * point[0] sumy += w * point[1] return (sumx/count,sumy/count)", "== 'p' or point[0] == 'h' or point[0] == 'v':", "= copy.deepcopy(edges) for item in lines: if item[0] == 'N':", "1e15 minpos = 0 now = -1 for cen in", "votesFinal[votes2[i][0]] = votes2[i][1] VPoints.append(votes2[i][0]) for i in range(lenofvotes,len(votes2),1): if votes2[i][0][0]", "0 break for point in clu: if point[0] > 0", "Cluster lens = len(Cluster) for i in range(0,lens,1): Cluster[i] =", "to INTPoint.py return INTPoint.removeSame(list) def getLinesLength(line): #moved to INTPoint.py return", "def getCrossPoint(linea,lineb): #moved to INTPoint.py return INTPoint.getIntersectPoint(linea,lineb) def sortLines(lines): #moved", "if votes[point] == 0: continue minlens = 1e15 minpos =", "count = 0.0 num = 0 p1 = 0.0 ret1", "i in range(0,K,1): Centers[i] = getGraPoint(Cluster[i]) def getFinal(points): count =", "INTPoint.getLinesLength(line) def getMidPoint(line): #moved to INTPoint.py return INTPoint.getMidPoint(line) def getArch(line,point):", "color = [255,0,0,0] for clu in Cluster: for i in", "break for point in clu: if point[0] > 0 and", "in range(j+1,lens,1): VPoints.append(getCrossPoint(Groups[i][j],Groups[i][k])) def removeSame(list): #moved to INTPoint.py return INTPoint.removeSame(list)", "or point[0] == 'v': continue if votes[point] == 0: 
continue", "= count / num * 0.1 if p1 > count:", "if p2 > count: K -= 1 ret.append(ret2) KMean(points,K) for", "Edges.extenLines(lines,edges) def shouldMerge(line1,line2): #moved to Edges.py return Edges.shouldMerge(line1,line2) def mergeLines(lines):", "def checkRound(pos,edges): #moved to Edges.py return Edges.checkRound(pos,edges) def outOfSize(pos,edges): #moved", "in points: w = votes[point] count += w sumx +=", "as np import copy import math import Edges import INTPoint", "getLines(edges) lines2 = copy.deepcopy(lines) lines2 = extenLines(lines2,edges) lines2 = mergeLines(lines2)", "i in range(0,lenofvotes,1): votesFinal[votes2[i][0]] = votes2[i][1] VPoints.append(votes2[i][0]) for i in", "= getFinal(VPoints) print ans edges = cv.cvtColor(edges,cv.COLOR_GRAY2BGR) edges2 = copy.deepcopy(edges)", "= lens minpos = now Cluster[minpos].append(point) def KMean(points,K = 3,step", "0: if point[0] < edges.shape[1] and point[1] < edges.shape[0]: cv.line(edges2,(int(point[0]),int(point[1])),(int(point[0]),int(point[1])),(255,255,255),10)", "= extenLines(lines2,edges) lines2 = mergeLines(lines2) #devideIntoGroups(lines2,3) lines2 = sortLines(lines2) getVPoints2(lines2)", "+ 'edges.jpg',edges) lines = getLines(edges) lines2 = copy.deepcopy(lines) lines2 =", "w * point[1] return (sumx/count,sumy/count) def devideIntoPoints(Points): global Cluster lens", "16): #this function is fallen into disuse because of the", "return INTPoint.getLinesLength(line) def getMidPoint(line): #moved to INTPoint.py return INTPoint.getMidPoint(line) def", "K: break for i in range(0,step,1): devideIntoPoints(points) for i in", "return INTPoint.getMidPoint(line) def getArch(line,point): #moved to INTPoint.py return INTPoint.getArch(line,point) def", "#moved to Edges.py return Edges.getEdges(image) def getLines(edges): #moved to Edges.py", "* point[1] return (sumx/count,sumy/count) def devideIntoPoints(Points): global Cluster lens =", "= votes[item] ret1 = item else: if votes[item] > p2:", "= ret1 
p1 = votes[item] ret1 = item else: if", "Edges.checkRound(pos,edges) def outOfSize(pos,edges): #moved to Edges.py return Edges.outOfSize(pos,edges) def extenLine(line,edges):", "#moved to Edges.py return Edges.mergeLines(lines) def getLineABC(line): #moved to Edges.py", "/ num * 0.1 if p1 > count: K -=", "if K == 1: step = 1 for i in", "INTPoint.getIntersectPoint(linea,lineb) def sortLines(lines): #moved to Edges.py return Edges.sortLines(lines) def getVPoints2(lines,arange", "= item else: count += votes[item] num += 1 K", "global voters votes,voters = INTPoint.voteForPoint(lines,VPoints) return def getGraPoint(points): count =", "def getCirAnch(a,b): #moved to Edges.py return Edges.getCirAnch(a,b) def getCrossPoint(linea,lineb): #moved", "= {} def getEdges(image): #moved to Edges.py return Edges.getEdges(image) def", "Edges.py return Edges.checkRound(pos,edges) def outOfSize(pos,edges): #moved to Edges.py return Edges.outOfSize(pos,edges)", "votesFinal = {} VPoints = [] for i in range(0,lenofvotes,1):", "because of the low speed for i in range(0,num +", "def getGraPoint(points): count = 1.0 sumx = 0.0 sumy =", "import cv2 as cv import numpy as np import copy", "def getMidPoint(line): #moved to INTPoint.py return INTPoint.getMidPoint(line) def getArch(line,point): #moved", "if lens < minlens: minlens = lens minpos = now", "low speed for i in range(0,num + 1,1): lens =", "+ str(ans[0][0]) + ',' + str(ans[0][1]) + ')(' + str(ans[1][0])", "votes = {} Groups = [] VPoints = [] Centers", "+ str(ans[1][1]) + ')(' + str(ans[2][0]) + ',' + str(ans[2][1])", "range(0,lens,1): Cluster[i] = [] for point in Points: if point[0]", "points: if point[0] != 'p' and point[0] != 'v' and", "0: continue cv.line(edges2,(int(point[0]),int(point[1])),(int(point[0]),int(point[1])),(color[1],color[2],color[3]),10) for point in ans: if point[0] >", "if item[0] == 'N': continue cv.line(edges,(item[0],item[1]),(item[2],item[3]),(0,0,255),2) for item in lines2:", "print ans edges = 
cv.cvtColor(edges,cv.COLOR_GRAY2BGR) edges2 = copy.deepcopy(edges) for item", "= 0.0 num = 0 p1 = 0.0 ret1 =", "fd.write('(' + str(ans[0][0]) + ',' + str(ans[0][1]) + ')(' +", "to Edges.py return Edges.getEdges(image) def getLines(edges): #moved to Edges.py return", "INTPoint.getArch(line,point) def voteForPoint(lines): #moved to INTPoint.py global votes global voters", "'v' or votes2[i][0][0] == 'p': votesFinal[votes2[i][0]] = votes2[i][1] VPoints.append(votes2[i][0]) votes", "'p': votesFinal[votes2[i][0]] = votes2[i][1] VPoints.append(votes2[i][0]) votes = votesFinal ans =", "== 255: color[i+1] = 255 color[i] = 0 break for", "return VPoints def getVPoints(num = 16): #this function is fallen", "edges2 = copy.deepcopy(edges) for item in lines: if item[0] ==", "[] for point in Points: if point[0] == 'p' or", "#moved to Edges.py return Edges.extenLines(lines,edges) def shouldMerge(line1,line2): #moved to Edges.py", "+= w * point[1] return (sumx/count,sumy/count) def devideIntoPoints(Points): global Cluster", "ret = [] count = count / num * 0.1", "return Edges.shouldMerge(line1,line2) def mergeLines(lines): #moved to Edges.py return Edges.mergeLines(lines) def", "voters = {} image = cv.imread(inputname) edges = getEdges(image) cv.imwrite(outputname", "votes:votes[1],reverse=True) lenofvotes = min(len(votes2),max(5,int(len(votes2) * 0.2))) votesFinal = {} VPoints", "Edges.py return Edges.getCirAnch(a,b) def getCrossPoint(linea,lineb): #moved to INTPoint.py return INTPoint.getIntersectPoint(linea,lineb)", "for i in range(0,step,1): devideIntoPoints(points) for i in range(0,K,1): Centers[i]", "else: count += votes[item] num += 1 K = 3", "3 ret = [] count = count / num *", "getMidPoint(line): #moved to INTPoint.py return INTPoint.getMidPoint(line) def getArch(line,point): #moved to", "count = count / num * 0.1 if p1 >", "p1 = 0.0 ret1 = [] p2 = 0.0 ret2", "in lines2: cv.line(edges2,(item[0],item[1]),(item[2],item[3]),(0,0,255),2) color = [255,0,0,0] for clu in Cluster:", 
"> 0 and point[1] > 0: if point[0] < edges.shape[1]", "def getLines(edges): #moved to Edges.py return Edges.getLines(edges) def checkRound(pos,edges): #moved", "= [255,0,0,0] for clu in Cluster: for i in range(0,4,1):", "in Centers: now += 1 lens = getLinesLength((point[0],point[1],cen[0],cen[1])) if lens", "point in points: if point[0] != 'p' and point[0] !=", "removeSame(VPoints) voteForPoint(lines2) votes2 = sorted(votes.iteritems(),key=lambda votes:votes[1],reverse=True) lenofvotes = min(len(votes2),max(5,int(len(votes2) *", "#this function is fallen into disuse because of the low", "global Cluster global Centers Cluster = [] Centers = []", "and point[0] != 'h' and votes[point] != 0: Centers[count][0] =", "votes[item] > p1: p2 = p1 ret2 = ret1 p1", "+= 1 if count == K: break for i in", "1 for i in range(0,K,1): Cluster.append([]) Centers.append([0,0]) count = 0", "'h' or item[0] == 'v': if votes[item] > p1: p2", "range(0,lenofvotes,1): votesFinal[votes2[i][0]] = votes2[i][1] VPoints.append(votes2[i][0]) for i in range(lenofvotes,len(votes2),1): if", "i in range(0,4,1): if color[i] == 255: color[i+1] = 255", "if votes[item] > p2: p2 = votes[item] ret2 = item", "-= 1 ret.append(ret1) if p2 > count: K -= 1", "votes[point] != 0: Centers[count][0] = point[0] Centers[count][1] = point[1] count", "if point[0] > 0 and point[1] > 0: if point[0]", "to INTPoint.py global VPoints VPoints = INTPoint.getVPoints2(lines,arange) return VPoints def", "extenLines(lines,edges): #moved to Edges.py return Edges.extenLines(lines,edges) def shouldMerge(line1,line2): #moved to", "votes,voters = INTPoint.voteForPoint(lines,VPoints) return def getGraPoint(points): count = 1.0 sumx", "and point[1] < edges.shape[0]: if votes[point] == 0: continue cv.line(edges2,(int(point[0]),int(point[1])),(int(point[0]),int(point[1])),(color[1],color[2],color[3]),10)", "= [] p2 = 0.0 ret2 = [] for item", "== 'h' or point[0] == 'v': continue if votes[point] ==", "as cv import numpy as np import copy 
import math", "for i in range(0,4,1): if color[i] == 255: color[i+1] =", "votes[point] count += w sumx += w * point[0] sumy", "cv.line(edges2,(item[0],item[1]),(item[2],item[3]),(0,0,255),2) color = [255,0,0,0] for clu in Cluster: for i", "if color[i] == 255: color[i+1] = 255 color[i] = 0", "votes[item] > p2: p2 = votes[item] ret2 = item else:", "= 0 p1 = 0.0 ret1 = [] p2 =", "+ str(ans[0][1]) + ')(' + str(ans[1][0]) + ',' + str(ans[1][1])", "cv.line(edges2,(int(point[0]),int(point[1])),(int(point[0]),int(point[1])),(255,255,255),10) cv.imwrite(outputname + 'linedetect.jpg',edges) cv.imwrite(outputname + 'answer.jpg',edges2) fd = open(outputname", "Edges.extenLine(line,edges) def extenLines(lines,edges): #moved to Edges.py return Edges.extenLines(lines,edges) def shouldMerge(line1,line2):", "VPoints.append(getCrossPoint(Groups[i][j],Groups[i][k])) def removeSame(list): #moved to INTPoint.py return INTPoint.removeSame(list) def getLinesLength(line):", "#moved to INTPoint.py return INTPoint.getArch(line,point) def voteForPoint(lines): #moved to INTPoint.py", "VPoints global Centers global Cluster global voters votes = {}", "lens = len(Cluster) for i in range(0,lens,1): Cluster[i] = []", "voters votes = {} Groups = [] VPoints = []", "w = votes[point] count += w sumx += w *", "Edges.py return Edges.shouldMerge(line1,line2) def mergeLines(lines): #moved to Edges.py return Edges.mergeLines(lines)", "count: K -= 1 ret.append(ret1) if p2 > count: K", "point[0] < edges.shape[1] and point[1] < edges.shape[0]: cv.line(edges2,(int(point[0]),int(point[1])),(int(point[0]),int(point[1])),(255,255,255),10) cv.imwrite(outputname +", "point[1] < edges.shape[0]: if votes[point] == 0: continue cv.line(edges2,(int(point[0]),int(point[1])),(int(point[0]),int(point[1])),(color[1],color[2],color[3]),10) for", "to Edges.py return Edges.checkRound(pos,edges) def outOfSize(pos,edges): #moved to Edges.py return", "#moved to INTPoint.py global votes global voters votes,voters = 
INTPoint.voteForPoint(lines,VPoints)", "0: Centers[count][0] = point[0] Centers[count][1] = point[1] count += 1", "continue if votes[point] == 0: continue minlens = 1e15 minpos", "votes[item] ret2 = item else: count += votes[item] num +=", "def getLineABC(line): #moved to Edges.py return Edges.getLineABC(line) def getCirAnch(a,b): #moved", "KMean(points,K = 3,step = 50): global Cluster global Centers Cluster", "#moved to INTPoint.py return INTPoint.getLinesLength(line) def getMidPoint(line): #moved to INTPoint.py", "= 16): #this function is fallen into disuse because of", "edges.shape[1] and point[1] < edges.shape[0]: if votes[point] == 0: continue", "return Edges.getEdges(image) def getLines(edges): #moved to Edges.py return Edges.getLines(edges) def", "p2 = p1 ret2 = ret1 p1 = votes[item] ret1", "K -= 1 ret.append(ret1) if p2 > count: K -=", "= [] Centers = [] if K == 1: step", "#moved to Edges.py return Edges.sortLines(lines) def getVPoints2(lines,arange = 0.2617): #moved", "edges = getEdges(image) cv.imwrite(outputname + 'edges.jpg',edges) lines = getLines(edges) lines2", "lens < minlens: minlens = lens minpos = now Cluster[minpos].append(point)", "if item[0] == 'p' or item[0] == 'h' or item[0]", "global Groups global VPoints global Centers global Cluster global voters", "= [] voters = {} def getEdges(image): #moved to Edges.py", "numpy as np import copy import math import Edges import", "i in range(0,step,1): devideIntoPoints(points) for i in range(0,K,1): Centers[i] =", "50): global Cluster global Centers Cluster = [] Centers =", "global Centers global Cluster global voters votes = {} Groups", "Centers: now += 1 lens = getLinesLength((point[0],point[1],cen[0],cen[1])) if lens <", "of the low speed for i in range(0,num + 1,1):", "item[0] == 'v': if votes[item] > p1: p2 = p1", "Edges.getEdges(image) def getLines(edges): #moved to Edges.py return Edges.getLines(edges) def checkRound(pos,edges):", "VPoints.append(votes2[i][0]) for i in 
range(lenofvotes,len(votes2),1): if votes2[i][0][0] == 'h' or", "point[0] < edges.shape[1] and point[1] < edges.shape[0]: if votes[point] ==", "in range(0,num + 1,1): lens = len(Groups[i]) for j in", "> count: K -= 1 ret.append(ret1) if p2 > count:", "now Cluster[minpos].append(point) def KMean(points,K = 3,step = 50): global Cluster", "Cluster = [] voters = {} def getEdges(image): #moved to", "== 0: continue minlens = 1e15 minpos = 0 now", "votes: if item[0] == 'p' or item[0] == 'h' or", "[] p2 = 0.0 ret2 = [] for item in", "ret.append(ret2) KMean(points,K) for i in range(0,K,1): ret.append(Centers[i]) return ret def", "+ 1,1): lens = len(Groups[i]) for j in range(0,lens,1): for", "getLinesLength((point[0],point[1],cen[0],cen[1])) if lens < minlens: minlens = lens minpos =", "0.2))) votesFinal = {} VPoints = [] for i in", "ans: if point[0] > 0 and point[1] > 0: if", "Edges.getLines(edges) def checkRound(pos,edges): #moved to Edges.py return Edges.checkRound(pos,edges) def outOfSize(pos,edges):", "and votes[point] != 0: Centers[count][0] = point[0] Centers[count][1] = point[1]", "votes[item] num += 1 K = 3 ret = []", "= sortLines(lines2) getVPoints2(lines2) VPoints = removeSame(VPoints) voteForPoint(lines2) votes2 = sorted(votes.iteritems(),key=lambda", "[] if K == 1: step = 1 for i", "Groups global VPoints global Centers global Cluster global voters votes", "cv.cvtColor(edges,cv.COLOR_GRAY2BGR) edges2 = copy.deepcopy(edges) for item in lines: if item[0]", "Cluster: for i in range(0,4,1): if color[i] == 255: color[i+1]", "getGraPoint(Cluster[i]) def getFinal(points): count = 0.0 num = 0 p1", "step = 1 for i in range(0,K,1): Cluster.append([]) Centers.append([0,0]) count", "minpos = 0 now = -1 for cen in Centers:", "str(ans[1][1]) + ')(' + str(ans[2][0]) + ',' + str(ans[2][1]) +", "= 0.0 ret2 = [] for item in votes: if", "Edges.mergeLines(lines) def getLineABC(line): #moved to Edges.py return Edges.getLineABC(line) def getCirAnch(a,b):", "INTPoint eps = 1e-7 votes = 
{} Groups = []", "= getGraPoint(Cluster[i]) def getFinal(points): count = 0.0 num = 0", "Centers global Cluster global voters votes = {} Groups =", "0: if point[0] < edges.shape[1] and point[1] < edges.shape[0]: if", "return Edges.extenLine(line,edges) def extenLines(lines,edges): #moved to Edges.py return Edges.extenLines(lines,edges) def", "K == 1: step = 1 for i in range(0,K,1):", "(sumx/count,sumy/count) def devideIntoPoints(Points): global Cluster lens = len(Cluster) for i", "!= 'p' and point[0] != 'v' and point[0] != 'h'", "#devideIntoGroups(lines2,3) lines2 = sortLines(lines2) getVPoints2(lines2) VPoints = removeSame(VPoints) voteForPoint(lines2) votes2", "ret.append(ret1) if p2 > count: K -= 1 ret.append(ret2) KMean(points,K)", "or point[0] == 'h' or point[0] == 'v': continue if", "votes[item] ret1 = item else: if votes[item] > p2: p2", "in lines: if item[0] == 'N': continue cv.line(edges,(item[0],item[1]),(item[2],item[3]),(0,0,255),2) for item", "extenLines(lines2,edges) lines2 = mergeLines(lines2) #devideIntoGroups(lines2,3) lines2 = sortLines(lines2) getVPoints2(lines2) VPoints", "Cluster = [] Centers = [] if K == 1:", "p2 = 0.0 ret2 = [] for item in votes:", "== 1: step = 1 for i in range(0,K,1): Cluster.append([])", "0.1 if p1 > count: K -= 1 ret.append(ret1) if", "-1 for cen in Centers: now += 1 lens =", "[] for item in votes: if item[0] == 'p' or", "if point[0] < edges.shape[1] and point[1] < edges.shape[0]: cv.line(edges2,(int(point[0]),int(point[1])),(int(point[0]),int(point[1])),(255,255,255),10) cv.imwrite(outputname", "1e-7 votes = {} Groups = [] VPoints = []", "= votes2[i][1] VPoints.append(votes2[i][0]) for i in range(lenofvotes,len(votes2),1): if votes2[i][0][0] ==", "edges.shape[0]: if votes[point] == 0: continue cv.line(edges2,(int(point[0]),int(point[1])),(int(point[0]),int(point[1])),(color[1],color[2],color[3]),10) for point in", "= cv.imread(inputname) edges = getEdges(image) cv.imwrite(outputname + 'edges.jpg',edges) lines =", "return 
Edges.extenLines(lines,edges) def shouldMerge(line1,line2): #moved to Edges.py return Edges.shouldMerge(line1,line2) def", "+ str(ans[1][0]) + ',' + str(ans[1][1]) + ')(' + str(ans[2][0])", "Edges.py return Edges.getLines(edges) def checkRound(pos,edges): #moved to Edges.py return Edges.checkRound(pos,edges)", "votes = votesFinal ans = getFinal(VPoints) print ans edges =", "= cv.cvtColor(edges,cv.COLOR_GRAY2BGR) edges2 = copy.deepcopy(edges) for item in lines: if", "and point[1] > 0: if point[0] < edges.shape[1] and point[1]", "in Points: if point[0] == 'p' or point[0] == 'h'", "= 1 for i in range(0,K,1): Cluster.append([]) Centers.append([0,0]) count =", "ans edges = cv.cvtColor(edges,cv.COLOR_GRAY2BGR) edges2 = copy.deepcopy(edges) for item in", "str(ans[0][1]) + ')(' + str(ans[1][0]) + ',' + str(ans[1][1]) +", "ret1 = [] p2 = 0.0 ret2 = [] for", "lines2 = copy.deepcopy(lines) lines2 = extenLines(lines2,edges) lines2 = mergeLines(lines2) #devideIntoGroups(lines2,3)", "1 ret.append(ret2) KMean(points,K) for i in range(0,K,1): ret.append(Centers[i]) return ret", "in range(lenofvotes,len(votes2),1): if votes2[i][0][0] == 'h' or votes2[i][0][0] == 'v'", "in clu: if point[0] > 0 and point[1] > 0:", "break for i in range(0,step,1): devideIntoPoints(points) for i in range(0,K,1):", "k in range(j+1,lens,1): VPoints.append(getCrossPoint(Groups[i][j],Groups[i][k])) def removeSame(list): #moved to INTPoint.py return", "return INTPoint.removeSame(list) def getLinesLength(line): #moved to INTPoint.py return INTPoint.getLinesLength(line) def", "devideIntoPoints(Points): global Cluster lens = len(Cluster) for i in range(0,lens,1):", "> count: K -= 1 ret.append(ret2) KMean(points,K) for i in", "p2 > count: K -= 1 ret.append(ret2) KMean(points,K) for i", "copy.deepcopy(edges) for item in lines: if item[0] == 'N': continue", "item[0] == 'h' or item[0] == 'v': if votes[item] >", "sorted(votes.iteritems(),key=lambda votes:votes[1],reverse=True) lenofvotes = 
min(len(votes2),max(5,int(len(votes2) * 0.2))) votesFinal = {}", "votesFinal ans = getFinal(VPoints) print ans edges = cv.cvtColor(edges,cv.COLOR_GRAY2BGR) edges2", "to Edges.py return Edges.extenLines(lines,edges) def shouldMerge(line1,line2): #moved to Edges.py return", "range(0,lens,1): for k in range(j+1,lens,1): VPoints.append(getCrossPoint(Groups[i][j],Groups[i][k])) def removeSame(list): #moved to", "for cen in Centers: now += 1 lens = getLinesLength((point[0],point[1],cen[0],cen[1]))", "global Cluster global voters votes = {} Groups = []", "= 3 ret = [] count = count / num", "< edges.shape[1] and point[1] < edges.shape[0]: if votes[point] == 0:", "0.0 ret1 = [] p2 = 0.0 ret2 = []", "Edges.py return Edges.getEdges(image) def getLines(edges): #moved to Edges.py return Edges.getLines(edges)", "Points: if point[0] == 'p' or point[0] == 'h' or", "for i in range(0,K,1): Cluster.append([]) Centers.append([0,0]) count = 0 for", "def getVPoints2(lines,arange = 0.2617): #moved to INTPoint.py global VPoints VPoints", "point[1] return (sumx/count,sumy/count) def devideIntoPoints(Points): global Cluster lens = len(Cluster)", "point[0] == 'h' or point[0] == 'v': continue if votes[point]", "for clu in Cluster: for i in range(0,4,1): if color[i]", "= 0.2617): #moved to INTPoint.py global VPoints VPoints = INTPoint.getVPoints2(lines,arange)", "getFinal(VPoints) print ans edges = cv.cvtColor(edges,cv.COLOR_GRAY2BGR) edges2 = copy.deepcopy(edges) for", "'p' and point[0] != 'v' and point[0] != 'h' and", "Centers Cluster = [] Centers = [] if K ==", "'h' or votes2[i][0][0] == 'v' or votes2[i][0][0] == 'p': votesFinal[votes2[i][0]]", "+= 1 K = 3 ret = [] count =", "getCrossPoint(linea,lineb): #moved to INTPoint.py return INTPoint.getIntersectPoint(linea,lineb) def sortLines(lines): #moved to", "')(' + str(ans[2][0]) + ',' + str(ans[2][1]) + ')') fd.close", "',' + str(ans[1][1]) + ')(' + str(ans[2][0]) + ',' +", "if point[0] != 'p' and point[0] != 'v' and point[0]", "Cluster[i] = 
[] for point in Points: if point[0] ==", "+= w * point[0] sumy += w * point[1] return", "in votes: if item[0] == 'p' or item[0] == 'h'", "= mergeLines(lines2) #devideIntoGroups(lines2,3) lines2 = sortLines(lines2) getVPoints2(lines2) VPoints = removeSame(VPoints)", "for i in range(0,num + 1,1): lens = len(Groups[i]) for", "count: K -= 1 ret.append(ret2) KMean(points,K) for i in range(0,K,1):", "lines2 = extenLines(lines2,edges) lines2 = mergeLines(lines2) #devideIntoGroups(lines2,3) lines2 = sortLines(lines2)", "ans = getFinal(VPoints) print ans edges = cv.cvtColor(edges,cv.COLOR_GRAY2BGR) edges2 =", "getLineABC(line): #moved to Edges.py return Edges.getLineABC(line) def getCirAnch(a,b): #moved to", "= [] if K == 1: step = 1 for", "INTPoint.removeSame(list) def getLinesLength(line): #moved to INTPoint.py return INTPoint.getLinesLength(line) def getMidPoint(line):", "p1 > count: K -= 1 ret.append(ret1) if p2 >", "color[i] == 255: color[i+1] = 255 color[i] = 0 break", "point[1] < edges.shape[0]: cv.line(edges2,(int(point[0]),int(point[1])),(int(point[0]),int(point[1])),(255,255,255),10) cv.imwrite(outputname + 'linedetect.jpg',edges) cv.imwrite(outputname + 'answer.jpg',edges2)", "#moved to Edges.py return Edges.checkRound(pos,edges) def outOfSize(pos,edges): #moved to Edges.py", "#moved to Edges.py return Edges.extenLine(line,edges) def extenLines(lines,edges): #moved to Edges.py", "+ 'answer.jpg',edges2) fd = open(outputname + 'answer.txt','w') fd.write('(' + str(ans[0][0])", "> p1: p2 = p1 ret2 = ret1 p1 =", "copy import math import Edges import INTPoint eps = 1e-7", "'v': continue if votes[point] == 0: continue minlens = 1e15", "range(j+1,lens,1): VPoints.append(getCrossPoint(Groups[i][j],Groups[i][k])) def removeSame(list): #moved to INTPoint.py return INTPoint.removeSame(list) def", "def getFinal(points): count = 0.0 num = 0 p1 =", "or item[0] == 'v': if votes[item] > p1: p2 =", "= item else: if votes[item] > p2: p2 = votes[item]", "cv.imwrite(outputname + 
'answer.jpg',edges2) fd = open(outputname + 'answer.txt','w') fd.write('(' +", "return Edges.getLineABC(line) def getCirAnch(a,b): #moved to Edges.py return Edges.getCirAnch(a,b) def", "len(Cluster) for i in range(0,lens,1): Cluster[i] = [] for point", "point[1] > 0: if point[0] < edges.shape[1] and point[1] <", "return Edges.getCirAnch(a,b) def getCrossPoint(linea,lineb): #moved to INTPoint.py return INTPoint.getIntersectPoint(linea,lineb) def", "minlens = 1e15 minpos = 0 now = -1 for", "+ ')(' + str(ans[1][0]) + ',' + str(ans[1][1]) + ')('", "point[0] != 'v' and point[0] != 'h' and votes[point] !=", "fallen into disuse because of the low speed for i", "getVPoints(num = 16): #this function is fallen into disuse because", "for i in range(0,K,1): ret.append(Centers[i]) return ret def deal(inputname,outputname): global", "= votesFinal ans = getFinal(VPoints) print ans edges = cv.cvtColor(edges,cv.COLOR_GRAY2BGR)", "INTPoint.py return INTPoint.getArch(line,point) def voteForPoint(lines): #moved to INTPoint.py global votes", "sumy = 0.0 for point in points: w = votes[point]", "'v' and point[0] != 'h' and votes[point] != 0: Centers[count][0]", "in points: if point[0] != 'p' and point[0] != 'v'", "getCirAnch(a,b): #moved to Edges.py return Edges.getCirAnch(a,b) def getCrossPoint(linea,lineb): #moved to", "point[0] == 'p' or point[0] == 'h' or point[0] ==", "getArch(line,point): #moved to INTPoint.py return INTPoint.getArch(line,point) def voteForPoint(lines): #moved to", "count += votes[item] num += 1 K = 3 ret", "def getLinesLength(line): #moved to INTPoint.py return INTPoint.getLinesLength(line) def getMidPoint(line): #moved", "= now Cluster[minpos].append(point) def KMean(points,K = 3,step = 50): global", "edges.shape[0]: cv.line(edges2,(int(point[0]),int(point[1])),(int(point[0]),int(point[1])),(255,255,255),10) cv.imwrite(outputname + 'linedetect.jpg',edges) cv.imwrite(outputname + 'answer.jpg',edges2) fd =", "= [] count = count / num * 0.1 if", "for point in clu: 
if point[0] > 0 and point[1]", "lenofvotes = min(len(votes2),max(5,int(len(votes2) * 0.2))) votesFinal = {} VPoints =", "to INTPoint.py global votes global voters votes,voters = INTPoint.voteForPoint(lines,VPoints) return", "= 0 for point in points: if point[0] != 'p'", "range(0,num + 1,1): lens = len(Groups[i]) for j in range(0,lens,1):", "if point[0] == 'p' or point[0] == 'h' or point[0]", "count / num * 0.1 if p1 > count: K", "[] for i in range(0,lenofvotes,1): votesFinal[votes2[i][0]] = votes2[i][1] VPoints.append(votes2[i][0]) for" ]
[ "assert response.status_code == 200 file_list = response.get_json() assert len(file_list) ==", "def test_put_example_image_data(client): response = client.get(\"/api/files/image_data/image.jpg\") assert response.status_code == 404 response", "'x': 0, 'y': 0}], 'tags': ['a', 'b'] }) assert response.status_code", "== 200 def test_get_example_image_data(client): response = client.get(\"/api/files/image_data/image.jpg\") assert response.status_code ==", "test_list_example_directory(client): response = client.get(\"/api/files\") assert response.status_code == 200 file_list =", "'01.jpg' assert file_list[1]['key'] == '02.png' def test_get_example_image(client): response = client.get(\"/api/files/image/x.jpg\")", "client.get(\"/api/files/image_data/image.jpg\") assert response.status_code == 404 response = client.get(\"/api/files/image_data/image_annotated.jpg\") assert response.status_code", "response.status_code == 404 response = client.put(\"/api/files/image_data/image.jpg\", json={ 'annotations': [{'width': 10,", "== 2 assert file_list[0]['key'] == '01.jpg' assert file_list[1]['key'] == '02.png'", "= client.get(\"/api/files/image_data/image.jpg\") assert response.status_code == 404 response = client.put(\"/api/files/image_data/image.jpg\", json={", "assert file_list[0]['key'] == 'image_annotated.jpg' assert file_list[1]['key'] == 'image.jpg' assert file_list[2]['key']", "'image.jpg' assert file_list[2]['key'] == 'more_images/' assert file_list[3]['key'] == 'more_images/01.jpg' assert", "response = client.get(\"/api/files/image/more_images/01.jpg\") assert response.status_code == 200 def test_get_example_image_data(client): response", "len(file_list) == 5 assert file_list[0]['key'] == 'image_annotated.jpg' assert file_list[1]['key'] ==", "test_put_example_image_data(client): response = client.get(\"/api/files/image_data/image.jpg\") assert response.status_code == 404 response =", "'02.png' def test_get_example_image(client): response = 
client.get(\"/api/files/image/x.jpg\") assert response.status_code == 404", "== 'more_images/' assert file_list[3]['key'] == 'more_images/01.jpg' assert file_list[4]['key'] == 'more_images/02.png'", "file_list[0]['key'] == '01.jpg' assert file_list[1]['key'] == '02.png' def test_get_example_image(client): response", "file_list[1]['key'] == 'image.jpg' assert file_list[2]['key'] == 'more_images/' assert file_list[3]['key'] ==", "response = client.get(\"/api/files/image/image.jpg\") assert response.status_code == 200 response = client.get(\"/api/files/image/more_images/01.jpg\")", "= response.get_json() assert 'annotations' in data assert 'tags' in data", "200 data = response.get_json() assert 'annotations' in data assert 'tags'", "response.status_code == 200 def test_get_example_image_data(client): response = client.get(\"/api/files/image_data/image.jpg\") assert response.status_code", "client.put(\"/api/files/image_data/image.jpg\", json={ 'annotations': [{'width': 10, 'height': 10, 'x': 0, 'y':", "0, 'y': 0}], 'tags': ['a', 'b'] }) assert response.status_code ==", "10, 'height': 10, 'x': 0, 'y': 0}], 'tags': ['a', 'b']", "response = client.get(\"/api/files/image/x.jpg\") assert response.status_code == 404 response = client.get(\"/api/files/image/image.jpg\")", "response = client.get(\"/api/files/image_data/image_annotated.jpg\") assert response.status_code == 200 data = response.get_json()", "assert 'annotations' in data assert 'tags' in data def test_put_example_image_data(client):", "assert len(file_list) == 2 assert file_list[0]['key'] == '01.jpg' assert file_list[1]['key']", "['a', 'b'] }) assert response.status_code == 200 response = client.get(\"/api/files/image_data/image.jpg\")", "response = client.get(\"/api/files?path=more_images\") assert response.status_code == 200 file_list = response.get_json()", "client.get(\"/api/files/image_data/image.jpg\") assert response.status_code == 200 data = response.get_json() assert 'annotations'", "response = 
client.put(\"/api/files/image_data/image.jpg\", json={ 'annotations': [{'width': 10, 'height': 10, 'x':", "= client.get(\"/api/files/image_data/image.jpg\") assert response.status_code == 200 data = response.get_json() assert", "file_list[2]['key'] == 'more_images/' assert file_list[3]['key'] == 'more_images/01.jpg' assert file_list[4]['key'] ==", "client.get(\"/api/files\") assert response.status_code == 200 file_list = response.get_json() assert len(file_list)", "file_list = response.get_json() assert len(file_list) == 5 assert file_list[0]['key'] ==", "response.get_json() assert len(file_list) == 2 assert file_list[0]['key'] == '01.jpg' assert", "200 response = client.get(\"/api/files/image_data/image.jpg\") assert response.status_code == 200 data =", "data assert 'tags' in data def test_put_example_image_data(client): response = client.get(\"/api/files/image_data/image.jpg\")", "assert response.status_code == 200 data = response.get_json() assert 'annotations' in", "client.get(\"/api/files/image/more_images/01.jpg\") assert response.status_code == 200 def test_get_example_image_data(client): response = client.get(\"/api/files/image_data/image.jpg\")", "assert response.status_code == 404 response = client.get(\"/api/files/image_data/image_annotated.jpg\") assert response.status_code ==", "assert response.status_code == 404 response = client.get(\"/api/files/image/image.jpg\") assert response.status_code ==", "= client.get(\"/api/files/image/image.jpg\") assert response.status_code == 200 response = client.get(\"/api/files/image/more_images/01.jpg\") assert", "'more_images/' assert file_list[3]['key'] == 'more_images/01.jpg' assert file_list[4]['key'] == 'more_images/02.png' def", "file_list[3]['key'] == 'more_images/01.jpg' assert file_list[4]['key'] == 'more_images/02.png' def test_list_example_directory_nested(client): response", "client.get(\"/api/files?path=more_images\") assert response.status_code == 200 file_list = response.get_json() assert 
len(file_list)", "assert response.status_code == 200 response = client.get(\"/api/files/image/more_images/01.jpg\") assert response.status_code ==", "== '01.jpg' assert file_list[1]['key'] == '02.png' def test_get_example_image(client): response =", "0}], 'tags': ['a', 'b'] }) assert response.status_code == 200 response", "client.get(\"/api/files/image_data/image_annotated.jpg\") assert response.status_code == 200 data = response.get_json() assert 'annotations'", "200 file_list = response.get_json() assert len(file_list) == 5 assert file_list[0]['key']", "client.get(\"/api/files/image/image.jpg\") assert response.status_code == 200 response = client.get(\"/api/files/image/more_images/01.jpg\") assert response.status_code", "file_list[1]['key'] == '02.png' def test_get_example_image(client): response = client.get(\"/api/files/image/x.jpg\") assert response.status_code", "= client.get(\"/api/files?path=more_images\") assert response.status_code == 200 file_list = response.get_json() assert", "== '02.png' def test_get_example_image(client): response = client.get(\"/api/files/image/x.jpg\") assert response.status_code ==", "response = client.get(\"/api/files/image_data/image.jpg\") assert response.status_code == 200 data = response.get_json()", "client.get(\"/api/files/image/x.jpg\") assert response.status_code == 404 response = client.get(\"/api/files/image/image.jpg\") assert response.status_code", "client.get(\"/api/files/image_data/image.jpg\") assert response.status_code == 404 response = client.put(\"/api/files/image_data/image.jpg\", json={ 'annotations':", "test_list_example_directory_nested(client): response = client.get(\"/api/files?path=more_images\") assert response.status_code == 200 file_list =", "== 'more_images/02.png' def test_list_example_directory_nested(client): response = client.get(\"/api/files?path=more_images\") assert response.status_code ==", "assert 'tags' in data def test_put_example_image_data(client): response = 
client.get(\"/api/files/image_data/image.jpg\") assert", "[{'width': 10, 'height': 10, 'x': 0, 'y': 0}], 'tags': ['a',", "5 assert file_list[0]['key'] == 'image_annotated.jpg' assert file_list[1]['key'] == 'image.jpg' assert", "response = client.get(\"/api/files\") assert response.status_code == 200 file_list = response.get_json()", "== 200 response = client.get(\"/api/files/image_data/image.jpg\") assert response.status_code == 200 data", "== 404 response = client.put(\"/api/files/image_data/image.jpg\", json={ 'annotations': [{'width': 10, 'height':", "len(file_list) == 2 assert file_list[0]['key'] == '01.jpg' assert file_list[1]['key'] ==", "assert file_list[4]['key'] == 'more_images/02.png' def test_list_example_directory_nested(client): response = client.get(\"/api/files?path=more_images\") assert", "def test_get_example_image(client): response = client.get(\"/api/files/image/x.jpg\") assert response.status_code == 404 response", "== 'image.jpg' assert file_list[2]['key'] == 'more_images/' assert file_list[3]['key'] == 'more_images/01.jpg'", "10, 'x': 0, 'y': 0}], 'tags': ['a', 'b'] }) assert", "assert response.status_code == 200 def test_get_example_image_data(client): response = client.get(\"/api/files/image_data/image.jpg\") assert", "response.status_code == 200 response = client.get(\"/api/files/image/more_images/01.jpg\") assert response.status_code == 200", "response.status_code == 200 data = response.get_json() assert 'annotations' in data", "assert response.status_code == 200 response = client.get(\"/api/files/image_data/image.jpg\") assert response.status_code ==", "== 200 data = response.get_json() assert 'annotations' in data assert", "assert file_list[0]['key'] == '01.jpg' assert file_list[1]['key'] == '02.png' def test_get_example_image(client):", "200 def test_get_example_image_data(client): response = client.get(\"/api/files/image_data/image.jpg\") assert response.status_code == 404", "200 file_list = response.get_json() assert len(file_list) == 
2 assert file_list[0]['key']", "assert response.status_code == 404 response = client.put(\"/api/files/image_data/image.jpg\", json={ 'annotations': [{'width':", "file_list[4]['key'] == 'more_images/02.png' def test_list_example_directory_nested(client): response = client.get(\"/api/files?path=more_images\") assert response.status_code", "data def test_put_example_image_data(client): response = client.get(\"/api/files/image_data/image.jpg\") assert response.status_code == 404", "response.get_json() assert len(file_list) == 5 assert file_list[0]['key'] == 'image_annotated.jpg' assert", "response.status_code == 404 response = client.get(\"/api/files/image_data/image_annotated.jpg\") assert response.status_code == 200", "2 assert file_list[0]['key'] == '01.jpg' assert file_list[1]['key'] == '02.png' def", "response.get_json() assert 'annotations' in data assert 'tags' in data def", "== 5 assert file_list[0]['key'] == 'image_annotated.jpg' assert file_list[1]['key'] == 'image.jpg'", "404 response = client.get(\"/api/files/image/image.jpg\") assert response.status_code == 200 response =", "response.status_code == 200 file_list = response.get_json() assert len(file_list) == 2", "response.status_code == 404 response = client.get(\"/api/files/image/image.jpg\") assert response.status_code == 200", "= client.get(\"/api/files\") assert response.status_code == 200 file_list = response.get_json() assert", "data = response.get_json() assert 'annotations' in data assert 'tags' in", "test_get_example_image(client): response = client.get(\"/api/files/image/x.jpg\") assert response.status_code == 404 response =", "200 response = client.get(\"/api/files/image/more_images/01.jpg\") assert response.status_code == 200 def test_get_example_image_data(client):", "assert file_list[1]['key'] == '02.png' def test_get_example_image(client): response = client.get(\"/api/files/image/x.jpg\") assert", "'y': 0}], 'tags': ['a', 'b'] }) assert response.status_code == 200", "'height': 10, 'x': 0, 
'y': 0}], 'tags': ['a', 'b'] })", "== 404 response = client.get(\"/api/files/image/image.jpg\") assert response.status_code == 200 response", "= client.get(\"/api/files/image/more_images/01.jpg\") assert response.status_code == 200 def test_get_example_image_data(client): response =", "= response.get_json() assert len(file_list) == 5 assert file_list[0]['key'] == 'image_annotated.jpg'", "'annotations' in data assert 'tags' in data def test_put_example_image_data(client): response", "file_list = response.get_json() assert len(file_list) == 2 assert file_list[0]['key'] ==", "404 response = client.get(\"/api/files/image_data/image_annotated.jpg\") assert response.status_code == 200 data =", "== 404 response = client.get(\"/api/files/image_data/image_annotated.jpg\") assert response.status_code == 200 data", "== 'more_images/01.jpg' assert file_list[4]['key'] == 'more_images/02.png' def test_list_example_directory_nested(client): response =", "== 'image_annotated.jpg' assert file_list[1]['key'] == 'image.jpg' assert file_list[2]['key'] == 'more_images/'", "assert file_list[1]['key'] == 'image.jpg' assert file_list[2]['key'] == 'more_images/' assert file_list[3]['key']", "'annotations': [{'width': 10, 'height': 10, 'x': 0, 'y': 0}], 'tags':", "test_get_example_image_data(client): response = client.get(\"/api/files/image_data/image.jpg\") assert response.status_code == 404 response =", "== 200 file_list = response.get_json() assert len(file_list) == 2 assert", "response = client.get(\"/api/files/image_data/image.jpg\") assert response.status_code == 404 response = client.put(\"/api/files/image_data/image.jpg\",", "404 response = client.put(\"/api/files/image_data/image.jpg\", json={ 'annotations': [{'width': 10, 'height': 10,", "file_list[0]['key'] == 'image_annotated.jpg' assert file_list[1]['key'] == 'image.jpg' assert file_list[2]['key'] ==", "'b'] }) assert response.status_code == 200 response = client.get(\"/api/files/image_data/image.jpg\") assert", "assert 
len(file_list) == 5 assert file_list[0]['key'] == 'image_annotated.jpg' assert file_list[1]['key']", "== 200 response = client.get(\"/api/files/image/more_images/01.jpg\") assert response.status_code == 200 def", "in data def test_put_example_image_data(client): response = client.get(\"/api/files/image_data/image.jpg\") assert response.status_code ==", "response.status_code == 200 response = client.get(\"/api/files/image_data/image.jpg\") assert response.status_code == 200", "'tags': ['a', 'b'] }) assert response.status_code == 200 response =", "= client.put(\"/api/files/image_data/image.jpg\", json={ 'annotations': [{'width': 10, 'height': 10, 'x': 0,", "def test_list_example_directory(client): response = client.get(\"/api/files\") assert response.status_code == 200 file_list", "= client.get(\"/api/files/image_data/image.jpg\") assert response.status_code == 404 response = client.get(\"/api/files/image_data/image_annotated.jpg\") assert", "'more_images/02.png' def test_list_example_directory_nested(client): response = client.get(\"/api/files?path=more_images\") assert response.status_code == 200", "response = client.get(\"/api/files/image_data/image.jpg\") assert response.status_code == 404 response = client.get(\"/api/files/image_data/image_annotated.jpg\")", "assert file_list[2]['key'] == 'more_images/' assert file_list[3]['key'] == 'more_images/01.jpg' assert file_list[4]['key']", "'tags' in data def test_put_example_image_data(client): response = client.get(\"/api/files/image_data/image.jpg\") assert response.status_code", "def test_get_example_image_data(client): response = client.get(\"/api/files/image_data/image.jpg\") assert response.status_code == 404 response", "def test_list_example_directory_nested(client): response = client.get(\"/api/files?path=more_images\") assert response.status_code == 200 file_list", "= response.get_json() assert len(file_list) == 2 assert file_list[0]['key'] == '01.jpg'", "}) assert response.status_code == 200 response = 
client.get(\"/api/files/image_data/image.jpg\") assert response.status_code", "response.status_code == 200 file_list = response.get_json() assert len(file_list) == 5", "json={ 'annotations': [{'width': 10, 'height': 10, 'x': 0, 'y': 0}],", "'image_annotated.jpg' assert file_list[1]['key'] == 'image.jpg' assert file_list[2]['key'] == 'more_images/' assert", "= client.get(\"/api/files/image_data/image_annotated.jpg\") assert response.status_code == 200 data = response.get_json() assert", "= client.get(\"/api/files/image/x.jpg\") assert response.status_code == 404 response = client.get(\"/api/files/image/image.jpg\") assert", "'more_images/01.jpg' assert file_list[4]['key'] == 'more_images/02.png' def test_list_example_directory_nested(client): response = client.get(\"/api/files?path=more_images\")", "assert file_list[3]['key'] == 'more_images/01.jpg' assert file_list[4]['key'] == 'more_images/02.png' def test_list_example_directory_nested(client):", "== 200 file_list = response.get_json() assert len(file_list) == 5 assert", "in data assert 'tags' in data def test_put_example_image_data(client): response =" ]
[ "a cohorted partition in this course')), ('group_id', models.IntegerField(help_text='contains the id", "of a specific group within the cohorted partition')), ('created_at', models.DateTimeField(auto_now_add=True)),", "with?', max_length=255, db_index=True)), ('group_type', models.CharField(max_length=20, choices=[('cohort', 'Cohort')])), ('users', models.ManyToManyField(help_text='Who is", "name='CourseCohortsSettings', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('is_cohorted', models.BooleanField(default=False)), ('course_id',", "db_index=True)), ('_cohorted_discussions', models.TextField(null=True, db_column='cohorted_discussions', blank=True)), ('always_cohort_inline_discussions', models.BooleanField(default=True)), ], ), migrations.CreateModel(", "related_name='course_groups', to=settings.AUTH_USER_MODEL, db_index=True)), ], ), migrations.CreateModel( name='CourseUserGroupPartitionGroup', fields=[ ('id', models.AutoField(verbose_name='ID',", "dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='CohortMembership',", "[ migrations.CreateModel( name='CohortMembership', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('course_id',", "models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(help_text='What is the name of", "with?', unique=True, max_length=255, db_index=True)), ('_cohorted_discussions', models.TextField(null=True, db_column='cohorted_discussions', blank=True)), ('always_cohort_inline_discussions', models.BooleanField(default=True)),", "name='courseusergroup', unique_together={('name', 'course_id')}, ), migrations.AlterUniqueTogether( name='cohortmembership', unique_together={('user', 'course_id')}, ), ]", "('name', models.CharField(help_text='What is the name of this group? 
Must be", "max_length=255, db_index=True)), ('group_type', models.CharField(max_length=20, choices=[('cohort', 'Cohort')])), ('users', models.ManyToManyField(help_text='Who is in", "serialize=False, auto_created=True, primary_key=True)), ('is_cohorted', models.BooleanField(default=False)), ('course_id', CourseKeyField(help_text='Which course are these", "('course_id', CourseKeyField(help_text='Which course is this group associated with?', max_length=255, db_index=True)),", "db_index=True)), ('group_type', models.CharField(max_length=20, choices=[('cohort', 'Cohort')])), ('users', models.ManyToManyField(help_text='Who is in this", "auto_created=True, primary_key=True)), ('course_id', CourseKeyField(max_length=255)), ], ), migrations.CreateModel( name='CourseCohort', fields=[ ('id',", "fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(help_text='What is the", "import migrations, models from django.conf import settings from opaque_keys.edx.django.models import", "unique=True, max_length=255, db_index=True)), ('_cohorted_discussions', models.TextField(null=True, db_column='cohorted_discussions', blank=True)), ('always_cohort_inline_discussions', models.BooleanField(default=True)), ],", "), migrations.CreateModel( name='CourseCohortsSettings', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('is_cohorted',", "db_column='cohorted_discussions', blank=True)), ('always_cohort_inline_discussions', models.BooleanField(default=True)), ], ), migrations.CreateModel( name='CourseUserGroup', fields=[ ('id',", "field=models.OneToOneField(related_name='cohort', to='course_groups.CourseUserGroup', on_delete=models.CASCADE), ), migrations.AddField( model_name='cohortmembership', name='course_user_group', field=models.ForeignKey(to='course_groups.CourseUserGroup', on_delete=models.CASCADE), ),", "name of this group? 
Must be unique within a course.',", "'Manual')])), ], ), migrations.CreateModel( name='CourseCohortsSettings', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True,", "of this group? Must be unique within a course.', max_length=255)),", "from opaque_keys.edx.django.models import CourseKeyField class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL),", "name='user', field=models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE), ), migrations.AlterUniqueTogether( name='courseusergroup', unique_together={('name', 'course_id')}, ), migrations.AlterUniqueTogether(", "to='course_groups.CourseUserGroup', on_delete=models.CASCADE), ), migrations.AddField( model_name='cohortmembership', name='course_user_group', field=models.ForeignKey(to='course_groups.CourseUserGroup', on_delete=models.CASCADE), ), migrations.AddField(", "on_delete=models.CASCADE), ), migrations.AlterUniqueTogether( name='courseusergroup', unique_together={('name', 'course_id')}, ), migrations.AlterUniqueTogether( name='cohortmembership', unique_together={('user',", "models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('is_cohorted', models.BooleanField(default=False)), ('course_id', CourseKeyField(help_text='Which course are", "auto_created=True, primary_key=True)), ('is_cohorted', models.BooleanField(default=False)), ('course_id', CourseKeyField(help_text='Which course are these settings", "migrations.AlterUniqueTogether( name='courseusergroup', unique_together={('name', 'course_id')}, ), migrations.AlterUniqueTogether( name='cohortmembership', unique_together={('user', 'course_id')}, ),", "models.IntegerField(help_text='contains the id of a cohorted partition in this course')),", "('created_at', models.DateTimeField(auto_now_add=True)), ('updated_at', models.DateTimeField(auto_now=True)), ('course_user_group', 
models.OneToOneField(to='course_groups.CourseUserGroup', on_delete=models.CASCADE)), ], ), migrations.AddField(", "to=settings.AUTH_USER_MODEL, db_index=True)), ], ), migrations.CreateModel( name='CourseUserGroupPartitionGroup', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False,", "on_delete=models.CASCADE), ), migrations.AddField( model_name='cohortmembership', name='course_user_group', field=models.ForeignKey(to='course_groups.CourseUserGroup', on_delete=models.CASCADE), ), migrations.AddField( model_name='cohortmembership',", "'Cohort')])), ('users', models.ManyToManyField(help_text='Who is in this group?', related_name='course_groups', to=settings.AUTH_USER_MODEL, db_index=True)),", "primary_key=True)), ('assignment_type', models.CharField(default='manual', max_length=20, choices=[('random', 'Random'), ('manual', 'Manual')])), ], ),", "course')), ('group_id', models.IntegerField(help_text='contains the id of a specific group within", "CourseKeyField(max_length=255)), ], ), migrations.CreateModel( name='CourseCohort', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True,", "operations = [ migrations.CreateModel( name='CohortMembership', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True,", "], ), migrations.AddField( model_name='coursecohort', name='course_user_group', field=models.OneToOneField(related_name='cohort', to='course_groups.CourseUserGroup', on_delete=models.CASCADE), ), migrations.AddField(", "= [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='CohortMembership', fields=[", "models.ManyToManyField(help_text='Who is in this group?', related_name='course_groups', to=settings.AUTH_USER_MODEL, db_index=True)), ], ),", "models.DateTimeField(auto_now=True)), ('course_user_group', models.OneToOneField(to='course_groups.CourseUserGroup', on_delete=models.CASCADE)), ], ), migrations.AddField( 
model_name='coursecohort', name='course_user_group', field=models.OneToOneField(related_name='cohort',", "migrations.CreateModel( name='CourseCohortsSettings', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('is_cohorted', models.BooleanField(default=False)),", "on_delete=models.CASCADE)), ], ), migrations.AddField( model_name='coursecohort', name='course_user_group', field=models.OneToOneField(related_name='cohort', to='course_groups.CourseUserGroup', on_delete=models.CASCADE), ),", "models.DateTimeField(auto_now_add=True)), ('updated_at', models.DateTimeField(auto_now=True)), ('course_user_group', models.OneToOneField(to='course_groups.CourseUserGroup', on_delete=models.CASCADE)), ], ), migrations.AddField( model_name='coursecohort',", "fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('assignment_type', models.CharField(default='manual', max_length=20, choices=[('random',", "], ), migrations.CreateModel( name='CourseCohort', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),", "), migrations.CreateModel( name='CourseCohort', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('assignment_type',", "id of a cohorted partition in this course')), ('group_id', models.IntegerField(help_text='contains", "models.OneToOneField(to='course_groups.CourseUserGroup', on_delete=models.CASCADE)), ], ), migrations.AddField( model_name='coursecohort', name='course_user_group', field=models.OneToOneField(related_name='cohort', to='course_groups.CourseUserGroup', on_delete=models.CASCADE),", "serialize=False, auto_created=True, primary_key=True)), ('partition_id', models.IntegerField(help_text='contains the id of a cohorted", "this course')), ('group_id', models.IntegerField(help_text='contains the id of a specific group", "('_cohorted_discussions', 
models.TextField(null=True, db_column='cohorted_discussions', blank=True)), ('always_cohort_inline_discussions', models.BooleanField(default=True)), ], ), migrations.CreateModel( name='CourseUserGroup',", "course is this group associated with?', max_length=255, db_index=True)), ('group_type', models.CharField(max_length=20,", "fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('course_id', CourseKeyField(max_length=255)), ], ),", "name='CourseUserGroupPartitionGroup', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('partition_id', models.IntegerField(help_text='contains the", "choices=[('random', 'Random'), ('manual', 'Manual')])), ], ), migrations.CreateModel( name='CourseCohortsSettings', fields=[ ('id',", "is in this group?', related_name='course_groups', to=settings.AUTH_USER_MODEL, db_index=True)), ], ), migrations.CreateModel(", "django.db import migrations, models from django.conf import settings from opaque_keys.edx.django.models", "name='course_user_group', field=models.ForeignKey(to='course_groups.CourseUserGroup', on_delete=models.CASCADE), ), migrations.AddField( model_name='cohortmembership', name='user', field=models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE), ),", "('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('course_id', CourseKeyField(max_length=255)), ], ), migrations.CreateModel(", "max_length=255, db_index=True)), ('_cohorted_discussions', models.TextField(null=True, db_column='cohorted_discussions', blank=True)), ('always_cohort_inline_discussions', models.BooleanField(default=True)), ], ),", "), migrations.CreateModel( name='CourseUserGroupPartitionGroup', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('partition_id',", "a specific group within the cohorted partition')), ('created_at', 
models.DateTimeField(auto_now_add=True)), ('updated_at',", "partition')), ('created_at', models.DateTimeField(auto_now_add=True)), ('updated_at', models.DateTimeField(auto_now=True)), ('course_user_group', models.OneToOneField(to='course_groups.CourseUserGroup', on_delete=models.CASCADE)), ], ),", "of a cohorted partition in this course')), ('group_id', models.IntegerField(help_text='contains the", "this group associated with?', max_length=255, db_index=True)), ('group_type', models.CharField(max_length=20, choices=[('cohort', 'Cohort')])),", "be unique within a course.', max_length=255)), ('course_id', CourseKeyField(help_text='Which course is", "models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('assignment_type', models.CharField(default='manual', max_length=20, choices=[('random', 'Random'), ('manual',", "model_name='cohortmembership', name='course_user_group', field=models.ForeignKey(to='course_groups.CourseUserGroup', on_delete=models.CASCADE), ), migrations.AddField( model_name='cohortmembership', name='user', field=models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE),", "model_name='coursecohort', name='course_user_group', field=models.OneToOneField(related_name='cohort', to='course_groups.CourseUserGroup', on_delete=models.CASCADE), ), migrations.AddField( model_name='cohortmembership', name='course_user_group', field=models.ForeignKey(to='course_groups.CourseUserGroup',", "associated with?', max_length=255, db_index=True)), ('group_type', models.CharField(max_length=20, choices=[('cohort', 'Cohort')])), ('users', models.ManyToManyField(help_text='Who", "cohorted partition')), ('created_at', models.DateTimeField(auto_now_add=True)), ('updated_at', models.DateTimeField(auto_now=True)), ('course_user_group', models.OneToOneField(to='course_groups.CourseUserGroup', on_delete=models.CASCADE)), ],", "migrations.AddField( model_name='cohortmembership', name='course_user_group', 
field=models.ForeignKey(to='course_groups.CourseUserGroup', on_delete=models.CASCADE), ), migrations.AddField( model_name='cohortmembership', name='user', field=models.ForeignKey(to=settings.AUTH_USER_MODEL,", "migrations.CreateModel( name='CourseUserGroupPartitionGroup', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('partition_id', models.IntegerField(help_text='contains", "opaque_keys.edx.django.models import CourseKeyField class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ]", "db_index=True)), ], ), migrations.CreateModel( name='CourseUserGroupPartitionGroup', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True,", "('partition_id', models.IntegerField(help_text='contains the id of a cohorted partition in this", "group within the cohorted partition')), ('created_at', models.DateTimeField(auto_now_add=True)), ('updated_at', models.DateTimeField(auto_now=True)), ('course_user_group',", "] operations = [ migrations.CreateModel( name='CohortMembership', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False,", "('course_id', CourseKeyField(max_length=255)), ], ), migrations.CreateModel( name='CourseCohort', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False,", "CourseKeyField(help_text='Which course is this group associated with?', max_length=255, db_index=True)), ('group_type',", "), migrations.AlterUniqueTogether( name='courseusergroup', unique_together={('name', 'course_id')}, ), migrations.AlterUniqueTogether( name='cohortmembership', unique_together={('user', 'course_id')},", "primary_key=True)), ('name', models.CharField(help_text='What is the name of this group? Must", "this group? 
Must be unique within a course.', max_length=255)), ('course_id',", "name='course_user_group', field=models.OneToOneField(related_name='cohort', to='course_groups.CourseUserGroup', on_delete=models.CASCADE), ), migrations.AddField( model_name='cohortmembership', name='course_user_group', field=models.ForeignKey(to='course_groups.CourseUserGroup', on_delete=models.CASCADE),", "auto_created=True, primary_key=True)), ('assignment_type', models.CharField(default='manual', max_length=20, choices=[('random', 'Random'), ('manual', 'Manual')])), ],", "settings associated with?', unique=True, max_length=255, db_index=True)), ('_cohorted_discussions', models.TextField(null=True, db_column='cohorted_discussions', blank=True)),", "within a course.', max_length=255)), ('course_id', CourseKeyField(help_text='Which course is this group", "the name of this group? Must be unique within a", "migrations.AddField( model_name='coursecohort', name='course_user_group', field=models.OneToOneField(related_name='cohort', to='course_groups.CourseUserGroup', on_delete=models.CASCADE), ), migrations.AddField( model_name='cohortmembership', name='course_user_group',", "field=models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE), ), migrations.AlterUniqueTogether( name='courseusergroup', unique_together={('name', 'course_id')}, ), migrations.AlterUniqueTogether( name='cohortmembership',", "id of a specific group within the cohorted partition')), ('created_at',", "models.CharField(help_text='What is the name of this group? 
Must be unique", "migrations.CreateModel( name='CourseCohort', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('assignment_type', models.CharField(default='manual',", "serialize=False, auto_created=True, primary_key=True)), ('assignment_type', models.CharField(default='manual', max_length=20, choices=[('random', 'Random'), ('manual', 'Manual')])),", "migrations.AddField( model_name='cohortmembership', name='user', field=models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE), ), migrations.AlterUniqueTogether( name='courseusergroup', unique_together={('name', 'course_id')},", "django.conf import settings from opaque_keys.edx.django.models import CourseKeyField class Migration(migrations.Migration): dependencies", "model_name='cohortmembership', name='user', field=models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE), ), migrations.AlterUniqueTogether( name='courseusergroup', unique_together={('name', 'course_id')}, ),", "CourseKeyField(help_text='Which course are these settings associated with?', unique=True, max_length=255, db_index=True)),", "models.BooleanField(default=True)), ], ), migrations.CreateModel( name='CourseUserGroup', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True,", "Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel(", "('always_cohort_inline_discussions', models.BooleanField(default=True)), ], ), migrations.CreateModel( name='CourseUserGroup', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False,", "partition in this course')), ('group_id', models.IntegerField(help_text='contains the id of a", "'Random'), ('manual', 'Manual')])), ], ), migrations.CreateModel( name='CourseCohortsSettings', fields=[ ('id', models.AutoField(verbose_name='ID',", "('course_id', CourseKeyField(help_text='Which course are 
these settings associated with?', unique=True, max_length=255,", "), migrations.AddField( model_name='cohortmembership', name='user', field=models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE), ), migrations.AlterUniqueTogether( name='courseusergroup', unique_together={('name',", "unique within a course.', max_length=255)), ('course_id', CourseKeyField(help_text='Which course is this", "], ), migrations.CreateModel( name='CourseCohortsSettings', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),", "on_delete=models.CASCADE), ), migrations.AddField( model_name='cohortmembership', name='user', field=models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE), ), migrations.AlterUniqueTogether( name='courseusergroup',", "group? Must be unique within a course.', max_length=255)), ('course_id', CourseKeyField(help_text='Which", "('course_user_group', models.OneToOneField(to='course_groups.CourseUserGroup', on_delete=models.CASCADE)), ], ), migrations.AddField( model_name='coursecohort', name='course_user_group', field=models.OneToOneField(related_name='cohort', to='course_groups.CourseUserGroup',", "this group?', related_name='course_groups', to=settings.AUTH_USER_MODEL, db_index=True)), ], ), migrations.CreateModel( name='CourseUserGroupPartitionGroup', fields=[", "('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('is_cohorted', models.BooleanField(default=False)), ('course_id', CourseKeyField(help_text='Which course", "import settings from opaque_keys.edx.django.models import CourseKeyField class Migration(migrations.Migration): dependencies =", "name='CourseCohort', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('assignment_type', models.CharField(default='manual', max_length=20,", "models.BooleanField(default=False)), ('course_id', CourseKeyField(help_text='Which 
course are these settings associated with?', unique=True,", "in this course')), ('group_id', models.IntegerField(help_text='contains the id of a specific", "), migrations.AddField( model_name='cohortmembership', name='course_user_group', field=models.ForeignKey(to='course_groups.CourseUserGroup', on_delete=models.CASCADE), ), migrations.AddField( model_name='cohortmembership', name='user',", "settings from opaque_keys.edx.django.models import CourseKeyField class Migration(migrations.Migration): dependencies = [", "models.TextField(null=True, db_column='cohorted_discussions', blank=True)), ('always_cohort_inline_discussions', models.BooleanField(default=True)), ], ), migrations.CreateModel( name='CourseUserGroup', fields=[", "in this group?', related_name='course_groups', to=settings.AUTH_USER_MODEL, db_index=True)), ], ), migrations.CreateModel( name='CourseUserGroupPartitionGroup',", "('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('assignment_type', models.CharField(default='manual', max_length=20, choices=[('random', 'Random'),", "primary_key=True)), ('course_id', CourseKeyField(max_length=255)), ], ), migrations.CreateModel( name='CourseCohort', fields=[ ('id', models.AutoField(verbose_name='ID',", "name='CourseUserGroup', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(help_text='What is", "('users', models.ManyToManyField(help_text='Who is in this group?', related_name='course_groups', to=settings.AUTH_USER_MODEL, db_index=True)), ],", "auto_created=True, primary_key=True)), ('partition_id', models.IntegerField(help_text='contains the id of a cohorted partition", "serialize=False, auto_created=True, primary_key=True)), ('course_id', CourseKeyField(max_length=255)), ], ), migrations.CreateModel( name='CourseCohort', fields=[", "Must be unique within a course.', max_length=255)), ('course_id', CourseKeyField(help_text='Which 
course", "models.CharField(default='manual', max_length=20, choices=[('random', 'Random'), ('manual', 'Manual')])), ], ), migrations.CreateModel( name='CourseCohortsSettings',", "CourseKeyField class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations =", "primary_key=True)), ('partition_id', models.IntegerField(help_text='contains the id of a cohorted partition in", "primary_key=True)), ('is_cohorted', models.BooleanField(default=False)), ('course_id', CourseKeyField(help_text='Which course are these settings associated", "migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='CohortMembership', fields=[ ('id', models.AutoField(verbose_name='ID',", "('manual', 'Manual')])), ], ), migrations.CreateModel( name='CourseCohortsSettings', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False,", "('is_cohorted', models.BooleanField(default=False)), ('course_id', CourseKeyField(help_text='Which course are these settings associated with?',", "are these settings associated with?', unique=True, max_length=255, db_index=True)), ('_cohorted_discussions', models.TextField(null=True,", "fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('is_cohorted', models.BooleanField(default=False)), ('course_id', CourseKeyField(help_text='Which", "), migrations.CreateModel( name='CourseUserGroup', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name',", "a course.', max_length=255)), ('course_id', CourseKeyField(help_text='Which course is this group associated", "[ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='CohortMembership', fields=[ ('id',", "max_length=255)), ('course_id', CourseKeyField(help_text='Which course is this group associated with?', max_length=255,", 
"serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(help_text='What is the name of this", "), migrations.AddField( model_name='coursecohort', name='course_user_group', field=models.OneToOneField(related_name='cohort', to='course_groups.CourseUserGroup', on_delete=models.CASCADE), ), migrations.AddField( model_name='cohortmembership',", "models.CharField(max_length=20, choices=[('cohort', 'Cohort')])), ('users', models.ManyToManyField(help_text='Who is in this group?', related_name='course_groups',", "], ), migrations.CreateModel( name='CourseUserGroupPartitionGroup', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),", "blank=True)), ('always_cohort_inline_discussions', models.BooleanField(default=True)), ], ), migrations.CreateModel( name='CourseUserGroup', fields=[ ('id', models.AutoField(verbose_name='ID',", "these settings associated with?', unique=True, max_length=255, db_index=True)), ('_cohorted_discussions', models.TextField(null=True, db_column='cohorted_discussions',", "migrations.CreateModel( name='CourseUserGroup', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(help_text='What", "auto_created=True, primary_key=True)), ('name', models.CharField(help_text='What is the name of this group?", "fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('partition_id', models.IntegerField(help_text='contains the id", "import CourseKeyField class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations", "], ), migrations.CreateModel( name='CourseUserGroup', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),", "from django.conf import settings from opaque_keys.edx.django.models import CourseKeyField class 
Migration(migrations.Migration):", "course.', max_length=255)), ('course_id', CourseKeyField(help_text='Which course is this group associated with?',", "field=models.ForeignKey(to='course_groups.CourseUserGroup', on_delete=models.CASCADE), ), migrations.AddField( model_name='cohortmembership', name='user', field=models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE), ), migrations.AlterUniqueTogether(", "max_length=20, choices=[('random', 'Random'), ('manual', 'Manual')])), ], ), migrations.CreateModel( name='CourseCohortsSettings', fields=[", "within the cohorted partition')), ('created_at', models.DateTimeField(auto_now_add=True)), ('updated_at', models.DateTimeField(auto_now=True)), ('course_user_group', models.OneToOneField(to='course_groups.CourseUserGroup',", "('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(help_text='What is the name", "associated with?', unique=True, max_length=255, db_index=True)), ('_cohorted_discussions', models.TextField(null=True, db_column='cohorted_discussions', blank=True)), ('always_cohort_inline_discussions',", "choices=[('cohort', 'Cohort')])), ('users', models.ManyToManyField(help_text='Who is in this group?', related_name='course_groups', to=settings.AUTH_USER_MODEL,", "from django.db import migrations, models from django.conf import settings from", "('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('partition_id', models.IntegerField(help_text='contains the id of", "is this group associated with?', max_length=255, db_index=True)), ('group_type', models.CharField(max_length=20, choices=[('cohort',", "models.IntegerField(help_text='contains the id of a specific group within the cohorted", "('updated_at', models.DateTimeField(auto_now=True)), ('course_user_group', models.OneToOneField(to='course_groups.CourseUserGroup', on_delete=models.CASCADE)), ], ), migrations.AddField( 
model_name='coursecohort', name='course_user_group',", "the cohorted partition')), ('created_at', models.DateTimeField(auto_now_add=True)), ('updated_at', models.DateTimeField(auto_now=True)), ('course_user_group', models.OneToOneField(to='course_groups.CourseUserGroup', on_delete=models.CASCADE)),", "= [ migrations.CreateModel( name='CohortMembership', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),", "('assignment_type', models.CharField(default='manual', max_length=20, choices=[('random', 'Random'), ('manual', 'Manual')])), ], ), migrations.CreateModel(", "migrations.CreateModel( name='CohortMembership', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('course_id', CourseKeyField(max_length=255)),", "('group_id', models.IntegerField(help_text='contains the id of a specific group within the", "the id of a cohorted partition in this course')), ('group_id',", "group associated with?', max_length=255, db_index=True)), ('group_type', models.CharField(max_length=20, choices=[('cohort', 'Cohort')])), ('users',", "group?', related_name='course_groups', to=settings.AUTH_USER_MODEL, db_index=True)), ], ), migrations.CreateModel( name='CourseUserGroupPartitionGroup', fields=[ ('id',", "cohorted partition in this course')), ('group_id', models.IntegerField(help_text='contains the id of", "models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('course_id', CourseKeyField(max_length=255)), ], ), migrations.CreateModel( name='CourseCohort',", "course are these settings associated with?', unique=True, max_length=255, db_index=True)), ('_cohorted_discussions',", "name='CohortMembership', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('course_id', CourseKeyField(max_length=255)), ],", "models from django.conf import settings from opaque_keys.edx.django.models 
import CourseKeyField class", "the id of a specific group within the cohorted partition')),", "models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('partition_id', models.IntegerField(help_text='contains the id of a", "class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [", "migrations, models from django.conf import settings from opaque_keys.edx.django.models import CourseKeyField", "('group_type', models.CharField(max_length=20, choices=[('cohort', 'Cohort')])), ('users', models.ManyToManyField(help_text='Who is in this group?',", "specific group within the cohorted partition')), ('created_at', models.DateTimeField(auto_now_add=True)), ('updated_at', models.DateTimeField(auto_now=True)),", "is the name of this group? Must be unique within" ]
[ "import csv import random from config import MIN_CUSTOMER_ID, MAX_CUSTOMER_ID ACQUISITION_SOURCES", "fieldnames=['CustomerID', 'AcquisitionSource']) writer.writeheader() for customer_id in range(MIN_CUSTOMER_ID, MAX_CUSTOMER_ID + 1):", "main(): with open('customers.csv', 'w') as fout: writer = csv.DictWriter(fout, fieldnames=['CustomerID',", "for customer_id in range(MIN_CUSTOMER_ID, MAX_CUSTOMER_ID + 1): record = {", "= { 'CustomerID': int(customer_id), 'AcquisitionSource': random.choices(ACQUISITION_SOURCES).pop() } writer.writerow(record) if __name__", "writer.writeheader() for customer_id in range(MIN_CUSTOMER_ID, MAX_CUSTOMER_ID + 1): record =", "\"\"\"Generate Customer Data\"\"\" import csv import random from config import", "= [ 'OrganicSearch', 'PaidSearch', 'Email', 'SocialMedia', 'Display', 'Affiliate' 'Referral' ]", "Customer Data\"\"\" import csv import random from config import MIN_CUSTOMER_ID,", "1): record = { 'CustomerID': int(customer_id), 'AcquisitionSource': random.choices(ACQUISITION_SOURCES).pop() } writer.writerow(record)", "fout: writer = csv.DictWriter(fout, fieldnames=['CustomerID', 'AcquisitionSource']) writer.writeheader() for customer_id in", "MIN_CUSTOMER_ID, MAX_CUSTOMER_ID ACQUISITION_SOURCES = [ 'OrganicSearch', 'PaidSearch', 'Email', 'SocialMedia', 'Display',", "'w') as fout: writer = csv.DictWriter(fout, fieldnames=['CustomerID', 'AcquisitionSource']) writer.writeheader() for", "from config import MIN_CUSTOMER_ID, MAX_CUSTOMER_ID ACQUISITION_SOURCES = [ 'OrganicSearch', 'PaidSearch',", "MAX_CUSTOMER_ID ACQUISITION_SOURCES = [ 'OrganicSearch', 'PaidSearch', 'Email', 'SocialMedia', 'Display', 'Affiliate'", "writer = csv.DictWriter(fout, fieldnames=['CustomerID', 'AcquisitionSource']) writer.writeheader() for customer_id in range(MIN_CUSTOMER_ID,", "'SocialMedia', 'Display', 'Affiliate' 'Referral' ] def main(): with open('customers.csv', 'w')", "= csv.DictWriter(fout, fieldnames=['CustomerID', 'AcquisitionSource']) 
writer.writeheader() for customer_id in range(MIN_CUSTOMER_ID, MAX_CUSTOMER_ID", "MAX_CUSTOMER_ID + 1): record = { 'CustomerID': int(customer_id), 'AcquisitionSource': random.choices(ACQUISITION_SOURCES).pop()", "<filename>kafka-rockset-integration/generate_customers_data.py \"\"\"Generate Customer Data\"\"\" import csv import random from config", "'CustomerID': int(customer_id), 'AcquisitionSource': random.choices(ACQUISITION_SOURCES).pop() } writer.writerow(record) if __name__ == '__main__':", "as fout: writer = csv.DictWriter(fout, fieldnames=['CustomerID', 'AcquisitionSource']) writer.writeheader() for customer_id", "'PaidSearch', 'Email', 'SocialMedia', 'Display', 'Affiliate' 'Referral' ] def main(): with", "] def main(): with open('customers.csv', 'w') as fout: writer =", "csv.DictWriter(fout, fieldnames=['CustomerID', 'AcquisitionSource']) writer.writeheader() for customer_id in range(MIN_CUSTOMER_ID, MAX_CUSTOMER_ID +", "int(customer_id), 'AcquisitionSource': random.choices(ACQUISITION_SOURCES).pop() } writer.writerow(record) if __name__ == '__main__': main()", "random from config import MIN_CUSTOMER_ID, MAX_CUSTOMER_ID ACQUISITION_SOURCES = [ 'OrganicSearch',", "in range(MIN_CUSTOMER_ID, MAX_CUSTOMER_ID + 1): record = { 'CustomerID': int(customer_id),", "'Referral' ] def main(): with open('customers.csv', 'w') as fout: writer", "'AcquisitionSource']) writer.writeheader() for customer_id in range(MIN_CUSTOMER_ID, MAX_CUSTOMER_ID + 1): record", "import random from config import MIN_CUSTOMER_ID, MAX_CUSTOMER_ID ACQUISITION_SOURCES = [", "config import MIN_CUSTOMER_ID, MAX_CUSTOMER_ID ACQUISITION_SOURCES = [ 'OrganicSearch', 'PaidSearch', 'Email',", "customer_id in range(MIN_CUSTOMER_ID, MAX_CUSTOMER_ID + 1): record = { 'CustomerID':", "import MIN_CUSTOMER_ID, MAX_CUSTOMER_ID ACQUISITION_SOURCES = [ 'OrganicSearch', 'PaidSearch', 'Email', 'SocialMedia',", "'Display', 'Affiliate' 'Referral' ] def main(): with open('customers.csv', 'w') as", 
"range(MIN_CUSTOMER_ID, MAX_CUSTOMER_ID + 1): record = { 'CustomerID': int(customer_id), 'AcquisitionSource':", "Data\"\"\" import csv import random from config import MIN_CUSTOMER_ID, MAX_CUSTOMER_ID", "'Email', 'SocialMedia', 'Display', 'Affiliate' 'Referral' ] def main(): with open('customers.csv',", "+ 1): record = { 'CustomerID': int(customer_id), 'AcquisitionSource': random.choices(ACQUISITION_SOURCES).pop() }", "csv import random from config import MIN_CUSTOMER_ID, MAX_CUSTOMER_ID ACQUISITION_SOURCES =", "'Affiliate' 'Referral' ] def main(): with open('customers.csv', 'w') as fout:", "with open('customers.csv', 'w') as fout: writer = csv.DictWriter(fout, fieldnames=['CustomerID', 'AcquisitionSource'])", "open('customers.csv', 'w') as fout: writer = csv.DictWriter(fout, fieldnames=['CustomerID', 'AcquisitionSource']) writer.writeheader()", "{ 'CustomerID': int(customer_id), 'AcquisitionSource': random.choices(ACQUISITION_SOURCES).pop() } writer.writerow(record) if __name__ ==", "def main(): with open('customers.csv', 'w') as fout: writer = csv.DictWriter(fout,", "ACQUISITION_SOURCES = [ 'OrganicSearch', 'PaidSearch', 'Email', 'SocialMedia', 'Display', 'Affiliate' 'Referral'", "'OrganicSearch', 'PaidSearch', 'Email', 'SocialMedia', 'Display', 'Affiliate' 'Referral' ] def main():", "record = { 'CustomerID': int(customer_id), 'AcquisitionSource': random.choices(ACQUISITION_SOURCES).pop() } writer.writerow(record) if", "[ 'OrganicSearch', 'PaidSearch', 'Email', 'SocialMedia', 'Display', 'Affiliate' 'Referral' ] def" ]
[ "parsl.executors.errors import UnsupportedFeatureError from parsl.executors import WorkQueueExecutor @python_app def double(x,", "import python_app from parsl.tests.configs.local_threads import config from parsl.executors.errors import UnsupportedFeatureError", "import config from parsl.executors.errors import UnsupportedFeatureError from parsl.executors import WorkQueueExecutor", "parsl_resource_specification={}): return x * 2 def test_resource(n=2): spec = {'cores':", "'1GiB'} fut = double(n, parsl_resource_specification=spec) try: fut.result() except Exception as", "from parsl.executors import WorkQueueExecutor @python_app def double(x, parsl_resource_specification={}): return x", "executors = parsl.dfk().executors executor = None for label in executors:", "WorkQueueExecutor @python_app def double(x, parsl_resource_specification={}): return x * 2 def", "for label in executors: if label != 'data_manager': executor =", "parsl.tests.configs.local_threads import config from parsl.executors.errors import UnsupportedFeatureError from parsl.executors import", "import WorkQueueExecutor @python_app def double(x, parsl_resource_specification={}): return x * 2", "if __name__ == '__main__': local_config = config parsl.load(local_config) x =", "parsl.executors import WorkQueueExecutor @python_app def double(x, parsl_resource_specification={}): return x *", "fut = double(n, parsl_resource_specification=spec) try: fut.result() except Exception as e:", "except Exception as e: assert isinstance(e, UnsupportedFeatureError) else: executors =", "= executors[label] break assert isinstance(executor, WorkQueueExecutor) if __name__ == '__main__':", "e: assert isinstance(e, UnsupportedFeatureError) else: executors = parsl.dfk().executors executor =", "from parsl.app.app import python_app from parsl.tests.configs.local_threads import config from parsl.executors.errors", "import parsl from parsl.app.app import python_app from parsl.tests.configs.local_threads import config", 
"parsl_resource_specification=spec) try: fut.result() except Exception as e: assert isinstance(e, UnsupportedFeatureError)", "def test_resource(n=2): spec = {'cores': 2, 'memory': '1GiB'} fut =", "executor = executors[label] break assert isinstance(executor, WorkQueueExecutor) if __name__ ==", "import UnsupportedFeatureError from parsl.executors import WorkQueueExecutor @python_app def double(x, parsl_resource_specification={}):", "double(n, parsl_resource_specification=spec) try: fut.result() except Exception as e: assert isinstance(e,", "isinstance(e, UnsupportedFeatureError) else: executors = parsl.dfk().executors executor = None for", "WorkQueueExecutor) if __name__ == '__main__': local_config = config parsl.load(local_config) x", "label in executors: if label != 'data_manager': executor = executors[label]", "= double(n, parsl_resource_specification=spec) try: fut.result() except Exception as e: assert", "as e: assert isinstance(e, UnsupportedFeatureError) else: executors = parsl.dfk().executors executor", "= parsl.dfk().executors executor = None for label in executors: if", "2 def test_resource(n=2): spec = {'cores': 2, 'memory': '1GiB'} fut", "def double(x, parsl_resource_specification={}): return x * 2 def test_resource(n=2): spec", "None for label in executors: if label != 'data_manager': executor", "in executors: if label != 'data_manager': executor = executors[label] break", "'memory': '1GiB'} fut = double(n, parsl_resource_specification=spec) try: fut.result() except Exception", "@python_app def double(x, parsl_resource_specification={}): return x * 2 def test_resource(n=2):", "parsl from parsl.app.app import python_app from parsl.tests.configs.local_threads import config from", "{'cores': 2, 'memory': '1GiB'} fut = double(n, parsl_resource_specification=spec) try: fut.result()", "__name__ == '__main__': local_config = config parsl.load(local_config) x = test_resource(2)", "break assert isinstance(executor, WorkQueueExecutor) if __name__ == '__main__': 
local_config =", "try: fut.result() except Exception as e: assert isinstance(e, UnsupportedFeatureError) else:", "= {'cores': 2, 'memory': '1GiB'} fut = double(n, parsl_resource_specification=spec) try:", "spec = {'cores': 2, 'memory': '1GiB'} fut = double(n, parsl_resource_specification=spec)", "double(x, parsl_resource_specification={}): return x * 2 def test_resource(n=2): spec =", "if label != 'data_manager': executor = executors[label] break assert isinstance(executor,", "python_app from parsl.tests.configs.local_threads import config from parsl.executors.errors import UnsupportedFeatureError from", "assert isinstance(e, UnsupportedFeatureError) else: executors = parsl.dfk().executors executor = None", "parsl.app.app import python_app from parsl.tests.configs.local_threads import config from parsl.executors.errors import", "isinstance(executor, WorkQueueExecutor) if __name__ == '__main__': local_config = config parsl.load(local_config)", "assert isinstance(executor, WorkQueueExecutor) if __name__ == '__main__': local_config = config", "return x * 2 def test_resource(n=2): spec = {'cores': 2,", "UnsupportedFeatureError) else: executors = parsl.dfk().executors executor = None for label", "config from parsl.executors.errors import UnsupportedFeatureError from parsl.executors import WorkQueueExecutor @python_app", "x * 2 def test_resource(n=2): spec = {'cores': 2, 'memory':", "* 2 def test_resource(n=2): spec = {'cores': 2, 'memory': '1GiB'}", "= None for label in executors: if label != 'data_manager':", "Exception as e: assert isinstance(e, UnsupportedFeatureError) else: executors = parsl.dfk().executors", "'data_manager': executor = executors[label] break assert isinstance(executor, WorkQueueExecutor) if __name__", "from parsl.executors.errors import UnsupportedFeatureError from parsl.executors import WorkQueueExecutor @python_app def", "2, 'memory': '1GiB'} fut = double(n, parsl_resource_specification=spec) try: fut.result() except", "test_resource(n=2): spec = 
{'cores': 2, 'memory': '1GiB'} fut = double(n,", "parsl.dfk().executors executor = None for label in executors: if label", "from parsl.tests.configs.local_threads import config from parsl.executors.errors import UnsupportedFeatureError from parsl.executors", "UnsupportedFeatureError from parsl.executors import WorkQueueExecutor @python_app def double(x, parsl_resource_specification={}): return", "else: executors = parsl.dfk().executors executor = None for label in", "<gh_stars>0 import parsl from parsl.app.app import python_app from parsl.tests.configs.local_threads import", "!= 'data_manager': executor = executors[label] break assert isinstance(executor, WorkQueueExecutor) if", "executors[label] break assert isinstance(executor, WorkQueueExecutor) if __name__ == '__main__': local_config", "fut.result() except Exception as e: assert isinstance(e, UnsupportedFeatureError) else: executors", "executors: if label != 'data_manager': executor = executors[label] break assert", "label != 'data_manager': executor = executors[label] break assert isinstance(executor, WorkQueueExecutor)", "executor = None for label in executors: if label !=" ]
[ "__include_sub_dirs(self, files: Iterable[pathlib.Path], file_set: Set[pathlib.Path]): \"\"\"Include files from sub directories\"\"\"", "If there is local match for file/directory, it is marked", "is changed to be relative of working directory of container,", "-> List['FileMatcher']: \"\"\"Parse pattens from a list\"\"\" res = []", "parent_check=False) # Potential path as argument, not dividing it pieces", "if len_s > 0: if len_v < i + len_s", "upload - must mean some sub directory for output p", "res = [] for file in files: if self.__match(file) ==", "h_file, a_name = self.__archive_name_for(o_file) if h_file not in already_listed: self.host_files.append(h_file)", "has no whitespace in arguments, we are processing this part", "args self.directory = directory self.host_files: List[pathlib.Path] = [] self.command_args =", "0: if len_v < i + len_s or value[i:i +", "files by filters, perhaps? for filth in input_filters or []:", "/../ used (ok, quite weak) return file.is_absolute() or (\"..\" in", "path as argument, not dividing it pieces yet for further", "in match_string self.absolute_path = match_string.startswith('/') self.include = include @classmethod def", "# include files in sub directories self.__include_sub_dirs(o_file.iterdir(), already_listed) if file_exists:", "perhaps? 
for filth in input_filters or []: self.host_files = filth.filter_upload_files(self.host_files)", "def __analyze(self): \"\"\"Analyze the command line\"\"\" self.command_args = [] already_listed:", "# When filename contains potentially spaces, were are only interested", "List[FileMatcher] = None): self.original_args = args self.directory = directory self.host_files:", "return self.match_string == value split = self.match_string.split(\"*\") i = 0", "from sub directories\"\"\" for f in files: if f not", "there is existing parent directory, perhaps for output o_parent =", "split command (On top of shlex basic) self.additional_punc_chars = \"=,\"", "== '/' for c in path]) # When filename contains", "files is None else files res = [] # filter", "o_file = pathlib.Path(path) # does file/dir exists? No attempt to", "# ...and there is existing parent directory, perhaps for output", "path: str, already_listed: Set[pathlib.Path], parent_check: bool = True) -> Optional[str]:", "bool: \"\"\"Match value with this pattern\"\"\" if self.exact: return self.match_string", "arc_name cmd_args = self.command_args return cmd_args def detect_upload_files(self, files: Optional[Iterable[pathlib.Path]]", "to working directory res = [] for file in files:", "uploaded file\"\"\" if cls.__use_absolute_path(file): h_file = file.resolve() a_file = file.resolve().as_posix()", "len(s) if len_s > 0: off = value.find(s, off) if", "match_strings: List[str]) -> List['FileMatcher']: \"\"\"Parse pattens from a list\"\"\" res", "[] self.command_args = args.copy() # Additional punctuation chars, whereas we", "[] for file in files: try: rel_file = pathlib.Path(file).relative_to(work_dir).as_posix() except", "self.__include_sub_dirs(f.iterdir(), file_set) def resolve_upload_files(self, upload_files: Dict[pathlib.Path, str]): \"\"\"Resolve the files", "h_file not in already_listed: self.host_files.append(h_file) already_listed.add(h_file) # '/' in the", "len_s] != s: return False off += len_s i +=", "from 
command line arguments\"\"\" def __init__(self, args: List[str], directory: pathlib.Path,", "filter_upload_files(self, files: List[pathlib.Path]) -> List[pathlib.Path]: \"\"\"Filter uploaded files by this", "True # ...and there is existing parent directory, perhaps for", "o_file.is_absolute() and '..' not in o_file.as_posix(): # the file does", "a_name += '/' if file_exists and o_file.is_dir() and o_file not", "lex = shlex.shlex(o_arg, posix=True, punctuation_chars=self.additional_punc_chars) split = list(lex) modified_paths =", "in input_filters or []: self.host_files = filth.filter_upload_files(self.host_files) def __file_exists(self, path:", "absolute path # Not checking parents if not file_exists and", "in the end gets eaten away... fix for p in", "arguments\"\"\" def __init__(self, args: List[str], directory: pathlib.Path, output_dirs: List[str] =", "dividing it pieces yet for further analysis if a_name: self.command_args.append(a_name)", "self.output_dirs: # include files in sub directories self.__include_sub_dirs(o_file.iterdir(), already_listed) if", "= [] for part in split: a_name = self.__file_exists(part, already_listed)", "a_name: modified_paths.append((part, a_name)) for m_part, m_name in modified_paths: o_arg =", "posix=True, punctuation_chars=self.additional_punc_chars) split = list(lex) modified_paths = [] for part", "exclude files by filters, perhaps? 
for filth in input_filters or", "in files: if f not in file_set: self.host_files.append(f) file_set.add(f) if", "and off != len_v: return False return True class FileResolver:", "else files res = [] # filter out files which", "[] for part in split: a_name = self.__file_exists(part, already_listed) if", "str, already_listed: Set[pathlib.Path], parent_check: bool = True) -> Optional[str]: \"\"\"", "o_parent.parent if file_exists: h_file, a_name = self.__archive_name_for(o_file) if h_file not", "str, include: bool): self.match_string = match_string self.exact = '*' not", "str) -> List[str]: \"\"\"Filter downloaded files by this pattern\"\"\" if", "self.command_args return cmd_args def detect_upload_files(self, files: Optional[Iterable[pathlib.Path]] = None) ->", "uploadable file into container, and path is changed to be", "it as it is... file_exists = o_file.exists() and not all([c", "if split[-1] != '' and off != len_v: return False", "not Windows compatible! lex = shlex.shlex(o_arg, posix=True, punctuation_chars=self.additional_punc_chars) split =", "and o_file.is_dir() and o_file not in self.output_dirs: # include files", "pattern\"\"\" if self.exact: return self.match_string == value split = self.match_string.split(\"*\")", "filth.filter_upload_files(self.host_files) def __file_exists(self, path: str, already_listed: Set[pathlib.Path], parent_check: bool =", "List['FileMatcher']: \"\"\"Parse pattens from a list\"\"\" res = [] for", "absolute path to refer a file path?\"\"\" # - use", "pathlib.Path) -> bool: \"\"\"Should use absolute path to refer a", "len_s = len(s) if len_s > 0: if len_v <", "self.match_string = match_string self.exact = '*' not in match_string self.absolute_path", "output_dirs: List[str] = None, do_resolve: bool = True, input_filters: List[FileMatcher]", "it is relative path to a file/directory... 
o_parent = o_file.parent", "file_exists: return a_name else: return None def __analyze(self): \"\"\"Analyze the", "directories\"\"\" for f in files: if f not in file_set:", "not file_exists and not parent_check and not \" \" in", "FileResolver: \"\"\"Resolve files from command line arguments\"\"\" def __init__(self, args:", "Method for evaluating the possible existence of input files and", "pathlib.Path(file).relative_to(work_dir).as_posix() except ValueError: if not self.include: res.append(file) continue if self.__match(rel_file)", "in (output_dirs or [])]) if do_resolve: # autodetect input files", "def __init__(self, match_string: str, include: bool): self.match_string = match_string self.exact", "len_v = len(value) s = split[0] len_s = len(s) if", "o_file.as_posix(): # the file does not exist, but it is", "= len(s) if len_s > 0: if len_v < i", "for p in file.parents: all_dirs.add(p) for file in filter(lambda f:", "# Not checking parents if not file_exists and not parent_check", "make sure also paths leading to output files are uploaded", "it is... 
file_exists = o_file.exists() and not all([c == '/'", "files by this pattern\"\"\" return list(filter(lambda f: self.__match(f.as_posix()) == self.include,", "file.parents: all_dirs.add(p) for file in filter(lambda f: not f.exists(), it_files):", "fix for p in range(len(path) - 1, 0, -1): if", "p = file.parent while not p.exists(): p = p.parent if", "+= 1 off += len_s if split[-1] != '' and", "# the file does not exist, but it is relative", "Tuple[pathlib.Path, str]: \"\"\"Resolve host file and archive name for uploaded", "not in match_string self.absolute_path = match_string.startswith('/') self.include = include @classmethod", "relative to working directory res = [] for file in", "= p.parent if p not in all_dirs: res.append(p) return res", "file.parent while not p.exists(): p = p.parent if p not", "output o_parent = o_parent.parent if file_exists: h_file, a_name = self.__archive_name_for(o_file)", "are uploaded all_dirs = set() for file in res: all_dirs.add(file)", "len_s i += 1 while i < len(split): s =", "files are uploaded all_dirs = set() for file in res:", "not self.include: res.append(file) continue if self.__match(rel_file) == self.include: res.append(file) return", "\"\"\"Match value with this pattern\"\"\" if self.exact: return self.match_string ==", "None): self.original_args = args self.directory = directory self.host_files: List[pathlib.Path] =", "if file_exists: h_file, a_name = self.__archive_name_for(o_file) if h_file not in", "might split command (On top of shlex basic) self.additional_punc_chars =", "directory for output p = file.parent while not p.exists(): p", "file_set: Set[pathlib.Path]): \"\"\"Include files from sub directories\"\"\" for f in", "and o_parent and o_parent.as_posix() != '.': if o_parent.is_dir() and o_parent", "str]): \"\"\"Resolve the files to upload\"\"\" for up_file in self.detect_upload_files():", "as it is... 
file_exists = o_file.exists() and not all([c ==", "matching files relative to working directory res = [] for", "file/directory, it is marked as uploadable file into container, and", "= directory self.host_files: List[pathlib.Path] = [] self.command_args = args.copy() #", "= len(value) s = split[0] len_s = len(s) if len_s", "for f in files: if f not in file_set: self.host_files.append(f)", "it_files = sorted(self.host_files) if files is None else files res", "self.include = include @classmethod def parse(cls, match_strings: List[str]) -> List['FileMatcher']:", "while i < len(split): s = split[i] len_s = len(s)", "= True, input_filters: List[FileMatcher] = None): self.original_args = args self.directory", "= match_string self.exact = '*' not in match_string self.absolute_path =", "from a list\"\"\" res = [] for m in match_strings:", "this pattern\"\"\" if self.exact: return self.match_string == value split =", "a pattern\"\"\" def __init__(self, match_string: str, include: bool): self.match_string =", "already_listed: Set[pathlib.Path], parent_check: bool = True) -> Optional[str]: \"\"\" Method", "attempt to copy '/', leave it as it is... file_exists", "\"\"\"Analyze the command line\"\"\" self.command_args = [] already_listed: Set[pathlib.Path] =", "for m in match_strings: if m.startswith('^'): res.append(FileMatcher(m[1:], include=False)) else: res.append(FileMatcher(m,", "bool): self.match_string = match_string self.exact = '*' not in match_string", "command (On top of shlex basic) self.additional_punc_chars = \"=,\" #", "# does file/dir exists? No attempt to copy '/', leave", "len(value) s = split[0] len_s = len(s) if len_s >", "res.append(FileMatcher(m, include=True)) return res def filter_upload_files(self, files: List[pathlib.Path]) -> List[pathlib.Path]:", "in range(len(path) - 1, 0, -1): if path[p] != '/':", "path is changed to be relative of working directory of", "\"\"\" o_file = pathlib.Path(path) # does file/dir exists? 
No attempt", "no whitespace in arguments, we are processing this part later,", "filter(lambda f: not f.exists(), it_files): # file not exists, but", "file/dir exists? No attempt to copy '/', leave it as", "if f.is_dir(): self.__include_sub_dirs(f.iterdir(), file_set) def resolve_upload_files(self, upload_files: Dict[pathlib.Path, str]): \"\"\"Resolve", "case: when possible argument is coming from first layer (not", "leading to output files are uploaded all_dirs = set() for", "a_file = file.resolve().as_posix() a_file = a_file[1:] if a_file.startswith('/') else a_file", "in path]) # When filename contains potentially spaces, were are", "o_file.exists() and not all([c == '/' for c in path])", "typing import List, Optional, Dict, Set, Tuple, Iterable import shlex", "in self.output_dirs: res.append(file) if files is None: # make sure", "self.host_files.append(f) file_set.add(f) if f.is_dir(): self.__include_sub_dirs(f.iterdir(), file_set) def resolve_upload_files(self, upload_files: Dict[pathlib.Path,", "this pattern\"\"\" if self.absolute_path: # matching absolute files res =", "else: # matching files relative to working directory res =", "i += 1 off += len_s if split[-1] != ''", "continue # NOTE: Shlex not Windows compatible! 
lex = shlex.shlex(o_arg,", "self.host_files.append(pathlib.Path(dir)) self.output_dirs = set([pathlib.Path(d) for d in (output_dirs or [])])", "and archive name for uploaded file\"\"\" if cls.__use_absolute_path(file): h_file =", "file in it_files: if file.exists() or file in self.output_dirs: res.append(file)", "try: rel_file = pathlib.Path(file).relative_to(work_dir).as_posix() except ValueError: if not self.include: res.append(file)", "!= len_v: return False return True class FileResolver: \"\"\"Resolve files", "a_file @classmethod def __use_absolute_path(cls, file: pathlib.Path) -> bool: \"\"\"Should use", "files is None: # make sure also paths leading to", "import List, Optional, Dict, Set, Tuple, Iterable import shlex class", "1 while i < len(split): s = split[i] len_s =", "if files is None: # make sure also paths leading", "split: a_name = self.__file_exists(part, already_listed) if a_name: modified_paths.append((part, a_name)) for", "files which do not exist nor should exists for file", "directories. If there is local match for file/directory, it is", "= None, do_resolve: bool = True, input_filters: List[FileMatcher] = None):", "Tuple, Iterable import shlex class FileMatcher: \"\"\"Match files based on", "a file/directory... o_parent = o_file.parent while not file_exists and o_parent", "# Additional punctuation chars, whereas we might split command (On", "exist, but it is relative path to a file/directory... o_parent", "files res = [] # filter out files which do", "work_dir: str) -> List[str]: \"\"\"Filter downloaded files by this pattern\"\"\"", "by filters, perhaps? 
for filth in input_filters or []: self.host_files", "which do not exist nor should exists for file in", "re from typing import List, Optional, Dict, Set, Tuple, Iterable", "res.append(file) continue if self.__match(rel_file) == self.include: res.append(file) return res def", "return False i += 1 off += len_s if split[-1]", "upload\"\"\" for up_file in self.detect_upload_files(): host_file, arc_name = self.__archive_name_for(up_file) upload_files[host_file]", "o_parent and o_parent.as_posix() != '.': if o_parent.is_dir() and o_parent not", "files to upload\"\"\" it_files = sorted(self.host_files) if files is None", "value: str) -> bool: \"\"\"Match value with this pattern\"\"\" if", "= self.__file_exists(o_arg, already_listed, parent_check=False) # Potential path as argument, not", "if a_name: self.command_args.append(a_name) continue # NOTE: Shlex not Windows compatible!", "shlex class FileMatcher: \"\"\"Match files based on a pattern\"\"\" def", "self.match_string == value split = self.match_string.split(\"*\") i = 0 off", "the possible existence of input files and potential output directories.", "f not in file_set: self.host_files.append(f) file_set.add(f) if f.is_dir(): self.__include_sub_dirs(f.iterdir(), file_set)", "file: pathlib.Path) -> bool: \"\"\"Should use absolute path to refer", "range(len(path) - 1, 0, -1): if path[p] != '/': break", "List[pathlib.Path]: \"\"\"Detect files to upload\"\"\" it_files = sorted(self.host_files) if files", "use absolute path to refer a file path?\"\"\" # -", "list\"\"\" res = [] for m in match_strings: if m.startswith('^'):", "autodetect input files self.__analyze() # exclude files by filters, perhaps?", "not parent_check and not \" \" in path: return None", "path[p] != '/': break a_name += '/' if file_exists and", "them without contents for dir in output_dirs or []: self.host_files.append(pathlib.Path(dir))", "1 off += len_s if split[-1] != '' and off", "file in self.output_dirs: res.append(file) if files is None: # make", 
"input_filters or []: self.host_files = filth.filter_upload_files(self.host_files) def __file_exists(self, path: str,", "but it is relative path to a file/directory... o_parent =", "for file/directory, it is marked as uploadable file into container,", "def __match(self, value: str) -> bool: \"\"\"Match value with this", "if not file_exists and not o_file.is_absolute() and '..' not in", "except ValueError: if not self.include: res.append(file) continue if self.__match(rel_file) ==", "directory: pathlib.Path, output_dirs: List[str] = None, do_resolve: bool = True,", "and '..' not in o_file.as_posix(): # the file does not", "'/' in the end gets eaten away... fix for p", "downloaded files by this pattern\"\"\" if self.absolute_path: # matching absolute", "is valid path and has no whitespace in arguments, we", "in already_listed: self.host_files.append(h_file) already_listed.add(h_file) # '/' in the end gets", "a_name = self.__file_exists(o_arg, already_listed, parent_check=False) # Potential path as argument,", "file in files: if self.__match(file) == self.include: res.append(file) return res", "# make sure also paths leading to output files are", "f.exists(), it_files): # file not exists, but marked for upload", "in split: a_name = self.__file_exists(part, already_listed) if a_name: modified_paths.append((part, a_name))", "chars, whereas we might split command (On top of shlex", "'' and off != len_v: return False return True class", "upload_files: Dict[pathlib.Path, str]): \"\"\"Resolve the files to upload\"\"\" for up_file", "directory res = [] for file in files: try: rel_file", "self.output_dirs = set([pathlib.Path(d) for d in (output_dirs or [])]) if", "o_parent = o_file.parent while not file_exists and o_parent and o_parent.as_posix()", "self.host_files: List[pathlib.Path] = [] self.command_args = args.copy() # Additional punctuation", "None if not file_exists and not o_file.is_absolute() and '..' 
not", "self.match_string.split(\"*\") i = 0 off = 0 len_v = len(value)", "def parse(cls, match_strings: List[str]) -> List['FileMatcher']: \"\"\"Parse pattens from a", "o_parent not in self.host_files: file_exists = True # ...and there", "res.append(file) return res else: # matching files relative to working", "argument is coming from first layer (not quoted) of arguments,", "-> List[pathlib.Path]: \"\"\"Filter uploaded files by this pattern\"\"\" return list(filter(lambda", "str]: \"\"\"Resolve host file and archive name for uploaded file\"\"\"", "files by this pattern\"\"\" if self.absolute_path: # matching absolute files", "the command line\"\"\" self.command_args = [] already_listed: Set[pathlib.Path] = self.output_dirs.copy()", "input files and potential output directories. If there is local", "not o_file.is_absolute() and '..' not in o_file.as_posix(): # the file", "return cmd_args def detect_upload_files(self, files: Optional[Iterable[pathlib.Path]] = None) -> List[pathlib.Path]:", "output p = file.parent while not p.exists(): p = p.parent", "import shlex class FileMatcher: \"\"\"Match files based on a pattern\"\"\"", "return h_file, a_file @classmethod def __use_absolute_path(cls, file: pathlib.Path) -> bool:", "and o_parent not in self.host_files: file_exists = True # ...and", "it_files: if file.exists() or file in self.output_dirs: res.append(file) if files", "file a_file = file.as_posix() return h_file, a_file @classmethod def __use_absolute_path(cls,", "command is passed into container. 
Special case: when possible argument", "for output p = file.parent while not p.exists(): p =", "and path is changed to be relative of working directory", "self.exact = '*' not in match_string self.absolute_path = match_string.startswith('/') self.include", "!= '.': if o_parent.is_dir() and o_parent not in self.host_files: file_exists", "up_file in self.detect_upload_files(): host_file, arc_name = self.__archive_name_for(up_file) upload_files[host_file] = arc_name", "all_dirs.add(p) for file in filter(lambda f: not f.exists(), it_files): #", "self.detect_upload_files(): host_file, arc_name = self.__archive_name_for(up_file) upload_files[host_file] = arc_name cmd_args =", "path to a file/directory... o_parent = o_file.parent while not file_exists", "continue if self.__match(rel_file) == self.include: res.append(file) return res def __match(self,", "= self.output_dirs.copy() for o_arg in self.original_args: a_name = self.__file_exists(o_arg, already_listed,", "can support special markups such as % and & in", "'.': if o_parent.is_dir() and o_parent not in self.host_files: file_exists =", "for m_part, m_name in modified_paths: o_arg = o_arg.replace(m_part, m_name) self.command_args.append(o_arg)", "or []: self.host_files = filth.filter_upload_files(self.host_files) def __file_exists(self, path: str, already_listed:", "by this pattern\"\"\" return list(filter(lambda f: self.__match(f.as_posix()) == self.include, files))", "in all_dirs: res.append(p) return res @classmethod def __archive_name_for(cls, file: pathlib.Path)", "in o_file.as_posix(): # the file does not exist, but it", "resolve_upload_files(self, upload_files: Dict[pathlib.Path, str]): \"\"\"Resolve the files to upload\"\"\" for", "res @classmethod def __archive_name_for(cls, file: pathlib.Path) -> Tuple[pathlib.Path, str]: \"\"\"Resolve", "= 0 off = 0 len_v = len(value) s =", "- use absolute paths, if /../ used (ok, quite weak)", "compatible! 
lex = shlex.shlex(o_arg, posix=True, punctuation_chars=self.additional_punc_chars) split = list(lex) modified_paths", "if do_resolve: # autodetect input files self.__analyze() # exclude files", "0, -1): if path[p] != '/': break a_name += '/'", "# file not exists, but marked for upload - must", "file\"\"\" if cls.__use_absolute_path(file): h_file = file.resolve() a_file = file.resolve().as_posix() a_file", "file path?\"\"\" # - use absolute paths, if /../ used", "directories self.__include_sub_dirs(o_file.iterdir(), already_listed) if file_exists: return a_name else: return None", "Optional, Dict, Set, Tuple, Iterable import shlex class FileMatcher: \"\"\"Match", "= \"=,\" # these are output directories, upload them without", "perhaps for output o_parent = o_parent.parent if file_exists: h_file, a_name", "with this pattern\"\"\" if self.exact: return self.match_string == value split", "include=False)) else: res.append(FileMatcher(m, include=True)) return res def filter_upload_files(self, files: List[pathlib.Path])", "-> bool: \"\"\"Match value with this pattern\"\"\" if self.exact: return", "return False off += len_s i += 1 while i", "sub directories\"\"\" for f in files: if f not in", "a_file[1:] if a_file.startswith('/') else a_file else: h_file = file a_file", "\"\"\" Method for evaluating the possible existence of input files", "\"\"\"Filter uploaded files by this pattern\"\"\" return list(filter(lambda f: self.__match(f.as_posix())", "a_file.startswith('/') else a_file else: h_file = file a_file = file.as_posix()", "arc_name = self.__archive_name_for(up_file) upload_files[host_file] = arc_name cmd_args = self.command_args return", "is None else files res = [] # filter out", "__analyze(self): \"\"\"Analyze the command line\"\"\" self.command_args = [] already_listed: Set[pathlib.Path]", "in it_files: if file.exists() or file in self.output_dirs: res.append(file) if", "all_dirs = set() for file in res: all_dirs.add(file) for p", "from first layer (not quoted) of 
arguments, is valid path", "+= len_s if split[-1] != '' and off != len_v:", "working directory res = [] for file in files: try:", "are only interested about absolute path # Not checking parents", "o_arg in self.original_args: a_name = self.__file_exists(o_arg, already_listed, parent_check=False) # Potential", "the file does not exist, but it is relative path", "and o_parent.as_posix() != '.': if o_parent.is_dir() and o_parent not in", "filename contains potentially spaces, were are only interested about absolute", "__init__(self, args: List[str], directory: pathlib.Path, output_dirs: List[str] = None, do_resolve:", "of container, when command is passed into container. Special case:", "def __archive_name_for(cls, file: pathlib.Path) -> Tuple[pathlib.Path, str]: \"\"\"Resolve host file", "from typing import List, Optional, Dict, Set, Tuple, Iterable import", "file.resolve() a_file = file.resolve().as_posix() a_file = a_file[1:] if a_file.startswith('/') else", "for file in it_files: if file.exists() or file in self.output_dirs:", "file/directory... o_parent = o_file.parent while not file_exists and o_parent and", "break a_name += '/' if file_exists and o_file.is_dir() and o_file", "command line arguments\"\"\" def __init__(self, args: List[str], directory: pathlib.Path, output_dirs:", "self.original_args: a_name = self.__file_exists(o_arg, already_listed, parent_check=False) # Potential path as", "all_dirs: res.append(p) return res @classmethod def __archive_name_for(cls, file: pathlib.Path) ->", "return True class FileResolver: \"\"\"Resolve files from command line arguments\"\"\"", "return False return True class FileResolver: \"\"\"Resolve files from command", "as % and & in here. 
\"\"\" o_file = pathlib.Path(path)", "detect_upload_files(self, files: Optional[Iterable[pathlib.Path]] = None) -> List[pathlib.Path]: \"\"\"Detect files to", "res.append(file) if files is None: # make sure also paths", "not p.exists(): p = p.parent if p not in all_dirs:", "as argument, not dividing it pieces yet for further analysis", "Set[pathlib.Path]): \"\"\"Include files from sub directories\"\"\" for f in files:", "if self.exact: return self.match_string == value split = self.match_string.split(\"*\") i", "for p in range(len(path) - 1, 0, -1): if path[p]", "for file in files: try: rel_file = pathlib.Path(file).relative_to(work_dir).as_posix() except ValueError:", "= file.resolve().as_posix() a_file = a_file[1:] if a_file.startswith('/') else a_file else:", "if file_exists and o_file.is_dir() and o_file not in self.output_dirs: #", "# Potential path as argument, not dividing it pieces yet", "\"\"\"Resolve files from command line arguments\"\"\" def __init__(self, args: List[str],", "filter out files which do not exist nor should exists", "in files: if self.__match(file) == self.include: res.append(file) return res else:", "path and has no whitespace in arguments, we are processing", "files)) def filter_download_files(self, files: List[str], work_dir: str) -> List[str]: \"\"\"Filter", "pattern\"\"\" def __init__(self, match_string: str, include: bool): self.match_string = match_string", "marked as uploadable file into container, and path is changed", "a_name = self.__archive_name_for(o_file) if h_file not in already_listed: self.host_files.append(h_file) already_listed.add(h_file)", "self.command_args = [] already_listed: Set[pathlib.Path] = self.output_dirs.copy() for o_arg in", "= a_file[1:] if a_file.startswith('/') else a_file else: h_file = file", "p in range(len(path) - 1, 0, -1): if path[p] !=", "file_exists and not parent_check and not \" \" in path:", "also paths leading to output files are uploaded all_dirs =", "basic) self.additional_punc_chars = 
\"=,\" # these are output directories, upload", "to upload\"\"\" it_files = sorted(self.host_files) if files is None else", "it_files): # file not exists, but marked for upload -", "in output_dirs or []: self.host_files.append(pathlib.Path(dir)) self.output_dirs = set([pathlib.Path(d) for d", "paths leading to output files are uploaded all_dirs = set()", "# filter out files which do not exist nor should", "bool: \"\"\"Should use absolute path to refer a file path?\"\"\"", "not in self.output_dirs: # include files in sub directories self.__include_sub_dirs(o_file.iterdir(),", "modified_paths: o_arg = o_arg.replace(m_part, m_name) self.command_args.append(o_arg) def __include_sub_dirs(self, files: Iterable[pathlib.Path],", "= self.__archive_name_for(o_file) if h_file not in already_listed: self.host_files.append(h_file) already_listed.add(h_file) #", "if not file_exists and not parent_check and not \" \"", "output files are uploaded all_dirs = set() for file in", "file in res: all_dirs.add(file) for p in file.parents: all_dirs.add(p) for", "h_file = file a_file = file.as_posix() return h_file, a_file @classmethod", "if file_exists: return a_name else: return None def __analyze(self): \"\"\"Analyze", "matching absolute files res = [] for file in files:", "these are output directories, upload them without contents for dir", "whitespace in arguments, we are processing this part later, because", "o_arg.replace(m_part, m_name) self.command_args.append(o_arg) def __include_sub_dirs(self, files: Iterable[pathlib.Path], file_set: Set[pathlib.Path]): \"\"\"Include", "h_file, a_file @classmethod def __use_absolute_path(cls, file: pathlib.Path) -> bool: \"\"\"Should", "List, Optional, Dict, Set, Tuple, Iterable import shlex class FileMatcher:", "files in sub directories self.__include_sub_dirs(o_file.iterdir(), already_listed) if file_exists: return a_name", "possible existence of input files and potential output directories. 
If", "exists, but marked for upload - must mean some sub", "Set[pathlib.Path] = self.output_dirs.copy() for o_arg in self.original_args: a_name = self.__file_exists(o_arg,", "off) if off < 0: return False i += 1", "later, because we can support special markups such as %", "len_v: return False return True class FileResolver: \"\"\"Resolve files from", "+= len_s i += 1 while i < len(split): s", "FileMatcher: \"\"\"Match files based on a pattern\"\"\" def __init__(self, match_string:", "file does not exist, but it is relative path to", "while not p.exists(): p = p.parent if p not in", "...and there is existing parent directory, perhaps for output o_parent", "contains potentially spaces, were are only interested about absolute path", "shlex.shlex(o_arg, posix=True, punctuation_chars=self.additional_punc_chars) split = list(lex) modified_paths = [] for", "be relative of working directory of container, when command is", "= [] for file in files: try: rel_file = pathlib.Path(file).relative_to(work_dir).as_posix()", "= [] for m in match_strings: if m.startswith('^'): res.append(FileMatcher(m[1:], include=False))", "Additional punctuation chars, whereas we might split command (On top", "[])]) if do_resolve: # autodetect input files self.__analyze() # exclude", "here. \"\"\" o_file = pathlib.Path(path) # does file/dir exists? No", "files to upload\"\"\" for up_file in self.detect_upload_files(): host_file, arc_name =", "command line\"\"\" self.command_args = [] already_listed: Set[pathlib.Path] = self.output_dirs.copy() for", "+= '/' if file_exists and o_file.is_dir() and o_file not in", "by this pattern\"\"\" if self.absolute_path: # matching absolute files res", "off += len_s i += 1 while i < len(split):", "return res else: # matching files relative to working directory", "in arguments, we are processing this part later, because we", "= o_file.exists() and not all([c == '/' for c in", "and & in here. 
\"\"\" o_file = pathlib.Path(path) # does", "[] for m in match_strings: if m.startswith('^'): res.append(FileMatcher(m[1:], include=False)) else:", "o_arg = o_arg.replace(m_part, m_name) self.command_args.append(o_arg) def __include_sub_dirs(self, files: Iterable[pathlib.Path], file_set:", "= args self.directory = directory self.host_files: List[pathlib.Path] = [] self.command_args", "possible argument is coming from first layer (not quoted) of", "support special markups such as % and & in here.", "in filter(lambda f: not f.exists(), it_files): # file not exists,", "return res @classmethod def __archive_name_for(cls, file: pathlib.Path) -> Tuple[pathlib.Path, str]:", "include @classmethod def parse(cls, match_strings: List[str]) -> List['FileMatcher']: \"\"\"Parse pattens", "is coming from first layer (not quoted) of arguments, is", "include files in sub directories self.__include_sub_dirs(o_file.iterdir(), already_listed) if file_exists: return", "exists? No attempt to copy '/', leave it as it", "arguments, is valid path and has no whitespace in arguments,", "return None if not file_exists and not o_file.is_absolute() and '..'", "= arc_name cmd_args = self.command_args return cmd_args def detect_upload_files(self, files:", "pathlib.Path(path) # does file/dir exists? 
No attempt to copy '/',", "= o_parent.parent if file_exists: h_file, a_name = self.__archive_name_for(o_file) if h_file", "o_parent = o_parent.parent if file_exists: h_file, a_name = self.__archive_name_for(o_file) if", "file in filter(lambda f: not f.exists(), it_files): # file not", "in res: all_dirs.add(file) for p in file.parents: all_dirs.add(p) for file", "if p not in all_dirs: res.append(p) return res @classmethod def", "self.absolute_path: # matching absolute files res = [] for file", "modified_paths.append((part, a_name)) for m_part, m_name in modified_paths: o_arg = o_arg.replace(m_part,", "exist nor should exists for file in it_files: if file.exists()", "file.exists() or file in self.output_dirs: res.append(file) if files is None:", "value with this pattern\"\"\" if self.exact: return self.match_string == value", "if not self.include: res.append(file) continue if self.__match(rel_file) == self.include: res.append(file)", "include: bool): self.match_string = match_string self.exact = '*' not in", "= split[0] len_s = len(s) if len_s > 0: if", "# these are output directories, upload them without contents for", "[] for file in files: if self.__match(file) == self.include: res.append(file)", "\"\"\"Resolve the files to upload\"\"\" for up_file in self.detect_upload_files(): host_file,", "self.__match(f.as_posix()) == self.include, files)) def filter_download_files(self, files: List[str], work_dir: str)", "a_name)) for m_part, m_name in modified_paths: o_arg = o_arg.replace(m_part, m_name)", "if o_parent.is_dir() and o_parent not in self.host_files: file_exists = True", "it is marked as uploadable file into container, and path", "is marked as uploadable file into container, and path is", "None, do_resolve: bool = True, input_filters: List[FileMatcher] = None): self.original_args", "upload_files[host_file] = arc_name cmd_args = self.command_args return cmd_args def detect_upload_files(self,", "= self.__file_exists(part, already_listed) if a_name: 
modified_paths.append((part, a_name)) for m_part, m_name", "set() for file in res: all_dirs.add(file) for p in file.parents:", "file_set.add(f) if f.is_dir(): self.__include_sub_dirs(f.iterdir(), file_set) def resolve_upload_files(self, upload_files: Dict[pathlib.Path, str]):", "= set() for file in res: all_dirs.add(file) for p in", "but marked for upload - must mean some sub directory", "(not quoted) of arguments, is valid path and has no", "o_file.parent while not file_exists and o_parent and o_parent.as_posix() != '.':", "split[i] len_s = len(s) if len_s > 0: off =", "for file in filter(lambda f: not f.exists(), it_files): # file", "= [] for file in files: if self.__match(file) == self.include:", "already_listed: self.host_files.append(h_file) already_listed.add(h_file) # '/' in the end gets eaten", "passed into container. Special case: when possible argument is coming", "interested about absolute path # Not checking parents if not", "in file_set: self.host_files.append(f) file_set.add(f) if f.is_dir(): self.__include_sub_dirs(f.iterdir(), file_set) def resolve_upload_files(self,", "if self.__match(rel_file) == self.include: res.append(file) return res def __match(self, value:", "= [] self.command_args = args.copy() # Additional punctuation chars, whereas", "relative path to a file/directory... o_parent = o_file.parent while not", "coming from first layer (not quoted) of arguments, is valid", "files relative to working directory res = [] for file", "file and archive name for uploaded file\"\"\" if cls.__use_absolute_path(file): h_file", "not file_exists and o_parent and o_parent.as_posix() != '.': if o_parent.is_dir()", "self.include: res.append(file) return res def __match(self, value: str) -> bool:", "False return True class FileResolver: \"\"\"Resolve files from command line", "directory of container, when command is passed into container. 
Special", "some sub directory for output p = file.parent while not", "do_resolve: bool = True, input_filters: List[FileMatcher] = None): self.original_args =", "top of shlex basic) self.additional_punc_chars = \"=,\" # these are", "such as % and & in here. \"\"\" o_file =", "files: if f not in file_set: self.host_files.append(f) file_set.add(f) if f.is_dir():", "!= s: return False off += len_s i += 1", "do not exist nor should exists for file in it_files:", "0: off = value.find(s, off) if off < 0: return", "= 0 len_v = len(value) s = split[0] len_s =", "files: List[pathlib.Path]) -> List[pathlib.Path]: \"\"\"Filter uploaded files by this pattern\"\"\"", "path: return None if not file_exists and not o_file.is_absolute() and", "a_name: self.command_args.append(a_name) continue # NOTE: Shlex not Windows compatible! lex", "files self.__analyze() # exclude files by filters, perhaps? for filth", "pathlib import re from typing import List, Optional, Dict, Set,", "self.include: res.append(file) continue if self.__match(rel_file) == self.include: res.append(file) return res", "input files self.__analyze() # exclude files by filters, perhaps? for", "files and potential output directories. 
If there is local match", "f: not f.exists(), it_files): # file not exists, but marked", "name for uploaded file\"\"\" if cls.__use_absolute_path(file): h_file = file.resolve() a_file", "-> Optional[str]: \"\"\" Method for evaluating the possible existence of", "# matching files relative to working directory res = []", "res: all_dirs.add(file) for p in file.parents: all_dirs.add(p) for file in", "self.additional_punc_chars = \"=,\" # these are output directories, upload them", "for uploaded file\"\"\" if cls.__use_absolute_path(file): h_file = file.resolve() a_file =", "When filename contains potentially spaces, were are only interested about", "__use_absolute_path(cls, file: pathlib.Path) -> bool: \"\"\"Should use absolute path to", "cmd_args def detect_upload_files(self, files: Optional[Iterable[pathlib.Path]] = None) -> List[pathlib.Path]: \"\"\"Detect", "directory, perhaps for output o_parent = o_parent.parent if file_exists: h_file,", "self.output_dirs.copy() for o_arg in self.original_args: a_name = self.__file_exists(o_arg, already_listed, parent_check=False)", "part in split: a_name = self.__file_exists(part, already_listed) if a_name: modified_paths.append((part,", "file_set) def resolve_upload_files(self, upload_files: Dict[pathlib.Path, str]): \"\"\"Resolve the files to", "punctuation chars, whereas we might split command (On top of", "files from sub directories\"\"\" for f in files: if f", "local match for file/directory, it is marked as uploadable file", "first layer (not quoted) of arguments, is valid path and", "already_listed.add(h_file) # '/' in the end gets eaten away... 
fix", "value[i:i + len_s] != s: return False off += len_s", "yet for further analysis if a_name: self.command_args.append(a_name) continue # NOTE:", "pattern\"\"\" if self.absolute_path: # matching absolute files res = []", "str) -> bool: \"\"\"Match value with this pattern\"\"\" if self.exact:", "f.is_dir(): self.__include_sub_dirs(f.iterdir(), file_set) def resolve_upload_files(self, upload_files: Dict[pathlib.Path, str]): \"\"\"Resolve the", "p.exists(): p = p.parent if p not in all_dirs: res.append(p)", "o_file not in self.output_dirs: # include files in sub directories", "< len(split): s = split[i] len_s = len(s) if len_s", "1, 0, -1): if path[p] != '/': break a_name +=", "output directories, upload them without contents for dir in output_dirs", "'/': break a_name += '/' if file_exists and o_file.is_dir() and", "or []: self.host_files.append(pathlib.Path(dir)) self.output_dirs = set([pathlib.Path(d) for d in (output_dirs", "self.__file_exists(part, already_listed) if a_name: modified_paths.append((part, a_name)) for m_part, m_name in", "only interested about absolute path # Not checking parents if", "[] already_listed: Set[pathlib.Path] = self.output_dirs.copy() for o_arg in self.original_args: a_name", "[]: self.host_files = filth.filter_upload_files(self.host_files) def __file_exists(self, path: str, already_listed: Set[pathlib.Path],", "already_listed) if file_exists: return a_name else: return None def __analyze(self):", "= self.__archive_name_for(up_file) upload_files[host_file] = arc_name cmd_args = self.command_args return cmd_args", "and has no whitespace in arguments, we are processing this", "if self.absolute_path: # matching absolute files res = [] for", "if cls.__use_absolute_path(file): h_file = file.resolve() a_file = file.resolve().as_posix() a_file =", "to a file/directory... 
o_parent = o_file.parent while not file_exists and", "res = [] # filter out files which do not", "self.host_files = filth.filter_upload_files(self.host_files) def __file_exists(self, path: str, already_listed: Set[pathlib.Path], parent_check:", "in self.host_files: file_exists = True # ...and there is existing", "not \" \" in path: return None if not file_exists", "args: List[str], directory: pathlib.Path, output_dirs: List[str] = None, do_resolve: bool", "o_parent.as_posix() != '.': if o_parent.is_dir() and o_parent not in self.host_files:", "self.absolute_path = match_string.startswith('/') self.include = include @classmethod def parse(cls, match_strings:", "split[0] len_s = len(s) if len_s > 0: if len_v", "if a_name: modified_paths.append((part, a_name)) for m_part, m_name in modified_paths: o_arg", "in file.parents: all_dirs.add(p) for file in filter(lambda f: not f.exists(),", "- must mean some sub directory for output p =", "Windows compatible! lex = shlex.shlex(o_arg, posix=True, punctuation_chars=self.additional_punc_chars) split = list(lex)", "Shlex not Windows compatible! lex = shlex.shlex(o_arg, posix=True, punctuation_chars=self.additional_punc_chars) split", "# NOTE: Shlex not Windows compatible! 
lex = shlex.shlex(o_arg, posix=True,", "file in files: try: rel_file = pathlib.Path(file).relative_to(work_dir).as_posix() except ValueError: if", "relative of working directory of container, when command is passed", "# matching absolute files res = [] for file in", "for dir in output_dirs or []: self.host_files.append(pathlib.Path(dir)) self.output_dirs = set([pathlib.Path(d)", "-1): if path[p] != '/': break a_name += '/' if", "in self.original_args: a_name = self.__file_exists(o_arg, already_listed, parent_check=False) # Potential path", "if off < 0: return False i += 1 off", "analysis if a_name: self.command_args.append(a_name) continue # NOTE: Shlex not Windows", "= True # ...and there is existing parent directory, perhaps", "-> List[pathlib.Path]: \"\"\"Detect files to upload\"\"\" it_files = sorted(self.host_files) if", "i < len(split): s = split[i] len_s = len(s) if", "0: return False i += 1 off += len_s if", "p.parent if p not in all_dirs: res.append(p) return res @classmethod", "List[pathlib.Path]) -> List[pathlib.Path]: \"\"\"Filter uploaded files by this pattern\"\"\" return", "container, when command is passed into container. Special case: when", "return None def __analyze(self): \"\"\"Analyze the command line\"\"\" self.command_args =", "== self.include: res.append(file) return res def __match(self, value: str) ->", "files from command line arguments\"\"\" def __init__(self, args: List[str], directory:", "self.host_files: file_exists = True # ...and there is existing parent", "= None): self.original_args = args self.directory = directory self.host_files: List[pathlib.Path]", "without contents for dir in output_dirs or []: self.host_files.append(pathlib.Path(dir)) self.output_dirs", "== self.include: res.append(file) return res else: # matching files relative", "special markups such as % and & in here. 
\"\"\"", "Optional[Iterable[pathlib.Path]] = None) -> List[pathlib.Path]: \"\"\"Detect files to upload\"\"\" it_files", "not exists, but marked for upload - must mean some", "output_dirs or []: self.host_files.append(pathlib.Path(dir)) self.output_dirs = set([pathlib.Path(d) for d in", "h_file = file.resolve() a_file = file.resolve().as_posix() a_file = a_file[1:] if", "a_file = a_file[1:] if a_file.startswith('/') else a_file else: h_file =", "set([pathlib.Path(d) for d in (output_dirs or [])]) if do_resolve: #", "pattens from a list\"\"\" res = [] for m in", "filth in input_filters or []: self.host_files = filth.filter_upload_files(self.host_files) def __file_exists(self,", "and o_file not in self.output_dirs: # include files in sub", "= shlex.shlex(o_arg, posix=True, punctuation_chars=self.additional_punc_chars) split = list(lex) modified_paths = []", "is... file_exists = o_file.exists() and not all([c == '/' for", "if m.startswith('^'): res.append(FileMatcher(m[1:], include=False)) else: res.append(FileMatcher(m, include=True)) return res def", "checking parents if not file_exists and not parent_check and not", "len_s if split[-1] != '' and off != len_v: return", "files: try: rel_file = pathlib.Path(file).relative_to(work_dir).as_posix() except ValueError: if not self.include:", "of working directory of container, when command is passed into", "def __init__(self, args: List[str], directory: pathlib.Path, output_dirs: List[str] = None,", "res = [] for file in files: try: rel_file =", "List[str] = None, do_resolve: bool = True, input_filters: List[FileMatcher] =", "c in path]) # When filename contains potentially spaces, were", "-> List[str]: \"\"\"Filter downloaded files by this pattern\"\"\" if self.absolute_path:", "file.resolve().as_posix() a_file = a_file[1:] if a_file.startswith('/') else a_file else: h_file", "None) -> List[pathlib.Path]: \"\"\"Detect files to upload\"\"\" it_files = sorted(self.host_files)", "punctuation_chars=self.additional_punc_chars) 
split = list(lex) modified_paths = [] for part in", "List[str]: \"\"\"Filter downloaded files by this pattern\"\"\" if self.absolute_path: #", "split[-1] != '' and off != len_v: return False return", "= file.parent while not p.exists(): p = p.parent if p", "self.__analyze() # exclude files by filters, perhaps? for filth in", "= split[i] len_s = len(s) if len_s > 0: off", "line arguments\"\"\" def __init__(self, args: List[str], directory: pathlib.Path, output_dirs: List[str]", "container. Special case: when possible argument is coming from first", "into container, and path is changed to be relative of", "path # Not checking parents if not file_exists and not", "were are only interested about absolute path # Not checking", "-> Tuple[pathlib.Path, str]: \"\"\"Resolve host file and archive name for", "all_dirs.add(file) for p in file.parents: all_dirs.add(p) for file in filter(lambda", "def detect_upload_files(self, files: Optional[Iterable[pathlib.Path]] = None) -> List[pathlib.Path]: \"\"\"Detect files", "Set, Tuple, Iterable import shlex class FileMatcher: \"\"\"Match files based", "parent_check: bool = True) -> Optional[str]: \"\"\" Method for evaluating", "we might split command (On top of shlex basic) self.additional_punc_chars", "def filter_download_files(self, files: List[str], work_dir: str) -> List[str]: \"\"\"Filter downloaded", "\" \" in path: return None if not file_exists and", "modified_paths = [] for part in split: a_name = self.__file_exists(part,", "len_s or value[i:i + len_s] != s: return False off", "ValueError: if not self.include: res.append(file) continue if self.__match(rel_file) == self.include:", "argument, not dividing it pieces yet for further analysis if", "parents if not file_exists and not parent_check and not \"", "= file.resolve() a_file = file.resolve().as_posix() a_file = a_file[1:] if a_file.startswith('/')", "self.directory = directory self.host_files: List[pathlib.Path] = [] self.command_args = args.copy()", "s: return False off 
+= len_s i += 1 while", "off = value.find(s, off) if off < 0: return False", "\"=,\" # these are output directories, upload them without contents", "Dict[pathlib.Path, str]): \"\"\"Resolve the files to upload\"\"\" for up_file in", "already_listed, parent_check=False) # Potential path as argument, not dividing it", "False off += len_s i += 1 while i <", "of arguments, is valid path and has no whitespace in", "to refer a file path?\"\"\" # - use absolute paths,", "split = list(lex) modified_paths = [] for part in split:", "Iterable[pathlib.Path], file_set: Set[pathlib.Path]): \"\"\"Include files from sub directories\"\"\" for f", "are output directories, upload them without contents for dir in", "uploaded all_dirs = set() for file in res: all_dirs.add(file) for", "self.include: res.append(file) return res else: # matching files relative to", "@classmethod def __use_absolute_path(cls, file: pathlib.Path) -> bool: \"\"\"Should use absolute", "self.command_args.append(o_arg) def __include_sub_dirs(self, files: Iterable[pathlib.Path], file_set: Set[pathlib.Path]): \"\"\"Include files from", "file_exists = o_file.exists() and not all([c == '/' for c", "for output o_parent = o_parent.parent if file_exists: h_file, a_name =", "files: Iterable[pathlib.Path], file_set: Set[pathlib.Path]): \"\"\"Include files from sub directories\"\"\" for", "not in already_listed: self.host_files.append(h_file) already_listed.add(h_file) # '/' in the end", "res def filter_upload_files(self, files: List[pathlib.Path]) -> List[pathlib.Path]: \"\"\"Filter uploaded files", "len_v < i + len_s or value[i:i + len_s] !=", "filters, perhaps? for filth in input_filters or []: self.host_files =", "away... 
fix for p in range(len(path) - 1, 0, -1):", "pieces yet for further analysis if a_name: self.command_args.append(a_name) continue #", "not in all_dirs: res.append(p) return res @classmethod def __archive_name_for(cls, file:", "split = self.match_string.split(\"*\") i = 0 off = 0 len_v", "True class FileResolver: \"\"\"Resolve files from command line arguments\"\"\" def", "not all([c == '/' for c in path]) # When", "= file a_file = file.as_posix() return h_file, a_file @classmethod def", "valid path and has no whitespace in arguments, we are", "# autodetect input files self.__analyze() # exclude files by filters,", "because we can support special markups such as % and", "off = 0 len_v = len(value) s = split[0] len_s", "bool = True) -> Optional[str]: \"\"\" Method for evaluating the", "already_listed: Set[pathlib.Path] = self.output_dirs.copy() for o_arg in self.original_args: a_name =", "host file and archive name for uploaded file\"\"\" if cls.__use_absolute_path(file):", "True) -> Optional[str]: \"\"\" Method for evaluating the possible existence", "and not all([c == '/' for c in path]) #", "0 off = 0 len_v = len(value) s = split[0]", "in path: return None if not file_exists and not o_file.is_absolute()", "whereas we might split command (On top of shlex basic)", "is existing parent directory, perhaps for output o_parent = o_parent.parent", "\" in path: return None if not file_exists and not", "res = [] for m in match_strings: if m.startswith('^'): res.append(FileMatcher(m[1:],", "import re from typing import List, Optional, Dict, Set, Tuple,", "+ len_s] != s: return False off += len_s i", "True, input_filters: List[FileMatcher] = None): self.original_args = args self.directory =", "file_exists and o_file.is_dir() and o_file not in self.output_dirs: # include", "= '*' not in match_string self.absolute_path = match_string.startswith('/') self.include =", "res def __match(self, value: str) -> bool: \"\"\"Match value with", "len(s) if len_s > 0: if len_v < i +", 
"class FileResolver: \"\"\"Resolve files from command line arguments\"\"\" def __init__(self,", "res.append(FileMatcher(m[1:], include=False)) else: res.append(FileMatcher(m, include=True)) return res def filter_upload_files(self, files:", "s = split[i] len_s = len(s) if len_s > 0:", "# exclude files by filters, perhaps? for filth in input_filters", "further analysis if a_name: self.command_args.append(a_name) continue # NOTE: Shlex not", "< i + len_s or value[i:i + len_s] != s:", "about absolute path # Not checking parents if not file_exists", "len_s > 0: if len_v < i + len_s or", "eaten away... fix for p in range(len(path) - 1, 0,", "nor should exists for file in it_files: if file.exists() or", "for further analysis if a_name: self.command_args.append(a_name) continue # NOTE: Shlex", "of input files and potential output directories. If there is", "None def __analyze(self): \"\"\"Analyze the command line\"\"\" self.command_args = []", "the end gets eaten away... fix for p in range(len(path)", "self.exact: return self.match_string == value split = self.match_string.split(\"*\") i =", "file.as_posix() return h_file, a_file @classmethod def __use_absolute_path(cls, file: pathlib.Path) ->", "list(filter(lambda f: self.__match(f.as_posix()) == self.include, files)) def filter_download_files(self, files: List[str],", "m_name in modified_paths: o_arg = o_arg.replace(m_part, m_name) self.command_args.append(o_arg) def __include_sub_dirs(self,", "and potential output directories. If there is local match for", "= [] already_listed: Set[pathlib.Path] = self.output_dirs.copy() for o_arg in self.original_args:", "copy '/', leave it as it is... 
file_exists = o_file.exists()", "> 0: if len_v < i + len_s or value[i:i", "match_string: str, include: bool): self.match_string = match_string self.exact = '*'", "file not exists, but marked for upload - must mean", "used (ok, quite weak) return file.is_absolute() or (\"..\" in file.as_posix())", "__init__(self, match_string: str, include: bool): self.match_string = match_string self.exact =", "else: return None def __analyze(self): \"\"\"Analyze the command line\"\"\" self.command_args", "def filter_upload_files(self, files: List[pathlib.Path]) -> List[pathlib.Path]: \"\"\"Filter uploaded files by", "List[pathlib.Path]: \"\"\"Filter uploaded files by this pattern\"\"\" return list(filter(lambda f:", "List[str], directory: pathlib.Path, output_dirs: List[str] = None, do_resolve: bool =", "for upload - must mean some sub directory for output", "when possible argument is coming from first layer (not quoted)", "in here. \"\"\" o_file = pathlib.Path(path) # does file/dir exists?", "or [])]) if do_resolve: # autodetect input files self.__analyze() #", "= None) -> List[pathlib.Path]: \"\"\"Detect files to upload\"\"\" it_files =", "off += len_s if split[-1] != '' and off !=", "[] # filter out files which do not exist nor", "f in files: if f not in file_set: self.host_files.append(f) file_set.add(f)", "'/' if file_exists and o_file.is_dir() and o_file not in self.output_dirs:", "0 len_v = len(value) s = split[0] len_s = len(s)", "None else files res = [] # filter out files", "we can support special markups such as % and &", "self.include, files)) def filter_download_files(self, files: List[str], work_dir: str) -> List[str]:", "Not checking parents if not file_exists and not parent_check and", "on a pattern\"\"\" def __init__(self, match_string: str, include: bool): self.match_string", "if /../ used (ok, quite weak) return file.is_absolute() or (\"..\"", "uploaded files by this pattern\"\"\" return list(filter(lambda f: self.__match(f.as_posix()) ==", "return res def 
__match(self, value: str) -> bool: \"\"\"Match value", "Special case: when possible argument is coming from first layer", "m_part, m_name in modified_paths: o_arg = o_arg.replace(m_part, m_name) self.command_args.append(o_arg) def", "not in self.host_files: file_exists = True # ...and there is", "= o_arg.replace(m_part, m_name) self.command_args.append(o_arg) def __include_sub_dirs(self, files: Iterable[pathlib.Path], file_set: Set[pathlib.Path]):", "arguments, we are processing this part later, because we can", "markups such as % and & in here. \"\"\" o_file", "file: pathlib.Path) -> Tuple[pathlib.Path, str]: \"\"\"Resolve host file and archive", "= list(lex) modified_paths = [] for part in split: a_name", "parse(cls, match_strings: List[str]) -> List['FileMatcher']: \"\"\"Parse pattens from a list\"\"\"", "of shlex basic) self.additional_punc_chars = \"=,\" # these are output", "sub directories self.__include_sub_dirs(o_file.iterdir(), already_listed) if file_exists: return a_name else: return", "return a_name else: return None def __analyze(self): \"\"\"Analyze the command", "class FileMatcher: \"\"\"Match files based on a pattern\"\"\" def __init__(self,", "the files to upload\"\"\" for up_file in self.detect_upload_files(): host_file, arc_name", "len_s > 0: off = value.find(s, off) if off <", "for file in res: all_dirs.add(file) for p in file.parents: all_dirs.add(p)", "must mean some sub directory for output p = file.parent", "\"\"\"Resolve host file and archive name for uploaded file\"\"\" if", "as uploadable file into container, and path is changed to", "off < 0: return False i += 1 off +=", "not in file_set: self.host_files.append(f) file_set.add(f) if f.is_dir(): self.__include_sub_dirs(f.iterdir(), file_set) def", "sorted(self.host_files) if files is None else files res = []", "= self.command_args return cmd_args def detect_upload_files(self, files: Optional[Iterable[pathlib.Path]] = None)", "into container. 
Special case: when possible argument is coming from", "i + len_s or value[i:i + len_s] != s: return", "Optional[str]: \"\"\" Method for evaluating the possible existence of input", "== value split = self.match_string.split(\"*\") i = 0 off =", "sure also paths leading to output files are uploaded all_dirs", "Iterable import shlex class FileMatcher: \"\"\"Match files based on a", "\"\"\"Filter downloaded files by this pattern\"\"\" if self.absolute_path: # matching", "upload\"\"\" it_files = sorted(self.host_files) if files is None else files", "does not exist, but it is relative path to a", "output directories. If there is local match for file/directory, it", "match_string self.absolute_path = match_string.startswith('/') self.include = include @classmethod def parse(cls,", "file into container, and path is changed to be relative", "o_parent.is_dir() and o_parent not in self.host_files: file_exists = True #", "not dividing it pieces yet for further analysis if a_name:", "= sorted(self.host_files) if files is None else files res =", "\"\"\"Should use absolute path to refer a file path?\"\"\" #", "!= '/': break a_name += '/' if file_exists and o_file.is_dir()", "it pieces yet for further analysis if a_name: self.command_args.append(a_name) continue", "host_file, arc_name = self.__archive_name_for(up_file) upload_files[host_file] = arc_name cmd_args = self.command_args", "for filth in input_filters or []: self.host_files = filth.filter_upload_files(self.host_files) def", "def __use_absolute_path(cls, file: pathlib.Path) -> bool: \"\"\"Should use absolute path", "match_string.startswith('/') self.include = include @classmethod def parse(cls, match_strings: List[str]) ->", "sub directory for output p = file.parent while not p.exists():", "in self.detect_upload_files(): host_file, arc_name = self.__archive_name_for(up_file) upload_files[host_file] = arc_name cmd_args", "\"\"\"Include files from sub directories\"\"\" for f in files: if", "and not parent_check and not \" 
\" in path: return", "based on a pattern\"\"\" def __init__(self, match_string: str, include: bool):", "if self.__match(file) == self.include: res.append(file) return res else: # matching", "<filename>cincan/file_tool.py import pathlib import re from typing import List, Optional,", "evaluating the possible existence of input files and potential output", "= self.match_string.split(\"*\") i = 0 off = 0 len_v =", "file_set: self.host_files.append(f) file_set.add(f) if f.is_dir(): self.__include_sub_dirs(f.iterdir(), file_set) def resolve_upload_files(self, upload_files:", "False i += 1 off += len_s if split[-1] !=", "No attempt to copy '/', leave it as it is...", "to be relative of working directory of container, when command", "__archive_name_for(cls, file: pathlib.Path) -> Tuple[pathlib.Path, str]: \"\"\"Resolve host file and", "= value.find(s, off) if off < 0: return False i", "import pathlib import re from typing import List, Optional, Dict,", "absolute paths, if /../ used (ok, quite weak) return file.is_absolute()", "rel_file = pathlib.Path(file).relative_to(work_dir).as_posix() except ValueError: if not self.include: res.append(file) continue", "container, and path is changed to be relative of working", "- 1, 0, -1): if path[p] != '/': break a_name", "absolute files res = [] for file in files: if", "self.__include_sub_dirs(o_file.iterdir(), already_listed) if file_exists: return a_name else: return None def", "in modified_paths: o_arg = o_arg.replace(m_part, m_name) self.command_args.append(o_arg) def __include_sub_dirs(self, files:", "= file.as_posix() return h_file, a_file @classmethod def __use_absolute_path(cls, file: pathlib.Path)", "+= 1 while i < len(split): s = split[i] len_s", "input_filters: List[FileMatcher] = None): self.original_args = args self.directory = directory", "in sub directories self.__include_sub_dirs(o_file.iterdir(), already_listed) if file_exists: return a_name else:", "files: List[str], work_dir: str) -> List[str]: \"\"\"Filter 
downloaded files by", "= len(s) if len_s > 0: off = value.find(s, off)", "value.find(s, off) if off < 0: return False i +=", "< 0: return False i += 1 off += len_s", "'/', leave it as it is... file_exists = o_file.exists() and", "files: if self.__match(file) == self.include: res.append(file) return res else: #", "self.host_files.append(h_file) already_listed.add(h_file) # '/' in the end gets eaten away...", "None: # make sure also paths leading to output files", "all([c == '/' for c in path]) # When filename", "gets eaten away... fix for p in range(len(path) - 1,", "while not file_exists and o_parent and o_parent.as_posix() != '.': if", "files res = [] for file in files: if self.__match(file)", "pathlib.Path, output_dirs: List[str] = None, do_resolve: bool = True, input_filters:", "a file path?\"\"\" # - use absolute paths, if /../", "'..' not in o_file.as_posix(): # the file does not exist,", "is passed into container. Special case: when possible argument is", "a_file else: h_file = file a_file = file.as_posix() return h_file,", "file_exists = True # ...and there is existing parent directory,", "for part in split: a_name = self.__file_exists(part, already_listed) if a_name:", "not f.exists(), it_files): # file not exists, but marked for", "@classmethod def parse(cls, match_strings: List[str]) -> List['FileMatcher']: \"\"\"Parse pattens from", "self.command_args = args.copy() # Additional punctuation chars, whereas we might", "this pattern\"\"\" return list(filter(lambda f: self.__match(f.as_posix()) == self.include, files)) def", "d in (output_dirs or [])]) if do_resolve: # autodetect input", "for o_arg in self.original_args: a_name = self.__file_exists(o_arg, already_listed, parent_check=False) #", "else a_file else: h_file = file a_file = file.as_posix() return", "= True) -> Optional[str]: \"\"\" Method for evaluating the possible", "res.append(file) return res def __match(self, value: str) -> bool: \"\"\"Match", "= args.copy() # Additional punctuation chars, 
whereas we might split", "potential output directories. If there is local match for file/directory,", "bool = True, input_filters: List[FileMatcher] = None): self.original_args = args", "@classmethod def __archive_name_for(cls, file: pathlib.Path) -> Tuple[pathlib.Path, str]: \"\"\"Resolve host", "self.__archive_name_for(up_file) upload_files[host_file] = arc_name cmd_args = self.command_args return cmd_args def", "dir in output_dirs or []: self.host_files.append(pathlib.Path(dir)) self.output_dirs = set([pathlib.Path(d) for", "existence of input files and potential output directories. If there", "res.append(p) return res @classmethod def __archive_name_for(cls, file: pathlib.Path) -> Tuple[pathlib.Path,", "should exists for file in it_files: if file.exists() or file", "> 0: off = value.find(s, off) if off < 0:", "for evaluating the possible existence of input files and potential", "list(lex) modified_paths = [] for part in split: a_name =", "quoted) of arguments, is valid path and has no whitespace", "filter_download_files(self, files: List[str], work_dir: str) -> List[str]: \"\"\"Filter downloaded files", "files: Optional[Iterable[pathlib.Path]] = None) -> List[pathlib.Path]: \"\"\"Detect files to upload\"\"\"", "or file in self.output_dirs: res.append(file) if files is None: #", "directories, upload them without contents for dir in output_dirs or", "in match_strings: if m.startswith('^'): res.append(FileMatcher(m[1:], include=False)) else: res.append(FileMatcher(m, include=True)) return", "else: res.append(FileMatcher(m, include=True)) return res def filter_upload_files(self, files: List[pathlib.Path]) ->", "a_file = file.as_posix() return h_file, a_file @classmethod def __use_absolute_path(cls, file:", "List[str], work_dir: str) -> List[str]: \"\"\"Filter downloaded files by this", "in self.output_dirs: # include files in sub directories self.__include_sub_dirs(o_file.iterdir(), already_listed)", "& in here. 
\"\"\" o_file = pathlib.Path(path) # does file/dir", "cls.__use_absolute_path(file): h_file = file.resolve() a_file = file.resolve().as_posix() a_file = a_file[1:]", "self.command_args.append(a_name) continue # NOTE: Shlex not Windows compatible! lex =", "match_string self.exact = '*' not in match_string self.absolute_path = match_string.startswith('/')", "[]: self.host_files.append(pathlib.Path(dir)) self.output_dirs = set([pathlib.Path(d) for d in (output_dirs or", "not file_exists and not o_file.is_absolute() and '..' not in o_file.as_posix():", "out files which do not exist nor should exists for", "leave it as it is... file_exists = o_file.exists() and not", "Potential path as argument, not dividing it pieces yet for", "# - use absolute paths, if /../ used (ok, quite", "pathlib.Path) -> Tuple[pathlib.Path, str]: \"\"\"Resolve host file and archive name", "not exist nor should exists for file in it_files: if", "and not \" \" in path: return None if not", "changed to be relative of working directory of container, when", "List[str]) -> List['FileMatcher']: \"\"\"Parse pattens from a list\"\"\" res =", "self.__file_exists(o_arg, already_listed, parent_check=False) # Potential path as argument, not dividing", "to upload\"\"\" for up_file in self.detect_upload_files(): host_file, arc_name = self.__archive_name_for(up_file)", "!= '' and off != len_v: return False return True", "def __include_sub_dirs(self, files: Iterable[pathlib.Path], file_set: Set[pathlib.Path]): \"\"\"Include files from sub", "refer a file path?\"\"\" # - use absolute paths, if", "line\"\"\" self.command_args = [] already_listed: Set[pathlib.Path] = self.output_dirs.copy() for o_arg", "i = 0 off = 0 len_v = len(value) s", "self.__match(file) == self.include: res.append(file) return res else: # matching files", "do_resolve: # autodetect input files self.__analyze() # exclude files by", "if f not in file_set: self.host_files.append(f) file_set.add(f) if f.is_dir(): 
self.__include_sub_dirs(f.iterdir(),", "directory self.host_files: List[pathlib.Path] = [] self.command_args = args.copy() # Additional", "processing this part later, because we can support special markups", "already_listed) if a_name: modified_paths.append((part, a_name)) for m_part, m_name in modified_paths:", "and not o_file.is_absolute() and '..' not in o_file.as_posix(): # the", "files based on a pattern\"\"\" def __init__(self, match_string: str, include:", "if path[p] != '/': break a_name += '/' if file_exists", "to output files are uploaded all_dirs = set() for file", "value split = self.match_string.split(\"*\") i = 0 off = 0", "when command is passed into container. Special case: when possible", "if files is None else files res = [] #", "path?\"\"\" # - use absolute paths, if /../ used (ok,", "\"\"\"Detect files to upload\"\"\" it_files = sorted(self.host_files) if files is", "= match_string.startswith('/') self.include = include @classmethod def parse(cls, match_strings: List[str])", "p = p.parent if p not in all_dirs: res.append(p) return", "return list(filter(lambda f: self.__match(f.as_posix()) == self.include, files)) def filter_download_files(self, files:", "for d in (output_dirs or [])]) if do_resolve: # autodetect", "# '/' in the end gets eaten away... fix for", "= pathlib.Path(path) # does file/dir exists? No attempt to copy", "include=True)) return res def filter_upload_files(self, files: List[pathlib.Path]) -> List[pathlib.Path]: \"\"\"Filter", "to copy '/', leave it as it is... 
file_exists =", "potentially spaces, were are only interested about absolute path #", "archive name for uploaded file\"\"\" if cls.__use_absolute_path(file): h_file = file.resolve()", "marked for upload - must mean some sub directory for", "in files: try: rel_file = pathlib.Path(file).relative_to(work_dir).as_posix() except ValueError: if not", "m in match_strings: if m.startswith('^'): res.append(FileMatcher(m[1:], include=False)) else: res.append(FileMatcher(m, include=True))", "List[pathlib.Path] = [] self.command_args = args.copy() # Additional punctuation chars,", "Dict, Set, Tuple, Iterable import shlex class FileMatcher: \"\"\"Match files", "+ len_s or value[i:i + len_s] != s: return False", "mean some sub directory for output p = file.parent while", "__file_exists(self, path: str, already_listed: Set[pathlib.Path], parent_check: bool = True) ->", "not in o_file.as_posix(): # the file does not exist, but", "is None: # make sure also paths leading to output", "args.copy() # Additional punctuation chars, whereas we might split command", "self.__match(rel_file) == self.include: res.append(file) return res def __match(self, value: str)", "is relative path to a file/directory... o_parent = o_file.parent while", "a_name else: return None def __analyze(self): \"\"\"Analyze the command line\"\"\"", "m_name) self.command_args.append(o_arg) def __include_sub_dirs(self, files: Iterable[pathlib.Path], file_set: Set[pathlib.Path]): \"\"\"Include files", "Set[pathlib.Path], parent_check: bool = True) -> Optional[str]: \"\"\" Method for", "-> bool: \"\"\"Should use absolute path to refer a file", "are processing this part later, because we can support special", "a list\"\"\" res = [] for m in match_strings: if", "% and & in here. 
\"\"\" o_file = pathlib.Path(path) #", "def __file_exists(self, path: str, already_listed: Set[pathlib.Path], parent_check: bool = True)", "pattern\"\"\" return list(filter(lambda f: self.__match(f.as_posix()) == self.include, files)) def filter_download_files(self,", "upload them without contents for dir in output_dirs or []:", "layer (not quoted) of arguments, is valid path and has", "cmd_args = self.command_args return cmd_args def detect_upload_files(self, files: Optional[Iterable[pathlib.Path]] =", "p in file.parents: all_dirs.add(p) for file in filter(lambda f: not", "file_exists and not o_file.is_absolute() and '..' not in o_file.as_posix(): #", "\"\"\"Match files based on a pattern\"\"\" def __init__(self, match_string: str,", "this part later, because we can support special markups such", "parent_check and not \" \" in path: return None if", "parent directory, perhaps for output o_parent = o_parent.parent if file_exists:", "== self.include, files)) def filter_download_files(self, files: List[str], work_dir: str) ->", "p not in all_dirs: res.append(p) return res @classmethod def __archive_name_for(cls,", "= pathlib.Path(file).relative_to(work_dir).as_posix() except ValueError: if not self.include: res.append(file) continue if", "not exist, but it is relative path to a file/directory...", "is local match for file/directory, it is marked as uploadable", "if a_file.startswith('/') else a_file else: h_file = file a_file =", "match_strings: if m.startswith('^'): res.append(FileMatcher(m[1:], include=False)) else: res.append(FileMatcher(m, include=True)) return res", "for up_file in self.detect_upload_files(): host_file, arc_name = self.__archive_name_for(up_file) upload_files[host_file] =", "self.output_dirs: res.append(file) if files is None: # make sure also", "i += 1 while i < len(split): s = split[i]", "len(split): s = split[i] len_s = len(s) if len_s >", "f: self.__match(f.as_posix()) == self.include, files)) def filter_download_files(self, files: 
List[str], work_dir:", "self.original_args = args self.directory = directory self.host_files: List[pathlib.Path] = []", "we are processing this part later, because we can support", "if h_file not in already_listed: self.host_files.append(h_file) already_listed.add(h_file) # '/' in", "(output_dirs or [])]) if do_resolve: # autodetect input files self.__analyze()", "part later, because we can support special markups such as", "'/' for c in path]) # When filename contains potentially", "= [] # filter out files which do not exist", "= include @classmethod def parse(cls, match_strings: List[str]) -> List['FileMatcher']: \"\"\"Parse", "if len_s > 0: off = value.find(s, off) if off", "exists for file in it_files: if file.exists() or file in", "if file.exists() or file in self.output_dirs: res.append(file) if files is", "shlex basic) self.additional_punc_chars = \"=,\" # these are output directories,", "contents for dir in output_dirs or []: self.host_files.append(pathlib.Path(dir)) self.output_dirs =", "res else: # matching files relative to working directory res", "o_file.is_dir() and o_file not in self.output_dirs: # include files in", "def resolve_upload_files(self, upload_files: Dict[pathlib.Path, str]): \"\"\"Resolve the files to upload\"\"\"", "'*' not in match_string self.absolute_path = match_string.startswith('/') self.include = include", "= o_file.parent while not file_exists and o_parent and o_parent.as_posix() !=", "off != len_v: return False return True class FileResolver: \"\"\"Resolve", "for file in files: if self.__match(file) == self.include: res.append(file) return", "s = split[0] len_s = len(s) if len_s > 0:", "path]) # When filename contains potentially spaces, were are only", "= filth.filter_upload_files(self.host_files) def __file_exists(self, path: str, already_listed: Set[pathlib.Path], parent_check: bool", "there is local match for file/directory, it is marked as", "self.__archive_name_for(o_file) if h_file not in already_listed: 
self.host_files.append(h_file) already_listed.add(h_file) # '/'", "path to refer a file path?\"\"\" # - use absolute", "end gets eaten away... fix for p in range(len(path) -", "NOTE: Shlex not Windows compatible! lex = shlex.shlex(o_arg, posix=True, punctuation_chars=self.additional_punc_chars)", "does file/dir exists? No attempt to copy '/', leave it", "else: h_file = file a_file = file.as_posix() return h_file, a_file", "working directory of container, when command is passed into container.", "existing parent directory, perhaps for output o_parent = o_parent.parent if", "return res def filter_upload_files(self, files: List[pathlib.Path]) -> List[pathlib.Path]: \"\"\"Filter uploaded", "len_s = len(s) if len_s > 0: off = value.find(s,", "(On top of shlex basic) self.additional_punc_chars = \"=,\" # these", "m.startswith('^'): res.append(FileMatcher(m[1:], include=False)) else: res.append(FileMatcher(m, include=True)) return res def filter_upload_files(self,", "spaces, were are only interested about absolute path # Not", "file_exists and o_parent and o_parent.as_posix() != '.': if o_parent.is_dir() and", "a_name = self.__file_exists(part, already_listed) if a_name: modified_paths.append((part, a_name)) for m_part,", "or value[i:i + len_s] != s: return False off +=", "use absolute paths, if /../ used (ok, quite weak) return", "if len_v < i + len_s or value[i:i + len_s]", "file_exists: h_file, a_name = self.__archive_name_for(o_file) if h_file not in already_listed:", "paths, if /../ used (ok, quite weak) return file.is_absolute() or", "match for file/directory, it is marked as uploadable file into", "\"\"\"Parse pattens from a list\"\"\" res = [] for m", "__match(self, value: str) -> bool: \"\"\"Match value with this pattern\"\"\"", "= set([pathlib.Path(d) for d in (output_dirs or [])]) if do_resolve:", "for c in path]) # When filename contains potentially spaces," ]
[ "This information is used when creating the *SDK*. info =", "info = { 'input_format': { 'session_id': 'str', 'field': 'str', 'value?':", "= { 'input_format': { 'session_id': 'str', 'field': 'str', 'value?': 'str',", "def do(data, resource): body = {} params = data['params'] user", "input output format of the function. # This information is", "from cloud.permission import Permission, NeedPermission from cloud.message import error #", "cloud.message import error # Define the input output format of", "body = {} params = data['params'] user = data['user'] user_id", "error.FORBIDDEN_MODIFICATION return body else: user[field] = value resource.db_update_item(user_id, user) body['user_id']", "'password_hash', 'salt', 'groups', 'login_method']: body['error'] = error.FORBIDDEN_MODIFICATION return body else:", "'str', 'value?': 'str', }, 'output_format': { 'user_id?': 'str', }, 'description':", "= user['id'] field = params.get('field') value = params.get('value', None) user", "the *SDK*. info = { 'input_format': { 'session_id': 'str', 'field':", "*SDK*. info = { 'input_format': { 'session_id': 'str', 'field': 'str',", "used when creating the *SDK*. info = { 'input_format': {", "data['user'] user_id = user['id'] field = params.get('field') value = params.get('value',", "information' } @NeedPermission(Permission.Run.Auth.set_me) def do(data, resource): body = {} params", "cloud.permission import Permission, NeedPermission from cloud.message import error # Define", "information is used when creating the *SDK*. 
info = {", "'groups', 'login_method']: body['error'] = error.FORBIDDEN_MODIFICATION return body else: user[field] =", "'description': 'Set my information' } @NeedPermission(Permission.Run.Auth.set_me) def do(data, resource): body", "if field in ['id', 'email', 'password_hash', 'salt', 'groups', 'login_method']: body['error']", "'str', }, 'output_format': { 'user_id?': 'str', }, 'description': 'Set my", "error # Define the input output format of the function.", "'salt', 'groups', 'login_method']: body['error'] = error.FORBIDDEN_MODIFICATION return body else: user[field]", "}, 'description': 'Set my information' } @NeedPermission(Permission.Run.Auth.set_me) def do(data, resource):", "return body else: user[field] = value resource.db_update_item(user_id, user) body['user_id'] =", "format of the function. # This information is used when", "body else: user[field] = value resource.db_update_item(user_id, user) body['user_id'] = user_id", "import error # Define the input output format of the", "'field': 'str', 'value?': 'str', }, 'output_format': { 'user_id?': 'str', },", "user = data['user'] user_id = user['id'] field = params.get('field') value", "{ 'session_id': 'str', 'field': 'str', 'value?': 'str', }, 'output_format': {", "value = params.get('value', None) user = resource.db_get_item(user_id) # For security", "when creating the *SDK*. info = { 'input_format': { 'session_id':", "user[field] = value resource.db_update_item(user_id, user) body['user_id'] = user_id return body", "'value?': 'str', }, 'output_format': { 'user_id?': 'str', }, 'description': 'Set", "import Permission, NeedPermission from cloud.message import error # Define the", "of the function. 
# This information is used when creating", "user['id'] field = params.get('field') value = params.get('value', None) user =", "'session_id': 'str', 'field': 'str', 'value?': 'str', }, 'output_format': { 'user_id?':", "'user_id?': 'str', }, 'description': 'Set my information' } @NeedPermission(Permission.Run.Auth.set_me) def", "'Set my information' } @NeedPermission(Permission.Run.Auth.set_me) def do(data, resource): body =", "field = params.get('field') value = params.get('value', None) user = resource.db_get_item(user_id)", "} @NeedPermission(Permission.Run.Auth.set_me) def do(data, resource): body = {} params =", "resource): body = {} params = data['params'] user = data['user']", "data['params'] user = data['user'] user_id = user['id'] field = params.get('field')", "None) user = resource.db_get_item(user_id) # For security if field in", "else: user[field] = value resource.db_update_item(user_id, user) body['user_id'] = user_id return", "'login_method']: body['error'] = error.FORBIDDEN_MODIFICATION return body else: user[field] = value", "Permission, NeedPermission from cloud.message import error # Define the input", "do(data, resource): body = {} params = data['params'] user =", "= data['params'] user = data['user'] user_id = user['id'] field =", "params.get('value', None) user = resource.db_get_item(user_id) # For security if field", "from cloud.message import error # Define the input output format", "NeedPermission from cloud.message import error # Define the input output", "creating the *SDK*. info = { 'input_format': { 'session_id': 'str',", "{ 'user_id?': 'str', }, 'description': 'Set my information' } @NeedPermission(Permission.Run.Auth.set_me)", "params = data['params'] user = data['user'] user_id = user['id'] field", "is used when creating the *SDK*. info = { 'input_format':", "Define the input output format of the function. 
# This", "'str', 'field': 'str', 'value?': 'str', }, 'output_format': { 'user_id?': 'str',", "{} params = data['params'] user = data['user'] user_id = user['id']", "body['error'] = error.FORBIDDEN_MODIFICATION return body else: user[field] = value resource.db_update_item(user_id,", "= resource.db_get_item(user_id) # For security if field in ['id', 'email',", "# This information is used when creating the *SDK*. info", "= {} params = data['params'] user = data['user'] user_id =", "# For security if field in ['id', 'email', 'password_hash', 'salt',", "= data['user'] user_id = user['id'] field = params.get('field') value =", "user = resource.db_get_item(user_id) # For security if field in ['id',", "the function. # This information is used when creating the", "# Define the input output format of the function. #", "'email', 'password_hash', 'salt', 'groups', 'login_method']: body['error'] = error.FORBIDDEN_MODIFICATION return body", "= params.get('value', None) user = resource.db_get_item(user_id) # For security if", "the input output format of the function. # This information", "output format of the function. 
# This information is used", "user_id = user['id'] field = params.get('field') value = params.get('value', None)", "my information' } @NeedPermission(Permission.Run.Auth.set_me) def do(data, resource): body = {}", "}, 'output_format': { 'user_id?': 'str', }, 'description': 'Set my information'", "@NeedPermission(Permission.Run.Auth.set_me) def do(data, resource): body = {} params = data['params']", "'input_format': { 'session_id': 'str', 'field': 'str', 'value?': 'str', }, 'output_format':", "'str', }, 'description': 'Set my information' } @NeedPermission(Permission.Run.Auth.set_me) def do(data,", "'output_format': { 'user_id?': 'str', }, 'description': 'Set my information' }", "For security if field in ['id', 'email', 'password_hash', 'salt', 'groups',", "security if field in ['id', 'email', 'password_hash', 'salt', 'groups', 'login_method']:", "= error.FORBIDDEN_MODIFICATION return body else: user[field] = value resource.db_update_item(user_id, user)", "{ 'input_format': { 'session_id': 'str', 'field': 'str', 'value?': 'str', },", "function. # This information is used when creating the *SDK*.", "field in ['id', 'email', 'password_hash', 'salt', 'groups', 'login_method']: body['error'] =", "['id', 'email', 'password_hash', 'salt', 'groups', 'login_method']: body['error'] = error.FORBIDDEN_MODIFICATION return", "in ['id', 'email', 'password_hash', 'salt', 'groups', 'login_method']: body['error'] = error.FORBIDDEN_MODIFICATION", "resource.db_get_item(user_id) # For security if field in ['id', 'email', 'password_hash',", "= params.get('field') value = params.get('value', None) user = resource.db_get_item(user_id) #", "params.get('field') value = params.get('value', None) user = resource.db_get_item(user_id) # For" ]
[ "Normally, code-specific utility code would belong in a code-specific ARMI", "code-specific ARMI plugin. But in this case, the need for", "nuclideBases as nb from armi import configure configure(permissive=True) _o, r", "code-specific utility code would belong in a code-specific ARMI plugin.", "in enumerate(bFuel, start=1): ndens = component.getNumberDensities() # convert nucName (str)", "nb from armi import configure configure(permissive=True) _o, r = test_reactors.loadTestReactor()", "from armi.reactor.flags import Flags from armi.utils.densityTools import formatMaterialCard from armi.nucDirectory", "========================= Here we load a test reactor and write each", "one fuel block out as MCNP material cards. Normally, code-specific", "from armi.reactor.tests import test_reactors from armi.reactor.flags import Flags from armi.utils.densityTools", "is so pervasive that it made it into the framework.", "write each component of one fuel block out as MCNP", "it made it into the framework. \"\"\" from armi.reactor.tests import", "ARMI plugin. But in this case, the need for MCNP", "<filename>doc/gallery-src/analysis/run_blockMcnpMaterialCard.py \"\"\" Write MCNP Material Cards ========================= Here we load", "test_reactors.loadTestReactor() bFuel = r.core.getBlocks(Flags.FUEL)[0] for ci, component in enumerate(bFuel, start=1):", "and write each component of one fuel block out as", "= r.core.getBlocks(Flags.FUEL)[0] for ci, component in enumerate(bFuel, start=1): ndens =", "from armi.utils.densityTools import formatMaterialCard from armi.nucDirectory import nuclideBases as nb", "materials cards is so pervasive that it made it into", "nuclideBase keys ndensByBase = {nb.byName[nucName]: dens for nucName, dens in", "formatMaterialCard from armi.nucDirectory import nuclideBases as nb from armi import", "would belong in a code-specific ARMI plugin. 
But in this", "configure(permissive=True) _o, r = test_reactors.loadTestReactor() bFuel = r.core.getBlocks(Flags.FUEL)[0] for ci,", "made it into the framework. \"\"\" from armi.reactor.tests import test_reactors", "= {nb.byName[nucName]: dens for nucName, dens in ndens.items()} print(\"\".join(formatMaterialCard(ndensByBase, matNum=ci)))", "test_reactors from armi.reactor.flags import Flags from armi.utils.densityTools import formatMaterialCard from", "(str) keys to nuclideBase keys ndensByBase = {nb.byName[nucName]: dens for", "r = test_reactors.loadTestReactor() bFuel = r.core.getBlocks(Flags.FUEL)[0] for ci, component in", "Write MCNP Material Cards ========================= Here we load a test", "that it made it into the framework. \"\"\" from armi.reactor.tests", "of one fuel block out as MCNP material cards. Normally,", "load a test reactor and write each component of one", "this case, the need for MCNP materials cards is so", "armi.utils.densityTools import formatMaterialCard from armi.nucDirectory import nuclideBases as nb from", "MCNP material cards. Normally, code-specific utility code would belong in", "utility code would belong in a code-specific ARMI plugin. But", "test reactor and write each component of one fuel block", "keys to nuclideBase keys ndensByBase = {nb.byName[nucName]: dens for nucName,", "start=1): ndens = component.getNumberDensities() # convert nucName (str) keys to", "bFuel = r.core.getBlocks(Flags.FUEL)[0] for ci, component in enumerate(bFuel, start=1): ndens", "pervasive that it made it into the framework. 
\"\"\" from", "component.getNumberDensities() # convert nucName (str) keys to nuclideBase keys ndensByBase", "Cards ========================= Here we load a test reactor and write", "as nb from armi import configure configure(permissive=True) _o, r =", "Here we load a test reactor and write each component", "convert nucName (str) keys to nuclideBase keys ndensByBase = {nb.byName[nucName]:", "armi.reactor.tests import test_reactors from armi.reactor.flags import Flags from armi.utils.densityTools import", "from armi.nucDirectory import nuclideBases as nb from armi import configure", "nucName (str) keys to nuclideBase keys ndensByBase = {nb.byName[nucName]: dens", "block out as MCNP material cards. Normally, code-specific utility code", "# convert nucName (str) keys to nuclideBase keys ndensByBase =", "\"\"\" from armi.reactor.tests import test_reactors from armi.reactor.flags import Flags from", "belong in a code-specific ARMI plugin. But in this case,", "r.core.getBlocks(Flags.FUEL)[0] for ci, component in enumerate(bFuel, start=1): ndens = component.getNumberDensities()", "component of one fuel block out as MCNP material cards.", "in a code-specific ARMI plugin. But in this case, the", "MCNP materials cards is so pervasive that it made it", "case, the need for MCNP materials cards is so pervasive", "ci, component in enumerate(bFuel, start=1): ndens = component.getNumberDensities() # convert", "armi.reactor.flags import Flags from armi.utils.densityTools import formatMaterialCard from armi.nucDirectory import", "import nuclideBases as nb from armi import configure configure(permissive=True) _o,", "= component.getNumberDensities() # convert nucName (str) keys to nuclideBase keys", "a test reactor and write each component of one fuel", "cards. Normally, code-specific utility code would belong in a code-specific", "code would belong in a code-specific ARMI plugin. But in", "so pervasive that it made it into the framework. 
\"\"\"", "in this case, the need for MCNP materials cards is", "Material Cards ========================= Here we load a test reactor and", "each component of one fuel block out as MCNP material", "ndensByBase = {nb.byName[nucName]: dens for nucName, dens in ndens.items()} print(\"\".join(formatMaterialCard(ndensByBase,", "armi import configure configure(permissive=True) _o, r = test_reactors.loadTestReactor() bFuel =", "configure configure(permissive=True) _o, r = test_reactors.loadTestReactor() bFuel = r.core.getBlocks(Flags.FUEL)[0] for", "\"\"\" Write MCNP Material Cards ========================= Here we load a", "fuel block out as MCNP material cards. Normally, code-specific utility", "import Flags from armi.utils.densityTools import formatMaterialCard from armi.nucDirectory import nuclideBases", "Flags from armi.utils.densityTools import formatMaterialCard from armi.nucDirectory import nuclideBases as", "import configure configure(permissive=True) _o, r = test_reactors.loadTestReactor() bFuel = r.core.getBlocks(Flags.FUEL)[0]", "ndens = component.getNumberDensities() # convert nucName (str) keys to nuclideBase", "framework. \"\"\" from armi.reactor.tests import test_reactors from armi.reactor.flags import Flags", "But in this case, the need for MCNP materials cards", "the need for MCNP materials cards is so pervasive that", "to nuclideBase keys ndensByBase = {nb.byName[nucName]: dens for nucName, dens", "= test_reactors.loadTestReactor() bFuel = r.core.getBlocks(Flags.FUEL)[0] for ci, component in enumerate(bFuel,", "import formatMaterialCard from armi.nucDirectory import nuclideBases as nb from armi", "into the framework. \"\"\" from armi.reactor.tests import test_reactors from armi.reactor.flags", "it into the framework. 
\"\"\" from armi.reactor.tests import test_reactors from", "keys ndensByBase = {nb.byName[nucName]: dens for nucName, dens in ndens.items()}", "MCNP Material Cards ========================= Here we load a test reactor", "from armi import configure configure(permissive=True) _o, r = test_reactors.loadTestReactor() bFuel", "out as MCNP material cards. Normally, code-specific utility code would", "for MCNP materials cards is so pervasive that it made", "component in enumerate(bFuel, start=1): ndens = component.getNumberDensities() # convert nucName", "_o, r = test_reactors.loadTestReactor() bFuel = r.core.getBlocks(Flags.FUEL)[0] for ci, component", "need for MCNP materials cards is so pervasive that it", "plugin. But in this case, the need for MCNP materials", "as MCNP material cards. Normally, code-specific utility code would belong", "we load a test reactor and write each component of", "reactor and write each component of one fuel block out", "for ci, component in enumerate(bFuel, start=1): ndens = component.getNumberDensities() #", "the framework. \"\"\" from armi.reactor.tests import test_reactors from armi.reactor.flags import", "enumerate(bFuel, start=1): ndens = component.getNumberDensities() # convert nucName (str) keys", "cards is so pervasive that it made it into the", "import test_reactors from armi.reactor.flags import Flags from armi.utils.densityTools import formatMaterialCard", "armi.nucDirectory import nuclideBases as nb from armi import configure configure(permissive=True)", "material cards. Normally, code-specific utility code would belong in a", "a code-specific ARMI plugin. But in this case, the need" ]
[ "JUN {}'.format(birth_year) if not db['yougest']: db['yougest'] = individual_id elif db[db['yougest']]['birth']", "if not last_name: last_name = names.get_last_name() birth_place = 'Paris' if", "individual_id elif db[db['yougest']]['birth'] < birth_year: db['yougest'] = individual_id db[individual_id]['string'] =", "import names import os import datetime from random import random", "k.startswith('@F'): gedcom_content += v['string'] gedcom_content += '0 TRLR\\n' open(os.path.join(os.path.dirname(__file__), '..',", "random def generate_gedcom_file(): \"\"\"generate some gedcom file\"\"\" db = {}", "1 COPR Copyright (c) 2020 <NAME>,,,. 1 GEDC 2 VERS", "2020 <NAME>,,,. 1 GEDC 2 VERS 5.5 1 CHAR UTF-8", "'Rome' death_place = 'Zorge' if random() < 0.5 else 'Bruegge'", "== 'F' else None, children_ids) db[husband_id]['string'] += \"1 FAMS \"+family_id", "family_id def find_by_birth_date(db, from_year, to_year, sex, exclude=[]): ids = []", "int(random()*20) db[individual_id] = { 'birth': birth_year, 'death': death_year, 'sex': sex,", "children_ids[i] if db[children_ids[i] ]['sex'] == 'F' else None, children_ids) db[husband_id]['string']", "db[individual_id]['string'] = \"\"\"0 {individual_id} INDI 1 NAME {first_name} /{last_name}/ 1", "if random() < 0.5 else 'Rome' death_place = 'Zorge' if", "if not marriage_place: marriage_place = 'London' if random() < 0.5", "family_id = generate_family( db, husband_id, wife_id, children_ids, marriage_year) for i", "'F' else None, children_ids) db[husband_id]['string'] += \"1 FAMS \"+family_id +", "family_id if generations > 0: generate_recursive_family( db, db[children_ids[i]]['birth'], generations -", "None def generate_recursive_family(db, start_year=1000, generations=2, husband_id=None, wife_id=None, siblings=[], max_children=5): if", "if random() < 0.5 else 'M' first_name = names.get_first_name( gender='male'", "random import random def generate_gedcom_file(): \"\"\"generate some gedcom file\"\"\" db", "JUN {}'.format(birth_year) 
death_date = '1 JUN {}'.format(birth_year) if not db['yougest']:", "if k.startswith('@F'): gedcom_content += v['string'] gedcom_content += '0 TRLR\\n' open(os.path.join(os.path.dirname(__file__),", "not db['yougest']: db['yougest'] = individual_id elif db[db['yougest']]['birth'] < birth_year: db['yougest']", "\"+family_id + '\\n' generate_recursive_family(db, generations=8, max_children=4) for k, v in", "DEAT 2 DATE {death_date} 2 PLAC {death_place} \"\"\".format(**locals()) return individual_id", "if random() < 0.5 else 'Tokio' db['n_families'] += 1 marriage_date", "+ 40 + int(random()*20) db[individual_id] = { 'birth': birth_year, 'death':", "\"@F{}@\".format(db['n_families']) db[family_id] = {'string': \"\"\"0 {family_id} FAM 1 HUSB {husband_id}", "in db.items(): if k.startswith('@F'): gedcom_content += v['string'] gedcom_content += '0", "data: if data['birth'] > from_year and data['birth'] < to_year: if", "CHIL {}\\n\".format(child_id) return family_id def find_by_birth_date(db, from_year, to_year, sex, exclude=[]):", "sex == data['sex']: if individual_id not in exclude: ids.append(individual_id) if", "birth_year, 'death': death_year, 'sex': sex, 'last_name': last_name } birth_date =", "return individual_id def generate_family(db, husband_id, wife_id, children_ids, marriage_year, marriage_place=None): if", "'\\n' generate_recursive_family(db, generations=8, max_children=4) for k, v in db.items(): if", "= birth_year + 40 + int(random()*20) db[individual_id] = { 'birth':", "= 'Zorge' if random() < 0.5 else 'Bruegge' db['n_individuals'] +=", "\"\"\".format(datetime.date.today()) def generate_individual(db, birth_year, sex=None, last_name=None): if not sex: sex", "Gramps 1 DATE {} 2 TIME 15:35:24 1 SUBM @SUBM@", "[] for individual_id, data in db.items(): if not individual_id.startswith('@I'): continue", "+ 20 + int(random()*5) children_ids = [] for i in", "db, husband_id, wife_id, children_ids, marriage_year) for i in range(n_children): 
db[children_ids[i]]['string']", "for child_id in children_ids: db[family_id]['string'] += \"1 CHIL {}\\n\".format(child_id) return", "start_year + int(random()*5), sex='M') else: print('reused {}'.format(husband_id)) if not wife_id:", "= generate_individual( db, start_year + int(random()*5), sex='M') else: print('reused {}'.format(husband_id))", "generations > 0: generate_recursive_family( db, db[children_ids[i]]['birth'], generations - 1, children_ids[i]", "marriage_place = 'London' if random() < 0.5 else 'Tokio' db['n_families']", "individual_id, data in db.items(): if not individual_id.startswith('@I'): continue if 'famc'", "size, color=color) d = ImageDraw.Draw(img) font = ImageFont.truetype(r'arial.ttf', font_size) d.text(pos,", "LANG German \"\"\".format(datetime.date.today()) def generate_individual(db, birth_year, sex=None, last_name=None): if not", "0), font=font) img.save(filename) for i in range(20): generate_one_image( 'tests/images/individual_I6_image_age_{}.png'.format( 1+i*4", "to_year, sex, exclude=[]): ids = [] for individual_id, data in", "= individual_id db[individual_id]['string'] = \"\"\"0 {individual_id} INDI 1 NAME {first_name}", "v['string'] for k, v in db.items(): if k.startswith('@F'): gedcom_content +=", "= '1 JUN {}'.format(birth_year) death_date = '1 JUN {}'.format(birth_year) if", "< 0.5 else 'M' first_name = names.get_first_name( gender='male' if sex", "1 marriage_date = '1 MAY {}'.format(marriage_year) family_id = \"@F{}@\".format(db['n_families']) db[family_id]", "1 + int(random()*10), last_name=db[husband_id]['last_name'])) family_id = generate_family( db, husband_id, wife_id,", "0.5 else 'Bruegge' db['n_individuals'] += 1 individual_id = '@I{}@'.format(db[\"n_individuals\"]) death_year", "db, birth_year=marriage_year + 1 + int(random()*10), last_name=db[husband_id]['last_name'])) family_id = generate_family(", "ImageDraw, ImageFont def generate_one_image(filename, text, font_size=22, pos=(15, 40), size=(100, 100),", "def 
generate_individual_images(): from PIL import Image, ImageDraw, ImageFont def generate_one_image(filename,", "db, start_year, start_year + 10, sex='F', exclude=exclude) if not wife_id:", "\"\"\".format( **locals() )} for child_id in children_ids: db[family_id]['string'] += \"1", "db['max_individuals'] = 8000 db['n_families'] = 0 db['yougest'] = None gedcom_content", "WIFE {wife_id} 1 MARR 2 DATE {marriage_date} 2 PLAC {marriage_place}", "data['birth'] > from_year and data['birth'] < to_year: if sex ==", "last_name = names.get_last_name() birth_place = 'Paris' if random() < 0.5", "< birth_year: db['yougest'] = individual_id db[individual_id]['string'] = \"\"\"0 {individual_id} INDI", "generate_family(db, husband_id, wife_id, children_ids, marriage_year, marriage_place=None): if not marriage_place: marriage_place", "]['sex'] == 'F' else None, children_ids) db[husband_id]['string'] += \"1 FAMS", "last_name=None): if not sex: sex = 'F' if random() <", "def generate_gedcom_file(): \"\"\"generate some gedcom file\"\"\" db = {} db['n_individuals']", "40 + int(random()*20) db[individual_id] = { 'birth': birth_year, 'death': death_year,", "= 'F' if random() < 0.5 else 'M' first_name =", "in exclude: ids.append(individual_id) if ids: return ids[int(random()*len(ids))] return None def", "1 DATE {} 2 TIME 15:35:24 1 SUBM @SUBM@ 1", "{birth_place} 1 DEAT 2 DATE {death_date} 2 PLAC {death_place} \"\"\".format(**locals())", "{death_date} 2 PLAC {death_place} \"\"\".format(**locals()) return individual_id def generate_family(db, husband_id,", "\"\"\"0 {family_id} FAM 1 HUSB {husband_id} 1 WIFE {wife_id} 1", "+ 10, sex='F', exclude=exclude) if not wife_id: wife_id = generate_individual(", "db.items(): if k.startswith('@F'): gedcom_content += v['string'] gedcom_content += '0 TRLR\\n'", "siblings=[], max_children=5): if not husband_id: if random() < 0.2: exclude", "**locals() )} for child_id in children_ids: db[family_id]['string'] += \"1 CHIL", "i in range(n_children): 
db[children_ids[i]]['string'] += \"1 FAMC \"+family_id + '\\n'", "+= v['string'] gedcom_content += '0 TRLR\\n' open(os.path.join(os.path.dirname(__file__), '..', 'tests', 'autogenerated.ged'),", "range(20): generate_one_image( 'tests/images/individual_I6_image_age_{}.png'.format( 1+i*4 ), 'Age {}'.format( 1+i*4, )) generate_individual_images()", "DATE {} 2 TIME 15:35:24 1 SUBM @SUBM@ 1 COPR", "{'string': \"\"\"0 {family_id} FAM 1 HUSB {husband_id} 1 WIFE {wife_id}", "+= \"1 FAMS \"+family_id + '\\n' db[wife_id]['string'] += \"1 FAMS", "from random import random def generate_gedcom_file(): \"\"\"generate some gedcom file\"\"\"", "0.5 else 'Rome' death_place = 'Zorge' if random() < 0.5", "start_year + 20 + int(random()*5) children_ids = [] for i", "exclude=exclude) if not husband_id: husband_id = generate_individual( db, start_year +", "int(random()*5) children_ids = [] for i in range(n_children): children_ids.append(generate_individual( db,", "+= 1 marriage_date = '1 MAY {}'.format(marriage_year) family_id = \"@F{}@\".format(db['n_families'])", "if random() < 0.5 else 'Bruegge' db['n_individuals'] += 1 individual_id", "family_id = \"@F{}@\".format(db['n_families']) db[family_id] = {'string': \"\"\"0 {family_id} FAM 1", "1 MARR 2 DATE {marriage_date} 2 PLAC {marriage_place} \"\"\".format( **locals()", "img.save(filename) for i in range(20): generate_one_image( 'tests/images/individual_I6_image_age_{}.png'.format( 1+i*4 ), 'Age", "siblings.copy() if wife_id: exclude += [wife_id] husband_id = find_by_birth_date( db,", "from_year, to_year, sex, exclude=[]): ids = [] for individual_id, data", "< to_year: if sex == data['sex']: if individual_id not in", "datetime from random import random def generate_gedcom_file(): \"\"\"generate some gedcom", "last_name: last_name = names.get_last_name() birth_place = 'Paris' if random() <", "= 'Paris' if random() < 0.5 else 'Rome' death_place =", "if sex == 'M' else 'female') if random() < 0.3:", "in range(20): generate_one_image( 
'tests/images/individual_I6_image_age_{}.png'.format( 1+i*4 ), 'Age {}'.format( 1+i*4, ))", "gedcom_content += v['string'] for k, v in db.items(): if k.startswith('@F'):", "HEAD 1 SOUR Gramps 2 VERS 3.3.0 2 NAME Gramps", "{}'.format(birth_year) death_date = '1 JUN {}'.format(birth_year) if not db['yougest']: db['yougest']", "if not husband_id: husband_id = generate_individual( db, start_year + int(random()*5),", "\"+family_id + '\\n' db[children_ids[i]]['famc'] = family_id if generations > 0:", "children_ids: db[family_id]['string'] += \"1 CHIL {}\\n\".format(child_id) return family_id def find_by_birth_date(db,", "1 BIRT 2 DATE {birth_date} 2 PLAC {birth_place} 1 DEAT", "0 db['yougest'] = None gedcom_content = \"\"\" 0 HEAD 1", "find_by_birth_date( db, start_year, start_year + 10, sex='F', exclude=exclude) if not", "SEX {sex} 1 BIRT 2 DATE {birth_date} 2 PLAC {birth_place}", "marriage_year = start_year + 20 + int(random()*5) children_ids = []", "marriage_place=None): if not marriage_place: marriage_place = 'London' if random() <", "+ '\\n' generate_recursive_family(db, generations=8, max_children=4) for k, v in db.items():", "import Image, ImageDraw, ImageFont def generate_one_image(filename, text, font_size=22, pos=(15, 40),", "= names.get_last_name() birth_place = 'Paris' if random() < 0.5 else", "from_year and data['birth'] < to_year: if sex == data['sex']: if", "\"+family_id + '\\n' db[wife_id]['string'] += \"1 FAMS \"+family_id + '\\n'", "= siblings.copy() + [husband_id] wife_id = find_by_birth_date( db, start_year, start_year", "for k, v in db.items(): if k.startswith('@I'): gedcom_content += v['string']", "last_name=db[husband_id]['last_name'])) family_id = generate_family( db, husband_id, wife_id, children_ids, marriage_year) for", "data['sex']: if individual_id not in exclude: ids.append(individual_id) if ids: return", "- db['n_individuals'] / db['max_individuals'])) marriage_year = start_year + 20 +", "None gedcom_content = \"\"\" 0 HEAD 1 SOUR Gramps 
2", "in data: if data['birth'] > from_year and data['birth'] < to_year:", "birth_date = '1 JUN {}'.format(birth_year) death_date = '1 JUN {}'.format(birth_year)", "= individual_id elif db[db['yougest']]['birth'] < birth_year: db['yougest'] = individual_id db[individual_id]['string']", "* (1 - db['n_individuals'] / db['max_individuals'])) marriage_year = start_year +", "def generate_recursive_family(db, start_year=1000, generations=2, husband_id=None, wife_id=None, siblings=[], max_children=5): if not", "NAME {first_name} /{last_name}/ 1 SEX {sex} 1 BIRT 2 DATE", "{}'.format(wife_id)) n_children = int((1+random()*(max_children-1)) * (1 - db['n_individuals'] / db['max_individuals']))", "generations=8, max_children=4) for k, v in db.items(): if k.startswith('@I'): gedcom_content", "start_year + int(random()*5), sex='F') else: print('reused {}'.format(wife_id)) n_children = int((1+random()*(max_children-1))", "birth_year: db['yougest'] = individual_id db[individual_id]['string'] = \"\"\"0 {individual_id} INDI 1", "start_year, start_year + 10, sex='F', exclude=exclude) if not wife_id: wife_id", "< 0.5 else 'Rome' death_place = 'Zorge' if random() <", "[] for i in range(n_children): children_ids.append(generate_individual( db, birth_year=marriage_year + 1", "color=(160, 160, 160)): img = Image.new('RGB', size, color=color) d =", "start_year=1000, generations=2, husband_id=None, wife_id=None, siblings=[], max_children=5): if not husband_id: if", "generate_recursive_family(db, start_year=1000, generations=2, husband_id=None, wife_id=None, siblings=[], max_children=5): if not husband_id:", "2 TIME 15:35:24 1 SUBM @SUBM@ 1 COPR Copyright (c)", "None, children_ids) db[husband_id]['string'] += \"1 FAMS \"+family_id + '\\n' db[wife_id]['string']", "death_date = '1 JUN {}'.format(birth_year) if not db['yougest']: db['yougest'] =", "160, 160)): img = Image.new('RGB', size, color=color) d = ImageDraw.Draw(img)", "'1 JUN {}'.format(birth_year) if not db['yougest']: db['yougest'] = 
individual_id elif", "else 'Tokio' db['n_families'] += 1 marriage_date = '1 MAY {}'.format(marriage_year)", "db[family_id]['string'] += \"1 CHIL {}\\n\".format(child_id) return family_id def find_by_birth_date(db, from_year,", "db[individual_id] = { 'birth': birth_year, 'death': death_year, 'sex': sex, 'last_name':", "last_name } birth_date = '1 JUN {}'.format(birth_year) death_date = '1", "v in db.items(): if k.startswith('@I'): gedcom_content += v['string'] for k,", "== data['sex']: if individual_id not in exclude: ids.append(individual_id) if ids:", "/{last_name}/ 1 SEX {sex} 1 BIRT 2 DATE {birth_date} 2", "font=font) img.save(filename) for i in range(20): generate_one_image( 'tests/images/individual_I6_image_age_{}.png'.format( 1+i*4 ),", "\"1 FAMS \"+family_id + '\\n' generate_recursive_family(db, generations=8, max_children=4) for k,", "{wife_id} 1 MARR 2 DATE {marriage_date} 2 PLAC {marriage_place} \"\"\".format(", "'M' first_name = names.get_first_name( gender='male' if sex == 'M' else", "if not db['yougest']: db['yougest'] = individual_id elif db[db['yougest']]['birth'] < birth_year:", "first_name = names.get_first_name( gender='male' if sex == 'M' else 'female')", "INDI 1 NAME {first_name} /{last_name}/ 1 SEX {sex} 1 BIRT", "< 0.3: first_name += ' ' + \\ names.get_first_name(gender='male' if", "\"\"\"generate some gedcom file\"\"\" db = {} db['n_individuals'] = 0", "color=color) d = ImageDraw.Draw(img) font = ImageFont.truetype(r'arial.ttf', font_size) d.text(pos, text,", "font = ImageFont.truetype(r'arial.ttf', font_size) d.text(pos, text, fill=(0, 0, 0), font=font)", "fill=(0, 0, 0), font=font) img.save(filename) for i in range(20): generate_one_image(", "{}'.format(birth_year) if not db['yougest']: db['yougest'] = individual_id elif db[db['yougest']]['birth'] <", "+ \\ names.get_first_name(gender='male' if sex == 'M' else 'female') if", "{}'.format(husband_id)) if not wife_id: if random() < 10.9: exclude =", "+= \"1 CHIL {}\\n\".format(child_id) return 
family_id def find_by_birth_date(db, from_year, to_year,", "0.5 else 'Tokio' db['n_families'] += 1 marriage_date = '1 MAY", "0.5 else 'M' first_name = names.get_first_name( gender='male' if sex ==", "= { 'birth': birth_year, 'death': death_year, 'sex': sex, 'last_name': last_name", "= {'string': \"\"\"0 {family_id} FAM 1 HUSB {husband_id} 1 WIFE", "+ int(random()*10), last_name=db[husband_id]['last_name'])) family_id = generate_family( db, husband_id, wife_id, children_ids,", "if not wife_id: wife_id = generate_individual( db, start_year + int(random()*5),", "db['yougest'] = None gedcom_content = \"\"\" 0 HEAD 1 SOUR", ")} for child_id in children_ids: db[family_id]['string'] += \"1 CHIL {}\\n\".format(child_id)", "< 0.2: exclude = siblings.copy() if wife_id: exclude += [wife_id]", "db, start_year + int(random()*5), sex='M') else: print('reused {}'.format(husband_id)) if not", "= generate_individual( db, start_year + int(random()*5), sex='F') else: print('reused {}'.format(wife_id))", "i in range(n_children): children_ids.append(generate_individual( db, birth_year=marriage_year + 1 + int(random()*10),", "'\\n' db[wife_id]['string'] += \"1 FAMS \"+family_id + '\\n' generate_recursive_family(db, generations=8,", "generate_gedcom_file() def generate_individual_images(): from PIL import Image, ImageDraw, ImageFont def", "VERS 5.5 1 CHAR UTF-8 1 LANG German \"\"\".format(datetime.date.today()) def", "to_year: if sex == data['sex']: if individual_id not in exclude:", "= ImageDraw.Draw(img) font = ImageFont.truetype(r'arial.ttf', font_size) d.text(pos, text, fill=(0, 0,", "text, font_size=22, pos=(15, 40), size=(100, 100), color=(160, 160, 160)): img", "+ '\\n' db[wife_id]['string'] += \"1 FAMS \"+family_id + '\\n' generate_recursive_family(db,", "+ int(random()*5) children_ids = [] for i in range(n_children): children_ids.append(generate_individual(", "db['n_families'] += 1 marriage_date = '1 MAY {}'.format(marriage_year) family_id =", "{family_id} FAM 1 HUSB 
{husband_id} 1 WIFE {wife_id} 1 MARR", "db = {} db['n_individuals'] = 0 db['max_individuals'] = 8000 db['n_families']", "file\"\"\" db = {} db['n_individuals'] = 0 db['max_individuals'] = 8000", "'..', 'tests', 'autogenerated.ged'), 'w').write(gedcom_content) # generate_gedcom_file() def generate_individual_images(): from PIL", "start_year + 10, sex='M', exclude=exclude) if not husband_id: husband_id =", "individual_id = '@I{}@'.format(db[\"n_individuals\"]) death_year = birth_year + 40 + int(random()*20)", "+= \"1 FAMC \"+family_id + '\\n' db[children_ids[i]]['famc'] = family_id if", "== 'M' else 'female') if random() < 0.3: first_name +=", "names import os import datetime from random import random def", "not sex: sex = 'F' if random() < 0.5 else", "} birth_date = '1 JUN {}'.format(birth_year) death_date = '1 JUN", "= '@I{}@'.format(db[\"n_individuals\"]) death_year = birth_year + 40 + int(random()*20) db[individual_id]", "2 PLAC {birth_place} 1 DEAT 2 DATE {death_date} 2 PLAC", "Gramps 2 VERS 3.3.0 2 NAME Gramps 1 DATE {}", "TIME 15:35:24 1 SUBM @SUBM@ 1 COPR Copyright (c) 2020", "German \"\"\".format(datetime.date.today()) def generate_individual(db, birth_year, sex=None, last_name=None): if not sex:", "size=(100, 100), color=(160, 160, 160)): img = Image.new('RGB', size, color=color)", "for k, v in db.items(): if k.startswith('@F'): gedcom_content += v['string']", "1 HUSB {husband_id} 1 WIFE {wife_id} 1 MARR 2 DATE", "sex: sex = 'F' if random() < 0.5 else 'M'", "else 'M' first_name = names.get_first_name( gender='male' if sex == 'M'", "'female') if random() < 0.3: first_name += ' ' +", "husband_id = find_by_birth_date( db, start_year, start_year + 10, sex='M', exclude=exclude)", "== 'M' else 'female') if not last_name: last_name = names.get_last_name()", "'birth': birth_year, 'death': death_year, 'sex': sex, 'last_name': last_name } birth_date", "if db[children_ids[i] ]['sex'] == 'F' else None, children_ids) db[husband_id]['string'] +=", "k, v in db.items(): if 
k.startswith('@I'): gedcom_content += v['string'] for", "0: generate_recursive_family( db, db[children_ids[i]]['birth'], generations - 1, children_ids[i] if db[children_ids[i]", "db['max_individuals'])) marriage_year = start_year + 20 + int(random()*5) children_ids =", "if random() < 0.2: exclude = siblings.copy() if wife_id: exclude", "0.2: exclude = siblings.copy() if wife_id: exclude += [wife_id] husband_id", "open(os.path.join(os.path.dirname(__file__), '..', 'tests', 'autogenerated.ged'), 'w').write(gedcom_content) # generate_gedcom_file() def generate_individual_images(): from", "\"1 CHIL {}\\n\".format(child_id) return family_id def find_by_birth_date(db, from_year, to_year, sex,", "generate_recursive_family(db, generations=8, max_children=4) for k, v in db.items(): if k.startswith('@I'):", "from PIL import Image, ImageDraw, ImageFont def generate_one_image(filename, text, font_size=22,", "gedcom file\"\"\" db = {} db['n_individuals'] = 0 db['max_individuals'] =", "if db[children_ids[i] ]['sex'] == 'M' else None, children_ids[i] if db[children_ids[i]", "birth_year + 40 + int(random()*20) db[individual_id] = { 'birth': birth_year,", "[husband_id] wife_id = find_by_birth_date( db, start_year, start_year + 10, sex='F',", "find_by_birth_date(db, from_year, to_year, sex, exclude=[]): ids = [] for individual_id,", "db.items(): if k.startswith('@I'): gedcom_content += v['string'] for k, v in", "import random def generate_gedcom_file(): \"\"\"generate some gedcom file\"\"\" db =", "ImageDraw.Draw(img) font = ImageFont.truetype(r'arial.ttf', font_size) d.text(pos, text, fill=(0, 0, 0),", "CHAR UTF-8 1 LANG German \"\"\".format(datetime.date.today()) def generate_individual(db, birth_year, sex=None,", "not wife_id: wife_id = generate_individual( db, start_year + int(random()*5), sex='F')", "husband_id, wife_id, children_ids, marriage_year, marriage_place=None): if not marriage_place: marriage_place =", "random() < 0.3: first_name += ' ' + \\ 
names.get_first_name(gender='male'", "sex, 'last_name': last_name } birth_date = '1 JUN {}'.format(birth_year) death_date", "{individual_id} INDI 1 NAME {first_name} /{last_name}/ 1 SEX {sex} 1", "data['birth'] < to_year: if sex == data['sex']: if individual_id not", "= 8000 db['n_families'] = 0 db['yougest'] = None gedcom_content =", "n_children = int((1+random()*(max_children-1)) * (1 - db['n_individuals'] / db['max_individuals'])) marriage_year", "Image.new('RGB', size, color=color) d = ImageDraw.Draw(img) font = ImageFont.truetype(r'arial.ttf', font_size)", "husband_id=None, wife_id=None, siblings=[], max_children=5): if not husband_id: if random() <", "NAME Gramps 1 DATE {} 2 TIME 15:35:24 1 SUBM", "wife_id=None, siblings=[], max_children=5): if not husband_id: if random() < 0.2:", "husband_id = generate_individual( db, start_year + int(random()*5), sex='M') else: print('reused", "10.9: exclude = siblings.copy() + [husband_id] wife_id = find_by_birth_date( db,", "wife_id = generate_individual( db, start_year + int(random()*5), sex='F') else: print('reused", "else 'Rome' death_place = 'Zorge' if random() < 0.5 else", "os import datetime from random import random def generate_gedcom_file(): \"\"\"generate", "2 VERS 5.5 1 CHAR UTF-8 1 LANG German \"\"\".format(datetime.date.today())", "'London' if random() < 0.5 else 'Tokio' db['n_families'] += 1", "random() < 0.5 else 'Tokio' db['n_families'] += 1 marriage_date =", "> 0: generate_recursive_family( db, db[children_ids[i]]['birth'], generations - 1, children_ids[i] if", "= \"@F{}@\".format(db['n_families']) db[family_id] = {'string': \"\"\"0 {family_id} FAM 1 HUSB", "sex == 'M' else 'female') if random() < 0.3: first_name", "db['n_individuals'] = 0 db['max_individuals'] = 8000 db['n_families'] = 0 db['yougest']", "PIL import Image, ImageDraw, ImageFont def generate_one_image(filename, text, font_size=22, pos=(15,", "continue if 'famc' in data: if data['birth'] > from_year and", "DATE {marriage_date} 2 PLAC 
{marriage_place} \"\"\".format( **locals() )} for child_id", "not wife_id: if random() < 10.9: exclude = siblings.copy() +", "< 10.9: exclude = siblings.copy() + [husband_id] wife_id = find_by_birth_date(", "wife_id = find_by_birth_date( db, start_year, start_year + 10, sex='F', exclude=exclude)", "db.items(): if not individual_id.startswith('@I'): continue if 'famc' in data: if", "children_ids = [] for i in range(n_children): children_ids.append(generate_individual( db, birth_year=marriage_year", "2 DATE {death_date} 2 PLAC {death_place} \"\"\".format(**locals()) return individual_id def", "random() < 0.5 else 'Rome' death_place = 'Zorge' if random()", "in range(n_children): children_ids.append(generate_individual( db, birth_year=marriage_year + 1 + int(random()*10), last_name=db[husband_id]['last_name']))", "+ '\\n' db[children_ids[i]]['famc'] = family_id if generations > 0: generate_recursive_family(", "ids: return ids[int(random()*len(ids))] return None def generate_recursive_family(db, start_year=1000, generations=2, husband_id=None,", "PLAC {death_place} \"\"\".format(**locals()) return individual_id def generate_family(db, husband_id, wife_id, children_ids,", "COPR Copyright (c) 2020 <NAME>,,,. 
1 GEDC 2 VERS 5.5", "40), size=(100, 100), color=(160, 160, 160)): img = Image.new('RGB', size,", "d.text(pos, text, fill=(0, 0, 0), font=font) img.save(filename) for i in", "> from_year and data['birth'] < to_year: if sex == data['sex']:", "not husband_id: husband_id = generate_individual( db, start_year + int(random()*5), sex='M')", "individual_id.startswith('@I'): continue if 'famc' in data: if data['birth'] > from_year", "generate_recursive_family( db, db[children_ids[i]]['birth'], generations - 1, children_ids[i] if db[children_ids[i] ]['sex']", "death_year, 'sex': sex, 'last_name': last_name } birth_date = '1 JUN", "d = ImageDraw.Draw(img) font = ImageFont.truetype(r'arial.ttf', font_size) d.text(pos, text, fill=(0,", "children_ids, marriage_year) for i in range(n_children): db[children_ids[i]]['string'] += \"1 FAMC", "else None, children_ids[i] if db[children_ids[i] ]['sex'] == 'F' else None,", "db['yougest'] = individual_id elif db[db['yougest']]['birth'] < birth_year: db['yougest'] = individual_id", "max_children=4) for k, v in db.items(): if k.startswith('@I'): gedcom_content +=", "'Bruegge' db['n_individuals'] += 1 individual_id = '@I{}@'.format(db[\"n_individuals\"]) death_year = birth_year", "+= '0 TRLR\\n' open(os.path.join(os.path.dirname(__file__), '..', 'tests', 'autogenerated.ged'), 'w').write(gedcom_content) # generate_gedcom_file()", "= '1 JUN {}'.format(birth_year) if not db['yougest']: db['yougest'] = individual_id", "= \"\"\" 0 HEAD 1 SOUR Gramps 2 VERS 3.3.0", "generations=2, husband_id=None, wife_id=None, siblings=[], max_children=5): if not husband_id: if random()", "'Tokio' db['n_families'] += 1 marriage_date = '1 MAY {}'.format(marriage_year) family_id", "HUSB {husband_id} 1 WIFE {wife_id} 1 MARR 2 DATE {marriage_date}", "range(n_children): children_ids.append(generate_individual( db, birth_year=marriage_year + 1 + int(random()*10), last_name=db[husband_id]['last_name'])) family_id", "sex='M', exclude=exclude) if not husband_id: 
husband_id = generate_individual( db, start_year", "1 DEAT 2 DATE {death_date} 2 PLAC {death_place} \"\"\".format(**locals()) return", "import datetime from random import random def generate_gedcom_file(): \"\"\"generate some", "and data['birth'] < to_year: if sex == data['sex']: if individual_id", "font_size) d.text(pos, text, fill=(0, 0, 0), font=font) img.save(filename) for i", "VERS 3.3.0 2 NAME Gramps 1 DATE {} 2 TIME", "names.get_first_name(gender='male' if sex == 'M' else 'female') if not last_name:", "start_year + 10, sex='F', exclude=exclude) if not wife_id: wife_id =", "generate_individual( db, start_year + int(random()*5), sex='M') else: print('reused {}'.format(husband_id)) if", "if random() < 0.3: first_name += ' ' + \\", "else 'female') if not last_name: last_name = names.get_last_name() birth_place =", "generate_gedcom_file(): \"\"\"generate some gedcom file\"\"\" db = {} db['n_individuals'] =", "]['sex'] == 'M' else None, children_ids[i] if db[children_ids[i] ]['sex'] ==", "FAMS \"+family_id + '\\n' generate_recursive_family(db, generations=8, max_children=4) for k, v", "db[children_ids[i] ]['sex'] == 'M' else None, children_ids[i] if db[children_ids[i] ]['sex']", "if ids: return ids[int(random()*len(ids))] return None def generate_recursive_family(db, start_year=1000, generations=2,", "db['yougest']: db['yougest'] = individual_id elif db[db['yougest']]['birth'] < birth_year: db['yougest'] =", "\"1 FAMC \"+family_id + '\\n' db[children_ids[i]]['famc'] = family_id if generations", "BIRT 2 DATE {birth_date} 2 PLAC {birth_place} 1 DEAT 2", "'0 TRLR\\n' open(os.path.join(os.path.dirname(__file__), '..', 'tests', 'autogenerated.ged'), 'w').write(gedcom_content) # generate_gedcom_file() def", "'\\n' db[children_ids[i]]['famc'] = family_id if generations > 0: generate_recursive_family( db,", "db['yougest'] = individual_id db[individual_id]['string'] = \"\"\"0 {individual_id} INDI 1 NAME", "ImageFont def generate_one_image(filename, text, font_size=22, 
pos=(15, 40), size=(100, 100), color=(160,", "= [] for i in range(n_children): children_ids.append(generate_individual( db, birth_year=marriage_year +", "children_ids[i] if db[children_ids[i] ]['sex'] == 'M' else None, children_ids[i] if", "print('reused {}'.format(husband_id)) if not wife_id: if random() < 10.9: exclude", "'@I{}@'.format(db[\"n_individuals\"]) death_year = birth_year + 40 + int(random()*20) db[individual_id] =", "1, children_ids[i] if db[children_ids[i] ]['sex'] == 'M' else None, children_ids[i]", "if 'famc' in data: if data['birth'] > from_year and data['birth']", "= 0 db['yougest'] = None gedcom_content = \"\"\" 0 HEAD", "if not individual_id.startswith('@I'): continue if 'famc' in data: if data['birth']", "if data['birth'] > from_year and data['birth'] < to_year: if sex", "+ int(random()*5), sex='F') else: print('reused {}'.format(wife_id)) n_children = int((1+random()*(max_children-1)) *", "db[children_ids[i]]['birth'], generations - 1, children_ids[i] if db[children_ids[i] ]['sex'] == 'M'", "elif db[db['yougest']]['birth'] < birth_year: db['yougest'] = individual_id db[individual_id]['string'] = \"\"\"0", "8000 db['n_families'] = 0 db['yougest'] = None gedcom_content = \"\"\"", "wife_id: wife_id = generate_individual( db, start_year + int(random()*5), sex='F') else:", "1 LANG German \"\"\".format(datetime.date.today()) def generate_individual(db, birth_year, sex=None, last_name=None): if", "SUBM @SUBM@ 1 COPR Copyright (c) 2020 <NAME>,,,. 
1 GEDC", "husband_id: if random() < 0.2: exclude = siblings.copy() if wife_id:", "pos=(15, 40), size=(100, 100), color=(160, 160, 160)): img = Image.new('RGB',", "100), color=(160, 160, 160)): img = Image.new('RGB', size, color=color) d", "return ids[int(random()*len(ids))] return None def generate_recursive_family(db, start_year=1000, generations=2, husband_id=None, wife_id=None,", "db[children_ids[i] ]['sex'] == 'F' else None, children_ids) db[husband_id]['string'] += \"1", "exclude += [wife_id] husband_id = find_by_birth_date( db, start_year, start_year +", "generate_family( db, husband_id, wife_id, children_ids, marriage_year) for i in range(n_children):", "if sex == data['sex']: if individual_id not in exclude: ids.append(individual_id)", "15:35:24 1 SUBM @SUBM@ 1 COPR Copyright (c) 2020 <NAME>,,,.", "10, sex='M', exclude=exclude) if not husband_id: husband_id = generate_individual( db,", "individual_id not in exclude: ids.append(individual_id) if ids: return ids[int(random()*len(ids))] return", "wife_id, children_ids, marriage_year, marriage_place=None): if not marriage_place: marriage_place = 'London'", "death_year = birth_year + 40 + int(random()*20) db[individual_id] = {", "' ' + \\ names.get_first_name(gender='male' if sex == 'M' else", "(1 - db['n_individuals'] / db['max_individuals'])) marriage_year = start_year + 20", "def find_by_birth_date(db, from_year, to_year, sex, exclude=[]): ids = [] for", "{ 'birth': birth_year, 'death': death_year, 'sex': sex, 'last_name': last_name }", "birth_year, sex=None, last_name=None): if not sex: sex = 'F' if", "children_ids.append(generate_individual( db, birth_year=marriage_year + 1 + int(random()*10), last_name=db[husband_id]['last_name'])) family_id =", "if generations > 0: generate_recursive_family( db, db[children_ids[i]]['birth'], generations - 1,", "PLAC {birth_place} 1 DEAT 2 DATE {death_date} 2 PLAC {death_place}", "text, fill=(0, 0, 0), font=font) img.save(filename) for i in range(20):", "'death': 
death_year, 'sex': sex, 'last_name': last_name } birth_date = '1", "= None gedcom_content = \"\"\" 0 HEAD 1 SOUR Gramps", "individual_id db[individual_id]['string'] = \"\"\"0 {individual_id} INDI 1 NAME {first_name} /{last_name}/", "children_ids, marriage_year, marriage_place=None): if not marriage_place: marriage_place = 'London' if", "marriage_place: marriage_place = 'London' if random() < 0.5 else 'Tokio'", "10, sex='F', exclude=exclude) if not wife_id: wife_id = generate_individual( db,", "int(random()*5), sex='M') else: print('reused {}'.format(husband_id)) if not wife_id: if random()", "+= [wife_id] husband_id = find_by_birth_date( db, start_year, start_year + 10,", "<NAME>,,,. 1 GEDC 2 VERS 5.5 1 CHAR UTF-8 1", "= int((1+random()*(max_children-1)) * (1 - db['n_individuals'] / db['max_individuals'])) marriage_year =", "FAMS \"+family_id + '\\n' db[wife_id]['string'] += \"1 FAMS \"+family_id +", "' + \\ names.get_first_name(gender='male' if sex == 'M' else 'female')", "1 SEX {sex} 1 BIRT 2 DATE {birth_date} 2 PLAC", "= start_year + 20 + int(random()*5) children_ids = [] for", "= \"\"\"0 {individual_id} INDI 1 NAME {first_name} /{last_name}/ 1 SEX", "sex = 'F' if random() < 0.5 else 'M' first_name", "1 CHAR UTF-8 1 LANG German \"\"\".format(datetime.date.today()) def generate_individual(db, birth_year,", "160)): img = Image.new('RGB', size, color=color) d = ImageDraw.Draw(img) font", "2 VERS 3.3.0 2 NAME Gramps 1 DATE {} 2", "img = Image.new('RGB', size, color=color) d = ImageDraw.Draw(img) font =", "UTF-8 1 LANG German \"\"\".format(datetime.date.today()) def generate_individual(db, birth_year, sex=None, last_name=None):", "else: print('reused {}'.format(wife_id)) n_children = int((1+random()*(max_children-1)) * (1 - db['n_individuals']", "v in db.items(): if k.startswith('@F'): gedcom_content += v['string'] gedcom_content +=", "exclude=exclude) if not wife_id: wife_id = generate_individual( db, start_year +", "'1 MAY {}'.format(marriage_year) family_id = 
\"@F{}@\".format(db['n_families']) db[family_id] = {'string': \"\"\"0", "FAM 1 HUSB {husband_id} 1 WIFE {wife_id} 1 MARR 2", "2 NAME Gramps 1 DATE {} 2 TIME 15:35:24 1", "1 WIFE {wife_id} 1 MARR 2 DATE {marriage_date} 2 PLAC", "Copyright (c) 2020 <NAME>,,,. 1 GEDC 2 VERS 5.5 1", "= [] for individual_id, data in db.items(): if not individual_id.startswith('@I'):", "random() < 0.2: exclude = siblings.copy() if wife_id: exclude +=", "wife_id: if random() < 10.9: exclude = siblings.copy() + [husband_id]", "marriage_year) for i in range(n_children): db[children_ids[i]]['string'] += \"1 FAMC \"+family_id", "{marriage_date} 2 PLAC {marriage_place} \"\"\".format( **locals() )} for child_id in", "0 db['max_individuals'] = 8000 db['n_families'] = 0 db['yougest'] = None", "find_by_birth_date( db, start_year, start_year + 10, sex='M', exclude=exclude) if not", "db[db['yougest']]['birth'] < birth_year: db['yougest'] = individual_id db[individual_id]['string'] = \"\"\"0 {individual_id}", "5.5 1 CHAR UTF-8 1 LANG German \"\"\".format(datetime.date.today()) def generate_individual(db,", "2 DATE {marriage_date} 2 PLAC {marriage_place} \"\"\".format( **locals() )} for", "if k.startswith('@I'): gedcom_content += v['string'] for k, v in db.items():", "= 0 db['max_individuals'] = 8000 db['n_families'] = 0 db['yougest'] =", "random() < 10.9: exclude = siblings.copy() + [husband_id] wife_id =", "generations - 1, children_ids[i] if db[children_ids[i] ]['sex'] == 'M' else", "'tests', 'autogenerated.ged'), 'w').write(gedcom_content) # generate_gedcom_file() def generate_individual_images(): from PIL import", "in range(n_children): db[children_ids[i]]['string'] += \"1 FAMC \"+family_id + '\\n' db[children_ids[i]]['famc']", "for individual_id, data in db.items(): if not individual_id.startswith('@I'): continue if", "+ 1 + int(random()*10), last_name=db[husband_id]['last_name'])) family_id = generate_family( db, husband_id,", "if sex == 'M' else 'female') if not last_name: last_name", 
"sex='M') else: print('reused {}'.format(husband_id)) if not wife_id: if random() <", "[wife_id] husband_id = find_by_birth_date( db, start_year, start_year + 10, sex='M',", "in children_ids: db[family_id]['string'] += \"1 CHIL {}\\n\".format(child_id) return family_id def", "wife_id: exclude += [wife_id] husband_id = find_by_birth_date( db, start_year, start_year", "FAMC \"+family_id + '\\n' db[children_ids[i]]['famc'] = family_id if generations >", "'M' else 'female') if not last_name: last_name = names.get_last_name() birth_place", "in db.items(): if not individual_id.startswith('@I'): continue if 'famc' in data:", "marriage_year, marriage_place=None): if not marriage_place: marriage_place = 'London' if random()", "gedcom_content += '0 TRLR\\n' open(os.path.join(os.path.dirname(__file__), '..', 'tests', 'autogenerated.ged'), 'w').write(gedcom_content) #", "names.get_last_name() birth_place = 'Paris' if random() < 0.5 else 'Rome'", "not husband_id: if random() < 0.2: exclude = siblings.copy() if", "2 PLAC {death_place} \"\"\".format(**locals()) return individual_id def generate_family(db, husband_id, wife_id,", "+ int(random()*20) db[individual_id] = { 'birth': birth_year, 'death': death_year, 'sex':", "1 individual_id = '@I{}@'.format(db[\"n_individuals\"]) death_year = birth_year + 40 +", "'Zorge' if random() < 0.5 else 'Bruegge' db['n_individuals'] += 1", "child_id in children_ids: db[family_id]['string'] += \"1 CHIL {}\\n\".format(child_id) return family_id", "= Image.new('RGB', size, color=color) d = ImageDraw.Draw(img) font = ImageFont.truetype(r'arial.ttf',", "'F' if random() < 0.5 else 'M' first_name = names.get_first_name(", "PLAC {marriage_place} \"\"\".format( **locals() )} for child_id in children_ids: db[family_id]['string']", "+ int(random()*5), sex='M') else: print('reused {}'.format(husband_id)) if not wife_id: if", "not last_name: last_name = names.get_last_name() birth_place = 'Paris' if random()", "/ db['max_individuals'])) marriage_year = 
start_year + 20 + int(random()*5) children_ids", "- 1, children_ids[i] if db[children_ids[i] ]['sex'] == 'M' else None,", "individual_id def generate_family(db, husband_id, wife_id, children_ids, marriage_year, marriage_place=None): if not", "k, v in db.items(): if k.startswith('@F'): gedcom_content += v['string'] gedcom_content", "= siblings.copy() if wife_id: exclude += [wife_id] husband_id = find_by_birth_date(", "if wife_id: exclude += [wife_id] husband_id = find_by_birth_date( db, start_year,", "some gedcom file\"\"\" db = {} db['n_individuals'] = 0 db['max_individuals']", "k.startswith('@I'): gedcom_content += v['string'] for k, v in db.items(): if", "0.3: first_name += ' ' + \\ names.get_first_name(gender='male' if sex", "sex == 'M' else 'female') if not last_name: last_name =", "+ 10, sex='M', exclude=exclude) if not husband_id: husband_id = generate_individual(", "db, db[children_ids[i]]['birth'], generations - 1, children_ids[i] if db[children_ids[i] ]['sex'] ==", "1 GEDC 2 VERS 5.5 1 CHAR UTF-8 1 LANG", "= 'London' if random() < 0.5 else 'Tokio' db['n_families'] +=", "range(n_children): db[children_ids[i]]['string'] += \"1 FAMC \"+family_id + '\\n' db[children_ids[i]]['famc'] =", "None, children_ids[i] if db[children_ids[i] ]['sex'] == 'F' else None, children_ids)", "int(random()*10), last_name=db[husband_id]['last_name'])) family_id = generate_family( db, husband_id, wife_id, children_ids, marriage_year)", "+= ' ' + \\ names.get_first_name(gender='male' if sex == 'M'", "2 PLAC {marriage_place} \"\"\".format( **locals() )} for child_id in children_ids:", "'female') if not last_name: last_name = names.get_last_name() birth_place = 'Paris'", "# generate_gedcom_file() def generate_individual_images(): from PIL import Image, ImageDraw, ImageFont", "{} db['n_individuals'] = 0 db['max_individuals'] = 8000 db['n_families'] = 0", "\"\"\".format(**locals()) return individual_id def generate_family(db, husband_id, wife_id, children_ids, marriage_year, 
marriage_place=None):", "else None, children_ids) db[husband_id]['string'] += \"1 FAMS \"+family_id + '\\n'", "def generate_individual(db, birth_year, sex=None, last_name=None): if not sex: sex =", "1 SUBM @SUBM@ 1 COPR Copyright (c) 2020 <NAME>,,,. 1", "birth_place = 'Paris' if random() < 0.5 else 'Rome' death_place", "db['n_individuals'] / db['max_individuals'])) marriage_year = start_year + 20 + int(random()*5)", "= find_by_birth_date( db, start_year, start_year + 10, sex='M', exclude=exclude) if", "db[wife_id]['string'] += \"1 FAMS \"+family_id + '\\n' generate_recursive_family(db, generations=8, max_children=4)", "MAY {}'.format(marriage_year) family_id = \"@F{}@\".format(db['n_families']) db[family_id] = {'string': \"\"\"0 {family_id}", "if not sex: sex = 'F' if random() < 0.5", "gedcom_content += v['string'] gedcom_content += '0 TRLR\\n' open(os.path.join(os.path.dirname(__file__), '..', 'tests',", "db['n_individuals'] += 1 individual_id = '@I{}@'.format(db[\"n_individuals\"]) death_year = birth_year +", "\\ names.get_first_name(gender='male' if sex == 'M' else 'female') if not", "start_year, start_year + 10, sex='M', exclude=exclude) if not husband_id: husband_id", "2 DATE {birth_date} 2 PLAC {birth_place} 1 DEAT 2 DATE", "if random() < 10.9: exclude = siblings.copy() + [husband_id] wife_id", "1 NAME {first_name} /{last_name}/ 1 SEX {sex} 1 BIRT 2", "generate_one_image(filename, text, font_size=22, pos=(15, 40), size=(100, 100), color=(160, 160, 160)):", "= {} db['n_individuals'] = 0 db['max_individuals'] = 8000 db['n_families'] =", "= find_by_birth_date( db, start_year, start_year + 10, sex='F', exclude=exclude) if", "exclude=[]): ids = [] for individual_id, data in db.items(): if", "DATE {birth_date} 2 PLAC {birth_place} 1 DEAT 2 DATE {death_date}", "sex=None, last_name=None): if not sex: sex = 'F' if random()", "'Paris' if random() < 0.5 else 'Rome' death_place = 'Zorge'", "death_place = 'Zorge' if random() < 0.5 else 'Bruegge' db['n_individuals']", "= 
generate_family( db, husband_id, wife_id, children_ids, marriage_year) for i in", "MARR 2 DATE {marriage_date} 2 PLAC {marriage_place} \"\"\".format( **locals() )}", "\"1 FAMS \"+family_id + '\\n' db[wife_id]['string'] += \"1 FAMS \"+family_id", "else: print('reused {}'.format(husband_id)) if not wife_id: if random() < 10.9:", "+= \"1 FAMS \"+family_id + '\\n' generate_recursive_family(db, generations=8, max_children=4) for", "{marriage_place} \"\"\".format( **locals() )} for child_id in children_ids: db[family_id]['string'] +=", "\"\"\" 0 HEAD 1 SOUR Gramps 2 VERS 3.3.0 2", "gender='male' if sex == 'M' else 'female') if random() <", "{} 2 TIME 15:35:24 1 SUBM @SUBM@ 1 COPR Copyright", "for i in range(20): generate_one_image( 'tests/images/individual_I6_image_age_{}.png'.format( 1+i*4 ), 'Age {}'.format(", "GEDC 2 VERS 5.5 1 CHAR UTF-8 1 LANG German", "not individual_id.startswith('@I'): continue if 'famc' in data: if data['birth'] >", "@SUBM@ 1 COPR Copyright (c) 2020 <NAME>,,,. 1 GEDC 2", "(c) 2020 <NAME>,,,. 
1 GEDC 2 VERS 5.5 1 CHAR", "generate_individual_images(): from PIL import Image, ImageDraw, ImageFont def generate_one_image(filename, text,", "for i in range(n_children): children_ids.append(generate_individual( db, birth_year=marriage_year + 1 +", "not marriage_place: marriage_place = 'London' if random() < 0.5 else", "else 'Bruegge' db['n_individuals'] += 1 individual_id = '@I{}@'.format(db[\"n_individuals\"]) death_year =", "def generate_one_image(filename, text, font_size=22, pos=(15, 40), size=(100, 100), color=(160, 160,", "{husband_id} 1 WIFE {wife_id} 1 MARR 2 DATE {marriage_date} 2", "3.3.0 2 NAME Gramps 1 DATE {} 2 TIME 15:35:24", "if individual_id not in exclude: ids.append(individual_id) if ids: return ids[int(random()*len(ids))]", "ImageFont.truetype(r'arial.ttf', font_size) d.text(pos, text, fill=(0, 0, 0), font=font) img.save(filename) for", "return family_id def find_by_birth_date(db, from_year, to_year, sex, exclude=[]): ids =", "< 0.5 else 'Bruegge' db['n_individuals'] += 1 individual_id = '@I{}@'.format(db[\"n_individuals\"])", "db[husband_id]['string'] += \"1 FAMS \"+family_id + '\\n' db[wife_id]['string'] += \"1", "siblings.copy() + [husband_id] wife_id = find_by_birth_date( db, start_year, start_year +", "ids.append(individual_id) if ids: return ids[int(random()*len(ids))] return None def generate_recursive_family(db, start_year=1000,", "exclude = siblings.copy() if wife_id: exclude += [wife_id] husband_id =", "marriage_date = '1 MAY {}'.format(marriage_year) family_id = \"@F{}@\".format(db['n_families']) db[family_id] =", "children_ids) db[husband_id]['string'] += \"1 FAMS \"+family_id + '\\n' db[wife_id]['string'] +=", "for i in range(n_children): db[children_ids[i]]['string'] += \"1 FAMC \"+family_id +", "'M' else None, children_ids[i] if db[children_ids[i] ]['sex'] == 'F' else", "wife_id, children_ids, marriage_year) for i in range(n_children): db[children_ids[i]]['string'] += \"1", "db, start_year + int(random()*5), sex='F') else: 
print('reused {}'.format(wife_id)) n_children =", "ids[int(random()*len(ids))] return None def generate_recursive_family(db, start_year=1000, generations=2, husband_id=None, wife_id=None, siblings=[],", "+= 1 individual_id = '@I{}@'.format(db[\"n_individuals\"]) death_year = birth_year + 40", "< 0.5 else 'Tokio' db['n_families'] += 1 marriage_date = '1", "{death_place} \"\"\".format(**locals()) return individual_id def generate_family(db, husband_id, wife_id, children_ids, marriage_year,", "names.get_first_name( gender='male' if sex == 'M' else 'female') if random()", "{sex} 1 BIRT 2 DATE {birth_date} 2 PLAC {birth_place} 1", "\"\"\"0 {individual_id} INDI 1 NAME {first_name} /{last_name}/ 1 SEX {sex}", "'M' else 'female') if random() < 0.3: first_name += '", "'last_name': last_name } birth_date = '1 JUN {}'.format(birth_year) death_date =", "generate_individual( db, start_year + int(random()*5), sex='F') else: print('reused {}'.format(wife_id)) n_children", "exclude: ids.append(individual_id) if ids: return ids[int(random()*len(ids))] return None def generate_recursive_family(db,", "db, start_year, start_year + 10, sex='M', exclude=exclude) if not husband_id:", "'famc' in data: if data['birth'] > from_year and data['birth'] <", "return None def generate_recursive_family(db, start_year=1000, generations=2, husband_id=None, wife_id=None, siblings=[], max_children=5):", "'w').write(gedcom_content) # generate_gedcom_file() def generate_individual_images(): from PIL import Image, ImageDraw,", "max_children=5): if not husband_id: if random() < 0.2: exclude =", "TRLR\\n' open(os.path.join(os.path.dirname(__file__), '..', 'tests', 'autogenerated.ged'), 'w').write(gedcom_content) # generate_gedcom_file() def generate_individual_images():", "gedcom_content = \"\"\" 0 HEAD 1 SOUR Gramps 2 VERS", "int((1+random()*(max_children-1)) * (1 - db['n_individuals'] / db['max_individuals'])) marriage_year = start_year", "v['string'] gedcom_content += '0 TRLR\\n' 
open(os.path.join(os.path.dirname(__file__), '..', 'tests', 'autogenerated.ged'), 'w').write(gedcom_content)", "exclude = siblings.copy() + [husband_id] wife_id = find_by_birth_date( db, start_year,", "in db.items(): if k.startswith('@I'): gedcom_content += v['string'] for k, v", "first_name += ' ' + \\ names.get_first_name(gender='male' if sex ==", "random() < 0.5 else 'Bruegge' db['n_individuals'] += 1 individual_id =", "i in range(20): generate_one_image( 'tests/images/individual_I6_image_age_{}.png'.format( 1+i*4 ), 'Age {}'.format( 1+i*4,", "husband_id, wife_id, children_ids, marriage_year) for i in range(n_children): db[children_ids[i]]['string'] +=", "print('reused {}'.format(wife_id)) n_children = int((1+random()*(max_children-1)) * (1 - db['n_individuals'] /", "else 'female') if random() < 0.3: first_name += ' '", "'1 JUN {}'.format(birth_year) death_date = '1 JUN {}'.format(birth_year) if not", "== 'M' else None, children_ids[i] if db[children_ids[i] ]['sex'] == 'F'", "birth_year=marriage_year + 1 + int(random()*10), last_name=db[husband_id]['last_name'])) family_id = generate_family( db,", "{}\\n\".format(child_id) return family_id def find_by_birth_date(db, from_year, to_year, sex, exclude=[]): ids", "db[family_id] = {'string': \"\"\"0 {family_id} FAM 1 HUSB {husband_id} 1", "sex='F', exclude=exclude) if not wife_id: wife_id = generate_individual( db, start_year", "import os import datetime from random import random def generate_gedcom_file():", "'sex': sex, 'last_name': last_name } birth_date = '1 JUN {}'.format(birth_year)", "+= v['string'] for k, v in db.items(): if k.startswith('@F'): gedcom_content", "0 HEAD 1 SOUR Gramps 2 VERS 3.3.0 2 NAME", "0, 0), font=font) img.save(filename) for i in range(20): generate_one_image( 'tests/images/individual_I6_image_age_{}.png'.format(", "random() < 0.5 else 'M' first_name = names.get_first_name( gender='male' if", "if not husband_id: if random() < 0.2: exclude = siblings.copy()", "font_size=22, pos=(15, 40), 
size=(100, 100), color=(160, 160, 160)): img =", "= ImageFont.truetype(r'arial.ttf', font_size) d.text(pos, text, fill=(0, 0, 0), font=font) img.save(filename)", "db['n_families'] = 0 db['yougest'] = None gedcom_content = \"\"\" 0", "= family_id if generations > 0: generate_recursive_family( db, db[children_ids[i]]['birth'], generations", "ids = [] for individual_id, data in db.items(): if not", "'autogenerated.ged'), 'w').write(gedcom_content) # generate_gedcom_file() def generate_individual_images(): from PIL import Image,", "generate_individual(db, birth_year, sex=None, last_name=None): if not sex: sex = 'F'", "= names.get_first_name( gender='male' if sex == 'M' else 'female') if", "20 + int(random()*5) children_ids = [] for i in range(n_children):", "data in db.items(): if not individual_id.startswith('@I'): continue if 'famc' in", "{first_name} /{last_name}/ 1 SEX {sex} 1 BIRT 2 DATE {birth_date}", "sex='F') else: print('reused {}'.format(wife_id)) n_children = int((1+random()*(max_children-1)) * (1 -", "1 SOUR Gramps 2 VERS 3.3.0 2 NAME Gramps 1", "{birth_date} 2 PLAC {birth_place} 1 DEAT 2 DATE {death_date} 2", "{}'.format(marriage_year) family_id = \"@F{}@\".format(db['n_families']) db[family_id] = {'string': \"\"\"0 {family_id} FAM", "def generate_family(db, husband_id, wife_id, children_ids, marriage_year, marriage_place=None): if not marriage_place:", "SOUR Gramps 2 VERS 3.3.0 2 NAME Gramps 1 DATE", "not in exclude: ids.append(individual_id) if ids: return ids[int(random()*len(ids))] return None", "= '1 MAY {}'.format(marriage_year) family_id = \"@F{}@\".format(db['n_families']) db[family_id] = {'string':", "sex, exclude=[]): ids = [] for individual_id, data in db.items():", "husband_id: husband_id = generate_individual( db, start_year + int(random()*5), sex='M') else:", "if not wife_id: if random() < 10.9: exclude = siblings.copy()", "db[children_ids[i]]['string'] += \"1 FAMC \"+family_id + '\\n' db[children_ids[i]]['famc'] = family_id", "Image, 
ImageDraw, ImageFont def generate_one_image(filename, text, font_size=22, pos=(15, 40), size=(100,", "db[children_ids[i]]['famc'] = family_id if generations > 0: generate_recursive_family( db, db[children_ids[i]]['birth'],", "DATE {death_date} 2 PLAC {death_place} \"\"\".format(**locals()) return individual_id def generate_family(db,", "+ [husband_id] wife_id = find_by_birth_date( db, start_year, start_year + 10,", "int(random()*5), sex='F') else: print('reused {}'.format(wife_id)) n_children = int((1+random()*(max_children-1)) * (1" ]
[ "the destination. # Calculation the angle in radians between the", "could do this # only when the bullet fires, but", "the mouse moves. \"\"\" self.player.center_x = x self.player.center_y = y", "You can leave this out of your own # code,", "BULLET_SPEED self.bullet_list.append(bullet) # Get rid of the bullet when it", "self.player_list.append(self.player) # Add top-left enemy ship enemy = arcade.Sprite(\":resources:images/space_shooter/playerShip1_green.png\", 0.5)", "your own # code, but it is needed to easily", "1 # Loop through each enemy that we have for", "shoot bullets aimed at the player. If Python and Arcade", "dest_y = self.player.center_y # Do math to calculate how to", "destination. # Calculation the angle in radians between the start", "bullet will travel. x_diff = dest_x - start_x y_diff =", "is in. You can leave this out of your own", "game logic goes here. \"\"\" self.frame_count += 1 # Loop", "file is in. You can leave this out of your", "at the top of this program. file_path = os.path.dirname(os.path.abspath(__file__)) os.chdir(file_path)", "the angle to the player. We could do this #", "but in this case we will rotate # the enemy", "x self.player.center_y = y def main(): \"\"\" Main method \"\"\"", "= x self.player.center_y = y def main(): \"\"\" Main method", "frame if self.frame_count % 60 == 0: bullet = arcade.Sprite(\":resources:images/space_shooter/laserBlue01.png\")", "self.player.center_y # Do math to calculate how to get the", "with: python -m arcade.examples.sprite_bullets_enemy_aims \"\"\" import arcade import math import", "math.degrees(angle)-90 # Shoot every 60 frames change of shooting each", "for enemy in self.enemy_list: # First, calculate the angle to", "arcade.SpriteList() # Add player ship self.player = arcade.Sprite(\":resources:images/space_shooter/playerShip1_orange.png\", 0.5) self.player_list.append(self.player)", "each frame, so we'll do this # each frame. 
#", "destination location for the bullet dest_x = self.player.center_x dest_y =", "of shooting each frame if self.frame_count % 60 == 0:", "move, and the game logic goes here. \"\"\" self.frame_count +=", "Get rid of the bullet when it flies off-screen for", "Loop through each enemy that we have for enemy in", "\"\"\"All the logic to move, and the game logic goes", "height, title) # Set the working directory (where we expect", "enemy in self.enemy_list: # First, calculate the angle to the", "points # and end points. This is the angle the", "how to have enemies shoot bullets aimed at the player.", "mentioned at the top of this program. file_path = os.path.dirname(os.path.abspath(__file__))", "in self.enemy_list: # First, calculate the angle to the player.", "fires, but in this case we will rotate # the", "top of this program. file_path = os.path.dirname(os.path.abspath(__file__)) os.chdir(file_path) arcade.set_background_color(arcade.color.BLACK) self.frame_count", "and end points. This is the angle the bullet will", "the enemy to face the player. enemy.angle = math.degrees(angle)-90 #", "face the player. enemy.angle = math.degrees(angle)-90 # Shoot every 60", "window = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE) window.setup() arcade.run() if __name__ ==", "os SCREEN_WIDTH = 800 SCREEN_HEIGHT = 600 SCREEN_TITLE = \"Sprites", "enemy that we have for enemy in self.enemy_list: # First,", "SCREEN_TITLE = \"Sprites and Bullets Enemy Aims Example\" BULLET_SPEED =", "enemy.center_x = 120 enemy.center_y = SCREEN_HEIGHT - enemy.height enemy.angle =", "self.frame_count = 0 self.enemy_list = None self.bullet_list = None self.player_list", "of the bullet when it flies off-screen for bullet in", "logic goes here. 
\"\"\" self.frame_count += 1 # Loop through", "self.bullet_list.append(bullet) # Get rid of the bullet when it flies", "# Position the start at the enemy's current location start_x", "top-left enemy ship enemy = arcade.Sprite(\":resources:images/space_shooter/playerShip1_green.png\", 0.5) enemy.center_x = 120", "on_update(self, delta_time): \"\"\"All the logic to move, and the game", "line with: python -m arcade.examples.sprite_bullets_enemy_aims \"\"\" import arcade import math", "# each frame. # Position the start at the enemy's", "start at the enemy's current location start_x = enemy.center_x start_y", "in this case we will rotate # the enemy to", "be run from the command line with: python -m arcade.examples.sprite_bullets_enemy_aims", "from the command line with: python -m arcade.examples.sprite_bullets_enemy_aims \"\"\" import", "self.bullet_list.draw() self.player_list.draw() def on_update(self, delta_time): \"\"\"All the logic to move,", "we have for enemy in self.enemy_list: # First, calculate the", "is how fast the bullet travels. bullet.change_x = math.cos(angle) *", "# Get the destination location for the bullet dest_x =", "self.bullet_list: if bullet.top < 0: bullet.remove_from_sprite_lists() self.bullet_list.update() def on_mouse_motion(self, x,", "# and end points. This is the angle the bullet", "player. 
We could do this # only when the bullet", "= math.cos(angle) * BULLET_SPEED bullet.change_y = math.sin(angle) * BULLET_SPEED self.bullet_list.append(bullet)", "Add top-left enemy ship enemy = arcade.Sprite(\":resources:images/space_shooter/playerShip1_green.png\", 0.5) enemy.center_x =", "Position the start at the enemy's current location start_x =", "0.5) self.player_list.append(self.player) # Add top-left enemy ship enemy = arcade.Sprite(\":resources:images/space_shooter/playerShip1_green.png\",", "self.player = arcade.Sprite(\":resources:images/space_shooter/playerShip1_orange.png\", 0.5) self.player_list.append(self.player) # Add top-left enemy ship", "We could do this # only when the bullet fires,", "def main(): \"\"\" Main method \"\"\" window = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT,", "example can be run from the command line with: python", "= math.sin(angle) * BULLET_SPEED self.bullet_list.append(bullet) # Get rid of the", "= math.atan2(y_diff, x_diff) # Set the enemy to face the", "each frame. # Position the start at the enemy's current", "enemy to face the player each frame, so we'll do", "the bullet will travel. x_diff = dest_x - start_x y_diff", "location start_x = enemy.center_x start_y = enemy.center_y # Get the", "enemies shoot bullets aimed at the player. If Python and", "to face the player each frame, so we'll do this", "class \"\"\" def __init__(self, width, height, title): super().__init__(width, height, title)", "is needed to easily run the examples using \"python -m\"", "enemy's current location start_x = enemy.center_x start_y = enemy.center_y #", "= 4 class MyGame(arcade.Window): \"\"\" Main application class \"\"\" def", "and the game logic goes here. \"\"\" self.frame_count += 1", "\"\"\" arcade.start_render() self.enemy_list.draw() self.bullet_list.draw() self.player_list.draw() def on_update(self, delta_time): \"\"\"All the", "in. 
You can leave this out of your own #", "enemy.center_x start_y = enemy.center_y # Get the destination location for", "the examples using \"python -m\" # as mentioned at the", "the destination location for the bullet dest_x = self.player.center_x dest_y", "self.bullet_list = None self.player_list = None self.player = None def", "Taking into account the angle, calculate our change_x # and", "into account the angle, calculate our change_x # and change_y.", "+= 1 # Loop through each enemy that we have", "If Python and Arcade are installed, this example can be", "4 class MyGame(arcade.Window): \"\"\" Main application class \"\"\" def __init__(self,", "None self.bullet_list = None self.player_list = None self.player = None", "our change_x # and change_y. Velocity is how fast the", "to find files) to the same # directory this .py", "None self.player_list = None self.player = None def setup(self): self.enemy_list", "Main method \"\"\" window = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE) window.setup() arcade.run()", "self.frame_count % 60 == 0: bullet = arcade.Sprite(\":resources:images/space_shooter/laserBlue01.png\") bullet.center_x =", "delta_y): \"\"\"Called whenever the mouse moves. \"\"\" self.player.center_x = x", "Example\" BULLET_SPEED = 4 class MyGame(arcade.Window): \"\"\" Main application class", "the screen. \"\"\" arcade.start_render() self.enemy_list.draw() self.bullet_list.draw() self.player_list.draw() def on_update(self, delta_time):", "angle, calculate our change_x # and change_y. Velocity is how", "# as mentioned at the top of this program. file_path", "__init__(self, width, height, title): super().__init__(width, height, title) # Set the", "= arcade.SpriteList() # Add player ship self.player = arcade.Sprite(\":resources:images/space_shooter/playerShip1_orange.png\", 0.5)", "leave this out of your own # code, but it", "Get the destination location for the bullet dest_x = self.player.center_x", "enemy to face the player. 
enemy.angle = math.degrees(angle)-90 # Shoot", "# Calculation the angle in radians between the start points", "each frame if self.frame_count % 60 == 0: bullet =", "run the examples using \"python -m\" # as mentioned at", "# First, calculate the angle to the player. We could", "bullet sprite bullet.angle = math.degrees(angle) # Taking into account the", "and Bullets Enemy Aims Example\" BULLET_SPEED = 4 class MyGame(arcade.Window):", "the start at the enemy's current location start_x = enemy.center_x", "# Loop through each enemy that we have for enemy", "and Arcade are installed, this example can be run from", "arcade.set_background_color(arcade.color.BLACK) self.frame_count = 0 self.enemy_list = None self.bullet_list = None", "dest_y - start_y angle = math.atan2(y_diff, x_diff) # Set the", "self.enemy_list = arcade.SpriteList() self.bullet_list = arcade.SpriteList() self.player_list = arcade.SpriteList() #", "bullet dest_x = self.player.center_x dest_y = self.player.center_y # Do math", "-m\" # as mentioned at the top of this program.", "arcade.Sprite(\":resources:images/space_shooter/playerShip1_green.png\", 0.5) enemy.center_x = SCREEN_WIDTH - 120 enemy.center_y = SCREEN_HEIGHT", "self.enemy_list.draw() self.bullet_list.draw() self.player_list.draw() def on_update(self, delta_time): \"\"\"All the logic to", "travels. bullet.change_x = math.cos(angle) * BULLET_SPEED bullet.change_y = math.sin(angle) *", "def on_mouse_motion(self, x, y, delta_x, delta_y): \"\"\"Called whenever the mouse", "0 self.enemy_list = None self.bullet_list = None self.player_list = None", "enemy.angle = 180 self.enemy_list.append(enemy) def on_draw(self): \"\"\"Render the screen. \"\"\"", "between the start points # and end points. 
This is", "enemy = arcade.Sprite(\":resources:images/space_shooter/playerShip1_green.png\", 0.5) enemy.center_x = SCREEN_WIDTH - 120 enemy.center_y", "arcade.Sprite(\":resources:images/space_shooter/laserBlue01.png\") bullet.center_x = start_x bullet.center_y = start_y # Angle the", "enemy = arcade.Sprite(\":resources:images/space_shooter/playerShip1_green.png\", 0.5) enemy.center_x = 120 enemy.center_y = SCREEN_HEIGHT", "account the angle, calculate our change_x # and change_y. Velocity", "= arcade.Sprite(\":resources:images/space_shooter/playerShip1_green.png\", 0.5) enemy.center_x = 120 enemy.center_y = SCREEN_HEIGHT -", "python -m arcade.examples.sprite_bullets_enemy_aims \"\"\" import arcade import math import os", "\"\"\" self.player.center_x = x self.player.center_y = y def main(): \"\"\"", "to easily run the examples using \"python -m\" # as", "in radians between the start points # and end points.", "self.enemy_list.append(enemy) def on_draw(self): \"\"\"Render the screen. \"\"\" arcade.start_render() self.enemy_list.draw() self.bullet_list.draw()", "the bullet when it flies off-screen for bullet in self.bullet_list:", "each enemy that we have for enemy in self.enemy_list: #", "rid of the bullet when it flies off-screen for bullet", "this case we will rotate # the enemy to face", "angle in radians between the start points # and end", "arcade.start_render() self.enemy_list.draw() self.bullet_list.draw() self.player_list.draw() def on_update(self, delta_time): \"\"\"All the logic", "= math.degrees(angle) # Taking into account the angle, calculate our", "= enemy.center_x start_y = enemy.center_y # Get the destination location", "* BULLET_SPEED bullet.change_y = math.sin(angle) * BULLET_SPEED self.bullet_list.append(bullet) # Get", "# code, but it is needed to easily run the", "120 enemy.center_y = SCREEN_HEIGHT - enemy.height enemy.angle = 180 self.enemy_list.append(enemy)", "in self.bullet_list: if bullet.top < 0: bullet.remove_from_sprite_lists() 
self.bullet_list.update() def on_mouse_motion(self,", "flies off-screen for bullet in self.bullet_list: if bullet.top < 0:", "this program. file_path = os.path.dirname(os.path.abspath(__file__)) os.chdir(file_path) arcade.set_background_color(arcade.color.BLACK) self.frame_count = 0", "-m arcade.examples.sprite_bullets_enemy_aims \"\"\" import arcade import math import os SCREEN_WIDTH", "= arcade.Sprite(\":resources:images/space_shooter/playerShip1_green.png\", 0.5) enemy.center_x = SCREEN_WIDTH - 120 enemy.center_y =", "we will rotate # the enemy to face the player", "super().__init__(width, height, title) # Set the working directory (where we", "y def main(): \"\"\" Main method \"\"\" window = MyGame(SCREEN_WIDTH,", "mouse moves. \"\"\" self.player.center_x = x self.player.center_y = y def", "is the angle the bullet will travel. x_diff = dest_x", "fast the bullet travels. bullet.change_x = math.cos(angle) * BULLET_SPEED bullet.change_y", "= os.path.dirname(os.path.abspath(__file__)) os.chdir(file_path) arcade.set_background_color(arcade.color.BLACK) self.frame_count = 0 self.enemy_list = None", "to move, and the game logic goes here. \"\"\" self.frame_count", "\"\"\"Called whenever the mouse moves. 
\"\"\" self.player.center_x = x self.player.center_y", "do this # only when the bullet fires, but in", "the angle in radians between the start points # and", "but it is needed to easily run the examples using", "self.player.center_x = x self.player.center_y = y def main(): \"\"\" Main", "start_y angle = math.atan2(y_diff, x_diff) # Set the enemy to", "enemy.angle = 180 self.enemy_list.append(enemy) # Add top-right enemy ship enemy", "= 600 SCREEN_TITLE = \"Sprites and Bullets Enemy Aims Example\"", "start_y = enemy.center_y # Get the destination location for the", "Bullets Enemy Aims Example\" BULLET_SPEED = 4 class MyGame(arcade.Window): \"\"\"", "Enemy Aims Example\" BULLET_SPEED = 4 class MyGame(arcade.Window): \"\"\" Main", "delta_x, delta_y): \"\"\"Called whenever the mouse moves. \"\"\" self.player.center_x =", "= MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE) window.setup() arcade.run() if __name__ == \"__main__\":", "\"\"\" window = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE) window.setup() arcade.run() if __name__", "Show how to have enemies shoot bullets aimed at the", "180 self.enemy_list.append(enemy) def on_draw(self): \"\"\"Render the screen. \"\"\" arcade.start_render() self.enemy_list.draw()", "bullet.angle = math.degrees(angle) # Taking into account the angle, calculate", "start_y # Angle the bullet sprite bullet.angle = math.degrees(angle) #", "calculate the angle to the player. We could do this", "# Shoot every 60 frames change of shooting each frame", "the bullet to the destination. 
# Calculation the angle in", "== 0: bullet = arcade.Sprite(\":resources:images/space_shooter/laserBlue01.png\") bullet.center_x = start_x bullet.center_y =", "the bullet sprite bullet.angle = math.degrees(angle) # Taking into account", "bullet.center_x = start_x bullet.center_y = start_y # Angle the bullet", "the bullet fires, but in this case we will rotate", "ship self.player = arcade.Sprite(\":resources:images/space_shooter/playerShip1_orange.png\", 0.5) self.player_list.append(self.player) # Add top-left enemy", "# Do math to calculate how to get the bullet", "class MyGame(arcade.Window): \"\"\" Main application class \"\"\" def __init__(self, width,", "how to get the bullet to the destination. # Calculation", "# directory this .py file is in. You can leave", "whenever the mouse moves. \"\"\" self.player.center_x = x self.player.center_y =", "files) to the same # directory this .py file is", "= arcade.SpriteList() self.player_list = arcade.SpriteList() # Add player ship self.player", "here. \"\"\" self.frame_count += 1 # Loop through each enemy", "needed to easily run the examples using \"python -m\" #", "= math.degrees(angle)-90 # Shoot every 60 frames change of shooting", "bullet.top < 0: bullet.remove_from_sprite_lists() self.bullet_list.update() def on_mouse_motion(self, x, y, delta_x,", "the working directory (where we expect to find files) to", "have enemies shoot bullets aimed at the player. If Python", "the bullet dest_x = self.player.center_x dest_y = self.player.center_y # Do", "enemy.center_x = SCREEN_WIDTH - 120 enemy.center_y = SCREEN_HEIGHT - enemy.height", "to the destination. # Calculation the angle in radians between", "moves. 
\"\"\" self.player.center_x = x self.player.center_y = y def main():", "start_x = enemy.center_x start_y = enemy.center_y # Get the destination", "self.bullet_list.update() def on_mouse_motion(self, x, y, delta_x, delta_y): \"\"\"Called whenever the", "import math import os SCREEN_WIDTH = 800 SCREEN_HEIGHT = 600", "frame, so we'll do this # each frame. # Position", "height, title): super().__init__(width, height, title) # Set the working directory", "code, but it is needed to easily run the examples", "player ship self.player = arcade.Sprite(\":resources:images/space_shooter/playerShip1_orange.png\", 0.5) self.player_list.append(self.player) # Add top-left", "sprite bullet.angle = math.degrees(angle) # Taking into account the angle,", "import arcade import math import os SCREEN_WIDTH = 800 SCREEN_HEIGHT", "math.degrees(angle) # Taking into account the angle, calculate our change_x", "os.chdir(file_path) arcade.set_background_color(arcade.color.BLACK) self.frame_count = 0 self.enemy_list = None self.bullet_list =", "expect to find files) to the same # directory this", "y_diff = dest_y - start_y angle = math.atan2(y_diff, x_diff) #", "the angle, calculate our change_x # and change_y. Velocity is", "self.frame_count += 1 # Loop through each enemy that we", "dest_x - start_x y_diff = dest_y - start_y angle =", "on_draw(self): \"\"\"Render the screen. \"\"\" arcade.start_render() self.enemy_list.draw() self.bullet_list.draw() self.player_list.draw() def", "self.player_list = None self.player = None def setup(self): self.enemy_list =", "change_y. Velocity is how fast the bullet travels. bullet.change_x =", "0.5) enemy.center_x = 120 enemy.center_y = SCREEN_HEIGHT - enemy.height enemy.angle", "player. enemy.angle = math.degrees(angle)-90 # Shoot every 60 frames change", "= 180 self.enemy_list.append(enemy) # Add top-right enemy ship enemy =", "x, y, delta_x, delta_y): \"\"\"Called whenever the mouse moves. 
\"\"\"", "enemy.height enemy.angle = 180 self.enemy_list.append(enemy) # Add top-right enemy ship", "to face the player. enemy.angle = math.degrees(angle)-90 # Shoot every", "file_path = os.path.dirname(os.path.abspath(__file__)) os.chdir(file_path) arcade.set_background_color(arcade.color.BLACK) self.frame_count = 0 self.enemy_list =", "start_x bullet.center_y = start_y # Angle the bullet sprite bullet.angle", "Add top-right enemy ship enemy = arcade.Sprite(\":resources:images/space_shooter/playerShip1_green.png\", 0.5) enemy.center_x =", "on_mouse_motion(self, x, y, delta_x, delta_y): \"\"\"Called whenever the mouse moves.", "enemy.center_y = SCREEN_HEIGHT - enemy.height enemy.angle = 180 self.enemy_list.append(enemy) #", "= arcade.Sprite(\":resources:images/space_shooter/playerShip1_orange.png\", 0.5) self.player_list.append(self.player) # Add top-left enemy ship enemy", "math import os SCREEN_WIDTH = 800 SCREEN_HEIGHT = 600 SCREEN_TITLE", "= y def main(): \"\"\" Main method \"\"\" window =", "to the same # directory this .py file is in.", "goes here. 
\"\"\" self.frame_count += 1 # Loop through each", "None def setup(self): self.enemy_list = arcade.SpriteList() self.bullet_list = arcade.SpriteList() self.player_list", "SCREEN_HEIGHT = 600 SCREEN_TITLE = \"Sprites and Bullets Enemy Aims", "bullet.change_y = math.sin(angle) * BULLET_SPEED self.bullet_list.append(bullet) # Get rid of", "dest_x = self.player.center_x dest_y = self.player.center_y # Do math to", "enemy.height enemy.angle = 180 self.enemy_list.append(enemy) def on_draw(self): \"\"\"Render the screen.", "# Add player ship self.player = arcade.Sprite(\":resources:images/space_shooter/playerShip1_orange.png\", 0.5) self.player_list.append(self.player) #", "for bullet in self.bullet_list: if bullet.top < 0: bullet.remove_from_sprite_lists() self.bullet_list.update()", "Set the working directory (where we expect to find files)", "\"\"\" self.frame_count += 1 # Loop through each enemy that", "y, delta_x, delta_y): \"\"\"Called whenever the mouse moves. \"\"\" self.player.center_x", "bullet fires, but in this case we will rotate #", "the enemy to face the player each frame, so we'll", "method \"\"\" window = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE) window.setup() arcade.run() if", "location for the bullet dest_x = self.player.center_x dest_y = self.player.center_y", "frames change of shooting each frame if self.frame_count % 60", "Velocity is how fast the bullet travels. bullet.change_x = math.cos(angle)", "# only when the bullet fires, but in this case", "easily run the examples using \"python -m\" # as mentioned", "the game logic goes here. 
\"\"\" self.frame_count += 1 #", "are installed, this example can be run from the command", "arcade.SpriteList() self.bullet_list = arcade.SpriteList() self.player_list = arcade.SpriteList() # Add player", "Angle the bullet sprite bullet.angle = math.degrees(angle) # Taking into", "= SCREEN_HEIGHT - enemy.height enemy.angle = 180 self.enemy_list.append(enemy) def on_draw(self):", "Shoot every 60 frames change of shooting each frame if", "application class \"\"\" def __init__(self, width, height, title): super().__init__(width, height,", "SCREEN_HEIGHT - enemy.height enemy.angle = 180 self.enemy_list.append(enemy) # Add top-right", "at the enemy's current location start_x = enemy.center_x start_y =", "= self.player.center_y # Do math to calculate how to get", "installed, this example can be run from the command line", "# Set the working directory (where we expect to find", "same # directory this .py file is in. You can", "Python and Arcade are installed, this example can be run", "self.enemy_list: # First, calculate the angle to the player. We", "we'll do this # each frame. # Position the start", "60 frames change of shooting each frame if self.frame_count %", "calculate how to get the bullet to the destination. #", "BULLET_SPEED bullet.change_y = math.sin(angle) * BULLET_SPEED self.bullet_list.append(bullet) # Get rid", "self.bullet_list = arcade.SpriteList() self.player_list = arcade.SpriteList() # Add player ship", "have for enemy in self.enemy_list: # First, calculate the angle", "< 0: bullet.remove_from_sprite_lists() self.bullet_list.update() def on_mouse_motion(self, x, y, delta_x, delta_y):", "60 == 0: bullet = arcade.Sprite(\":resources:images/space_shooter/laserBlue01.png\") bullet.center_x = start_x bullet.center_y", "the player. We could do this # only when the", "arcade.Sprite(\":resources:images/space_shooter/playerShip1_green.png\", 0.5) enemy.center_x = 120 enemy.center_y = SCREEN_HEIGHT - enemy.height", "logic to move, and the game logic goes here. 
\"\"\"", "end points. This is the angle the bullet will travel.", "x_diff) # Set the enemy to face the player. enemy.angle", "def on_update(self, delta_time): \"\"\"All the logic to move, and the", "# Taking into account the angle, calculate our change_x #", "through each enemy that we have for enemy in self.enemy_list:", "the angle the bullet will travel. x_diff = dest_x -", "= start_y # Angle the bullet sprite bullet.angle = math.degrees(angle)", "start points # and end points. This is the angle", "points. This is the angle the bullet will travel. x_diff", "Do math to calculate how to get the bullet to", "- 120 enemy.center_y = SCREEN_HEIGHT - enemy.height enemy.angle = 180", "delta_time): \"\"\"All the logic to move, and the game logic", "\"python -m\" # as mentioned at the top of this", "main(): \"\"\" Main method \"\"\" window = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)", "= dest_x - start_x y_diff = dest_y - start_y angle", "= None self.player_list = None self.player = None def setup(self):", "self.enemy_list.append(enemy) # Add top-right enemy ship enemy = arcade.Sprite(\":resources:images/space_shooter/playerShip1_green.png\", 0.5)", "the player each frame, so we'll do this # each", "math.sin(angle) * BULLET_SPEED self.bullet_list.append(bullet) # Get rid of the bullet", "it is needed to easily run the examples using \"python", "out of your own # code, but it is needed", "ship enemy = arcade.Sprite(\":resources:images/space_shooter/playerShip1_green.png\", 0.5) enemy.center_x = 120 enemy.center_y =", "bullet.remove_from_sprite_lists() self.bullet_list.update() def on_mouse_motion(self, x, y, delta_x, delta_y): \"\"\"Called whenever", "Arcade are installed, this example can be run from the", "800 SCREEN_HEIGHT = 600 SCREEN_TITLE = \"Sprites and Bullets Enemy", "this out of your own # code, but it is", ".py file is in. You can leave this out of", "change_x # and change_y. 
Velocity is how fast the bullet", "Main application class \"\"\" def __init__(self, width, height, title): super().__init__(width,", "using \"python -m\" # as mentioned at the top of", "= None self.player = None def setup(self): self.enemy_list = arcade.SpriteList()", "- enemy.height enemy.angle = 180 self.enemy_list.append(enemy) def on_draw(self): \"\"\"Render the", "this .py file is in. You can leave this out", "math to calculate how to get the bullet to the", "self.player.center_x dest_y = self.player.center_y # Do math to calculate how", "\"\"\" Main application class \"\"\" def __init__(self, width, height, title):", "os.path.dirname(os.path.abspath(__file__)) os.chdir(file_path) arcade.set_background_color(arcade.color.BLACK) self.frame_count = 0 self.enemy_list = None self.bullet_list", "radians between the start points # and end points. This", "angle to the player. We could do this # only", "# Get rid of the bullet when it flies off-screen", "top-right enemy ship enemy = arcade.Sprite(\":resources:images/space_shooter/playerShip1_green.png\", 0.5) enemy.center_x = SCREEN_WIDTH", "None self.player = None def setup(self): self.enemy_list = arcade.SpriteList() self.bullet_list", "aimed at the player. If Python and Arcade are installed,", "# the enemy to face the player each frame, so", "= 180 self.enemy_list.append(enemy) def on_draw(self): \"\"\"Render the screen. \"\"\" arcade.start_render()", "math.atan2(y_diff, x_diff) # Set the enemy to face the player.", "SCREEN_HEIGHT - enemy.height enemy.angle = 180 self.enemy_list.append(enemy) def on_draw(self): \"\"\"Render", "enemy.center_y # Get the destination location for the bullet dest_x", "program. 
file_path = os.path.dirname(os.path.abspath(__file__)) os.chdir(file_path) arcade.set_background_color(arcade.color.BLACK) self.frame_count = 0 self.enemy_list", "def __init__(self, width, height, title): super().__init__(width, height, title) # Set", "setup(self): self.enemy_list = arcade.SpriteList() self.bullet_list = arcade.SpriteList() self.player_list = arcade.SpriteList()", "# Angle the bullet sprite bullet.angle = math.degrees(angle) # Taking", "self.player_list.draw() def on_update(self, delta_time): \"\"\"All the logic to move, and", "= SCREEN_HEIGHT - enemy.height enemy.angle = 180 self.enemy_list.append(enemy) # Add", "MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE) window.setup() arcade.run() if __name__ == \"__main__\": main()", "\"\"\" Show how to have enemies shoot bullets aimed at", "case we will rotate # the enemy to face the", "when it flies off-screen for bullet in self.bullet_list: if bullet.top", "= 0 self.enemy_list = None self.bullet_list = None self.player_list =", "0: bullet.remove_from_sprite_lists() self.bullet_list.update() def on_mouse_motion(self, x, y, delta_x, delta_y): \"\"\"Called", "- start_y angle = math.atan2(y_diff, x_diff) # Set the enemy", "shooting each frame if self.frame_count % 60 == 0: bullet", "if bullet.top < 0: bullet.remove_from_sprite_lists() self.bullet_list.update() def on_mouse_motion(self, x, y,", "frame. # Position the start at the enemy's current location", "do this # each frame. # Position the start at", "the bullet travels. bullet.change_x = math.cos(angle) * BULLET_SPEED bullet.change_y =", "(where we expect to find files) to the same #", "# Add top-left enemy ship enemy = arcade.Sprite(\":resources:images/space_shooter/playerShip1_green.png\", 0.5) enemy.center_x", "\"\"\" def __init__(self, width, height, title): super().__init__(width, height, title) #", "directory this .py file is in. 
You can leave this", "the logic to move, and the game logic goes here.", "* BULLET_SPEED self.bullet_list.append(bullet) # Get rid of the bullet when", "angle = math.atan2(y_diff, x_diff) # Set the enemy to face", "at the player. If Python and Arcade are installed, this", "directory (where we expect to find files) to the same", "player. If Python and Arcade are installed, this example can", "this example can be run from the command line with:", "= \"Sprites and Bullets Enemy Aims Example\" BULLET_SPEED = 4", "the enemy's current location start_x = enemy.center_x start_y = enemy.center_y", "it flies off-screen for bullet in self.bullet_list: if bullet.top <", "when the bullet fires, but in this case we will", "SCREEN_WIDTH = 800 SCREEN_HEIGHT = 600 SCREEN_TITLE = \"Sprites and", "= arcade.SpriteList() self.bullet_list = arcade.SpriteList() self.player_list = arcade.SpriteList() # Add", "travel. x_diff = dest_x - start_x y_diff = dest_y -", "the top of this program. file_path = os.path.dirname(os.path.abspath(__file__)) os.chdir(file_path) arcade.set_background_color(arcade.color.BLACK)", "arcade.SpriteList() self.player_list = arcade.SpriteList() # Add player ship self.player =", "= None def setup(self): self.enemy_list = arcade.SpriteList() self.bullet_list = arcade.SpriteList()", "this # only when the bullet fires, but in this", "600 SCREEN_TITLE = \"Sprites and Bullets Enemy Aims Example\" BULLET_SPEED", "every 60 frames change of shooting each frame if self.frame_count", "This is the angle the bullet will travel. x_diff =", "run from the command line with: python -m arcade.examples.sprite_bullets_enemy_aims \"\"\"", "screen. 
\"\"\" arcade.start_render() self.enemy_list.draw() self.bullet_list.draw() self.player_list.draw() def on_update(self, delta_time): \"\"\"All", "command line with: python -m arcade.examples.sprite_bullets_enemy_aims \"\"\" import arcade import", "import os SCREEN_WIDTH = 800 SCREEN_HEIGHT = 600 SCREEN_TITLE =", "math.cos(angle) * BULLET_SPEED bullet.change_y = math.sin(angle) * BULLET_SPEED self.bullet_list.append(bullet) #", "for the bullet dest_x = self.player.center_x dest_y = self.player.center_y #", "First, calculate the angle to the player. We could do", "= dest_y - start_y angle = math.atan2(y_diff, x_diff) # Set", "bullet = arcade.Sprite(\":resources:images/space_shooter/laserBlue01.png\") bullet.center_x = start_x bullet.center_y = start_y #", "calculate our change_x # and change_y. Velocity is how fast", "enemy.center_y = SCREEN_HEIGHT - enemy.height enemy.angle = 180 self.enemy_list.append(enemy) def", "# and change_y. Velocity is how fast the bullet travels.", "self.enemy_list = None self.bullet_list = None self.player_list = None self.player", "if self.frame_count % 60 == 0: bullet = arcade.Sprite(\":resources:images/space_shooter/laserBlue01.png\") bullet.center_x", "arcade.examples.sprite_bullets_enemy_aims \"\"\" import arcade import math import os SCREEN_WIDTH =", "the same # directory this .py file is in. You", "= start_x bullet.center_y = start_y # Angle the bullet sprite", "0.5) enemy.center_x = SCREEN_WIDTH - 120 enemy.center_y = SCREEN_HEIGHT -", "self.player_list = arcade.SpriteList() # Add player ship self.player = arcade.Sprite(\":resources:images/space_shooter/playerShip1_orange.png\",", "= 120 enemy.center_y = SCREEN_HEIGHT - enemy.height enemy.angle = 180", "def on_draw(self): \"\"\"Render the screen. \"\"\" arcade.start_render() self.enemy_list.draw() self.bullet_list.draw() self.player_list.draw()", "BULLET_SPEED = 4 class MyGame(arcade.Window): \"\"\" Main application class \"\"\"", "how fast the bullet travels. 
bullet.change_x = math.cos(angle) * BULLET_SPEED", "that we have for enemy in self.enemy_list: # First, calculate", "start_x y_diff = dest_y - start_y angle = math.atan2(y_diff, x_diff)", "we expect to find files) to the same # directory", "enemy.angle = math.degrees(angle)-90 # Shoot every 60 frames change of", "the command line with: python -m arcade.examples.sprite_bullets_enemy_aims \"\"\" import arcade", "SCREEN_WIDTH - 120 enemy.center_y = SCREEN_HEIGHT - enemy.height enemy.angle =", "Set the enemy to face the player. enemy.angle = math.degrees(angle)-90", "<filename>arcade/examples/sprite_bullets_enemy_aims.py \"\"\" Show how to have enemies shoot bullets aimed", "self.player = None def setup(self): self.enemy_list = arcade.SpriteList() self.bullet_list =", "as mentioned at the top of this program. file_path =", "arcade.Sprite(\":resources:images/space_shooter/playerShip1_orange.png\", 0.5) self.player_list.append(self.player) # Add top-left enemy ship enemy =", "= self.player.center_x dest_y = self.player.center_y # Do math to calculate", "ship enemy = arcade.Sprite(\":resources:images/space_shooter/playerShip1_green.png\", 0.5) enemy.center_x = SCREEN_WIDTH - 120", "will rotate # the enemy to face the player each", "= enemy.center_y # Get the destination location for the bullet", "bullets aimed at the player. If Python and Arcade are", "this # each frame. # Position the start at the", "examples using \"python -m\" # as mentioned at the top", "will travel. x_diff = dest_x - start_x y_diff = dest_y", "can leave this out of your own # code, but", "of this program. 
file_path = os.path.dirname(os.path.abspath(__file__)) os.chdir(file_path) arcade.set_background_color(arcade.color.BLACK) self.frame_count =", "def setup(self): self.enemy_list = arcade.SpriteList() self.bullet_list = arcade.SpriteList() self.player_list =", "of your own # code, but it is needed to", "find files) to the same # directory this .py file", "= None self.bullet_list = None self.player_list = None self.player =", "working directory (where we expect to find files) to the", "rotate # the enemy to face the player each frame,", "width, height, title): super().__init__(width, height, title) # Set the working", "= 800 SCREEN_HEIGHT = 600 SCREEN_TITLE = \"Sprites and Bullets", "# Set the enemy to face the player. enemy.angle =", "x_diff = dest_x - start_x y_diff = dest_y - start_y", "= SCREEN_WIDTH - 120 enemy.center_y = SCREEN_HEIGHT - enemy.height enemy.angle", "Add player ship self.player = arcade.Sprite(\":resources:images/space_shooter/playerShip1_orange.png\", 0.5) self.player_list.append(self.player) # Add", "\"Sprites and Bullets Enemy Aims Example\" BULLET_SPEED = 4 class", "bullet when it flies off-screen for bullet in self.bullet_list: if", "bullet.change_x = math.cos(angle) * BULLET_SPEED bullet.change_y = math.sin(angle) * BULLET_SPEED", "= arcade.Sprite(\":resources:images/space_shooter/laserBlue01.png\") bullet.center_x = start_x bullet.center_y = start_y # Angle", "only when the bullet fires, but in this case we", "change of shooting each frame if self.frame_count % 60 ==", "the player. If Python and Arcade are installed, this example", "off-screen for bullet in self.bullet_list: if bullet.top < 0: bullet.remove_from_sprite_lists()", "bullet in self.bullet_list: if bullet.top < 0: bullet.remove_from_sprite_lists() self.bullet_list.update() def", "so we'll do this # each frame. 
# Position the", "\"\"\" Main method \"\"\" window = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE) window.setup()", "enemy ship enemy = arcade.Sprite(\":resources:images/space_shooter/playerShip1_green.png\", 0.5) enemy.center_x = 120 enemy.center_y", "title): super().__init__(width, height, title) # Set the working directory (where", "the start points # and end points. This is the", "bullet to the destination. # Calculation the angle in radians", "0: bullet = arcade.Sprite(\":resources:images/space_shooter/laserBlue01.png\") bullet.center_x = start_x bullet.center_y = start_y", "- enemy.height enemy.angle = 180 self.enemy_list.append(enemy) # Add top-right enemy", "\"\"\"Render the screen. \"\"\" arcade.start_render() self.enemy_list.draw() self.bullet_list.draw() self.player_list.draw() def on_update(self,", "to calculate how to get the bullet to the destination.", "arcade import math import os SCREEN_WIDTH = 800 SCREEN_HEIGHT =", "title) # Set the working directory (where we expect to", "MyGame(arcade.Window): \"\"\" Main application class \"\"\" def __init__(self, width, height,", "player each frame, so we'll do this # each frame.", "own # code, but it is needed to easily run", "to have enemies shoot bullets aimed at the player. If", "face the player each frame, so we'll do this #", "to the player. We could do this # only when", "self.player.center_y = y def main(): \"\"\" Main method \"\"\" window", "- start_x y_diff = dest_y - start_y angle = math.atan2(y_diff,", "to get the bullet to the destination. # Calculation the", "angle the bullet will travel. x_diff = dest_x - start_x", "current location start_x = enemy.center_x start_y = enemy.center_y # Get", "get the bullet to the destination. # Calculation the angle", "the player. 
enemy.angle = math.degrees(angle)-90 # Shoot every 60 frames", "bullet.center_y = start_y # Angle the bullet sprite bullet.angle =", "enemy ship enemy = arcade.Sprite(\":resources:images/space_shooter/playerShip1_green.png\", 0.5) enemy.center_x = SCREEN_WIDTH -", "\"\"\" import arcade import math import os SCREEN_WIDTH = 800", "and change_y. Velocity is how fast the bullet travels. bullet.change_x", "180 self.enemy_list.append(enemy) # Add top-right enemy ship enemy = arcade.Sprite(\":resources:images/space_shooter/playerShip1_green.png\",", "can be run from the command line with: python -m", "Aims Example\" BULLET_SPEED = 4 class MyGame(arcade.Window): \"\"\" Main application", "Calculation the angle in radians between the start points #", "# Add top-right enemy ship enemy = arcade.Sprite(\":resources:images/space_shooter/playerShip1_green.png\", 0.5) enemy.center_x", "% 60 == 0: bullet = arcade.Sprite(\":resources:images/space_shooter/laserBlue01.png\") bullet.center_x = start_x", "bullet travels. bullet.change_x = math.cos(angle) * BULLET_SPEED bullet.change_y = math.sin(angle)" ]
[ "you know that in two years you will be %d", "usrAge = int(input(\"What is your Age?\")) usrGPA = float(input(\"What is", "is your Name?\") usrAge = int(input(\"What is your Age?\")) usrGPA", "(\"Hello, %s\" % (usrName)) print (\"Did you know that in", "use of Variables. usrName = input(\"What is your Name?\") usrAge", "(\"Did you know that in two years you will be", "#cheap way to get a new line print (\"Hello, %s\"", "in two years you will be %d years old? \"", "input(\"What is your Name?\") usrAge = int(input(\"What is your Age?\"))", "to improve your GPA by %f points to have a", "is your GPA?\")) print () #cheap way to get a", "print (\"Hello, %s\" % (usrName)) print (\"Did you know that", "Part will gather Infos and demonstrate the use of Variables.", "the use of Variables. usrName = input(\"What is your Name?\")", "%s\" % (usrName)) print (\"Did you know that in two", "\" % (usrAge +2)) print (\"Also you need to improve", "way to get a new line print (\"Hello, %s\" %", "GPA?\")) print () #cheap way to get a new line", "by %f points to have a perfect score.\" % (4.0", "usrGPA = float(input(\"What is your GPA?\")) print () #cheap way", "Variables. usrName = input(\"What is your Name?\") usrAge = int(input(\"What", "your Name?\") usrAge = int(input(\"What is your Age?\")) usrGPA =", "% (usrName)) print (\"Did you know that in two years", "print (\"Did you know that in two years you will", "years old? \" % (usrAge +2)) print (\"Also you need", "(usrAge +2)) print (\"Also you need to improve your GPA", "improve your GPA by %f points to have a perfect", "Age?\")) usrGPA = float(input(\"What is your GPA?\")) print () #cheap", "will be %d years old? 
\" % (usrAge +2)) print", "your Age?\")) usrGPA = float(input(\"What is your GPA?\")) print ()", "<reponame>FreakX23/EBook_Training<filename>app1.py # This Part will gather Infos and demonstrate the", "= float(input(\"What is your GPA?\")) print () #cheap way to", "your GPA?\")) print () #cheap way to get a new", "Infos and demonstrate the use of Variables. usrName = input(\"What", "to get a new line print (\"Hello, %s\" % (usrName))", "two years you will be %d years old? \" %", "(\"Also you need to improve your GPA by %f points", "(usrName)) print (\"Did you know that in two years you", "int(input(\"What is your Age?\")) usrGPA = float(input(\"What is your GPA?\"))", "need to improve your GPA by %f points to have", "to have a perfect score.\" % (4.0 - usrGPA)) print", "of Variables. usrName = input(\"What is your Name?\") usrAge =", "is your Age?\")) usrGPA = float(input(\"What is your GPA?\")) print", "+2)) print (\"Also you need to improve your GPA by", "will gather Infos and demonstrate the use of Variables. usrName", "be %d years old? \" % (usrAge +2)) print (\"Also", "gather Infos and demonstrate the use of Variables. usrName =", "%f points to have a perfect score.\" % (4.0 -", "get a new line print (\"Hello, %s\" % (usrName)) print", "% (usrAge +2)) print (\"Also you need to improve your", "# This Part will gather Infos and demonstrate the use", "= input(\"What is your Name?\") usrAge = int(input(\"What is your", "old? \" % (usrAge +2)) print (\"Also you need to", "= int(input(\"What is your Age?\")) usrGPA = float(input(\"What is your", "Name?\") usrAge = int(input(\"What is your Age?\")) usrGPA = float(input(\"What", "This Part will gather Infos and demonstrate the use of", "float(input(\"What is your GPA?\")) print () #cheap way to get", "%d years old? \" % (usrAge +2)) print (\"Also you", "and demonstrate the use of Variables. 
usrName = input(\"What is", "have a perfect score.\" % (4.0 - usrGPA)) print ()", "usrName = input(\"What is your Name?\") usrAge = int(input(\"What is", "print () #cheap way to get a new line print", "a new line print (\"Hello, %s\" % (usrName)) print (\"Did", "demonstrate the use of Variables. usrName = input(\"What is your", "line print (\"Hello, %s\" % (usrName)) print (\"Did you know", "your GPA by %f points to have a perfect score.\"", "know that in two years you will be %d years", "that in two years you will be %d years old?", "you need to improve your GPA by %f points to", "print (\"Also you need to improve your GPA by %f", "points to have a perfect score.\" % (4.0 - usrGPA))", "GPA by %f points to have a perfect score.\" %", "you will be %d years old? \" % (usrAge +2))", "years you will be %d years old? \" % (usrAge", "new line print (\"Hello, %s\" % (usrName)) print (\"Did you", "() #cheap way to get a new line print (\"Hello," ]
[ "a = input() a = a.replace('--', '2') a = a.replace('-.',", "a = a.replace('--', '2') a = a.replace('-.', '1') a =", "= a.replace('--', '2') a = a.replace('-.', '1') a = a.replace('.',", "= input() a = a.replace('--', '2') a = a.replace('-.', '1')", "'2') a = a.replace('-.', '1') a = a.replace('.', '0') print(a)", "a.replace('--', '2') a = a.replace('-.', '1') a = a.replace('.', '0')", "input() a = a.replace('--', '2') a = a.replace('-.', '1') a", "<reponame>AmitHasanShuvo/Programming a = input() a = a.replace('--', '2') a =" ]
[ "class Message: def __init__(self, from_channel=None, **kwargs): self._channel = from_channel if", "msg = Message(sender=\"A\", receiver=\"B\") assert msg.sender is \"A\" assert msg.receiver", "return self._channel.sender def receiver(self): return self._channel.receiver class CallbackMessage(Message): def __init__(self,", "@property def carrier(self): return self._channel def sender(self): return self._channel.sender def", "setattr(self, key, value) @property def carrier(self): return self._channel def sender(self):", "__init__(self, from_channel=None, **kwargs): self._channel = from_channel if kwargs is not", "\"__main__\": msg = Message(sender=\"A\", receiver=\"B\") assert msg.sender is \"A\" assert", "<reponame>charlesemurray/DistributedProgramming class Message: def __init__(self, from_channel=None, **kwargs): self._channel = from_channel", "kwargs.items(): setattr(self, key, value) @property def carrier(self): return self._channel def", "self._channel def sender(self): return self._channel.sender def receiver(self): return self._channel.receiver class", "key, value) @property def carrier(self): return self._channel def sender(self): return", "def __init__(self, from_channel=None, **kwargs): self._channel = from_channel if kwargs is", "from_channel=None, **kwargs): self._channel = from_channel if kwargs is not None:", "**kwargs): self._channel = from_channel if kwargs is not None: for", "if kwargs is not None: for key, value in kwargs.items():", "in kwargs.items(): setattr(self, key, value) @property def carrier(self): return self._channel", "def carrier(self): return self._channel def sender(self): return self._channel.sender def receiver(self):", "self._channel.receiver class CallbackMessage(Message): def __init__(self, function): super(CallbackMessage, self).__init__(function=function) if __name__", "None: for key, value in kwargs.items(): setattr(self, key, value) @property", "CallbackMessage(Message): def __init__(self, function): super(CallbackMessage, 
self).__init__(function=function) if __name__ == \"__main__\":", "= from_channel if kwargs is not None: for key, value", "not None: for key, value in kwargs.items(): setattr(self, key, value)", "receiver(self): return self._channel.receiver class CallbackMessage(Message): def __init__(self, function): super(CallbackMessage, self).__init__(function=function)", "def receiver(self): return self._channel.receiver class CallbackMessage(Message): def __init__(self, function): super(CallbackMessage,", "== \"__main__\": msg = Message(sender=\"A\", receiver=\"B\") assert msg.sender is \"A\"", "self._channel.sender def receiver(self): return self._channel.receiver class CallbackMessage(Message): def __init__(self, function):", "value) @property def carrier(self): return self._channel def sender(self): return self._channel.sender", "for key, value in kwargs.items(): setattr(self, key, value) @property def", "def sender(self): return self._channel.sender def receiver(self): return self._channel.receiver class CallbackMessage(Message):", "value in kwargs.items(): setattr(self, key, value) @property def carrier(self): return", "def __init__(self, function): super(CallbackMessage, self).__init__(function=function) if __name__ == \"__main__\": msg", "super(CallbackMessage, self).__init__(function=function) if __name__ == \"__main__\": msg = Message(sender=\"A\", receiver=\"B\")", "function): super(CallbackMessage, self).__init__(function=function) if __name__ == \"__main__\": msg = Message(sender=\"A\",", "__init__(self, function): super(CallbackMessage, self).__init__(function=function) if __name__ == \"__main__\": msg =", "sender(self): return self._channel.sender def receiver(self): return self._channel.receiver class CallbackMessage(Message): def", "return self._channel def sender(self): return self._channel.sender def receiver(self): return self._channel.receiver", "from_channel if kwargs is not None: for key, value in", "is not None: for key, value in kwargs.items(): 
setattr(self, key,", "= Message(sender=\"A\", receiver=\"B\") assert msg.sender is \"A\" assert msg.receiver is", "key, value in kwargs.items(): setattr(self, key, value) @property def carrier(self):", "kwargs is not None: for key, value in kwargs.items(): setattr(self,", "Message(sender=\"A\", receiver=\"B\") assert msg.sender is \"A\" assert msg.receiver is \"B\"", "return self._channel.receiver class CallbackMessage(Message): def __init__(self, function): super(CallbackMessage, self).__init__(function=function) if", "self).__init__(function=function) if __name__ == \"__main__\": msg = Message(sender=\"A\", receiver=\"B\") assert", "self._channel = from_channel if kwargs is not None: for key,", "class CallbackMessage(Message): def __init__(self, function): super(CallbackMessage, self).__init__(function=function) if __name__ ==", "if __name__ == \"__main__\": msg = Message(sender=\"A\", receiver=\"B\") assert msg.sender", "carrier(self): return self._channel def sender(self): return self._channel.sender def receiver(self): return", "__name__ == \"__main__\": msg = Message(sender=\"A\", receiver=\"B\") assert msg.sender is", "Message: def __init__(self, from_channel=None, **kwargs): self._channel = from_channel if kwargs" ]
[ "self.assertEquals(msg['params'], ()) def test_logger_warning(self): self.logger.warning('This is a test warning') self.assertEquals(len(self.client.events),", "TestCase class LoggingIntegrationTest(TestCase): def setUp(self): self.client = get_tempstoreclient(include_paths=['tests', 'opbeat']) self.handler", "is a test of stacks', extra={'culprit': 'foo.bar'}) self.assertEquals(len(self.client.events), 1) event", "self.assertEquals(msg['message'], 'This is a test error') self.assertEquals(msg['params'], ()) def test_logger_warning(self):", "self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) self.assertEquals(event['logger'], __name__) self.assertEquals(event['level'], \"warning\") self.assertFalse('stacktrace'", "'This is a test of no stacks') self.assertFalse('stacktrace' in event)", "test with an exception') self.assertEquals(msg['params'], ()) class LoggingHandlerTest(TestCase): def test_client_arg(self):", "a test ValueError') except ValueError: self.logger.info('This is a test info", "is a test of %s') self.assertEquals(msg['params'], ('args',)) def test_record_stack(self): self.logger.info('This", "is a test with an exception') self.assertEquals(len(self.client.events), 1) event =", "with an exception', exc_info=True) self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) #", "event, event) self.assertEquals(event['message'], 'This is a test of stacks') self.assertFalse('exception'", "test ValueError') except ValueError: self.logger.exception('This is a test with an", "def test_explicit_stack(self): self.logger.info('This is a test of stacks', extra={'stack': iter_stack_frames()})", "self.assertFalse('exception' in event) self.assertTrue('param_message' in event) msg = event['param_message'] self.assertEquals(msg['message'],", "'This is a test error') self.assertEquals(msg['params'], ()) def test_logger_warning(self): self.logger.warning('This", "[] self.logger.addHandler(self.handler) def 
test_logger_basic(self): self.logger.error('This is a test error') self.assertEquals(len(self.client.events),", "error') self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) self.assertEquals(event['logger'], __name__) self.assertEquals(event['level'], \"error\")", "event) def test_extra_culprit(self): self.logger.info('This is a test of stacks', extra={'culprit':", "'This is a test of args') # print event.keys() self.assertFalse('stacktrace'", "test of %s', 'args') self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) #", "a test warning') self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) self.assertEquals(event['logger'], __name__)", ")) self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) self.assertEquals(event['extra']['url'], 'http://example.com') self.assertFalse('stacktrace' in", "def test_logger_basic(self): self.logger.error('This is a test error') self.assertEquals(len(self.client.events), 1) event", "exc = event['exception'] self.assertEquals(exc['type'], 'ValueError') self.assertEquals(exc['value'], 'This is a test", "client = get_tempstoreclient(include_paths=['tests']) handler = OpbeatHandler(client) self.assertEquals(handler.client, client) def test_client_kwarg(self):", "a test of no stacks', extra={'stack': False}) self.assertEquals(len(self.client.events), 1) event", "self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) self.assertTrue('culprit' in event, event) self.assertEquals(event['culprit'],", "self.client.events.pop(0) self.assertEquals(event.get('culprit'), None) self.assertEquals(event['message'], 'This is a test of no", "= self.client.events.pop(0) # self.assertEquals(event['message'], 'This is a test info with", "__name__) self.assertEquals(event['level'], \"error\") self.assertEquals(event['message'], 'This is a test error') self.assertFalse('stacktrace'", "a test info with an exception') 
self.assertTrue('stacktrace' in event) self.assertTrue('exception'", "None) self.assertEquals(event['message'], 'This is a test of no stacks') self.assertFalse('stacktrace'", "self.assertEquals(event['message'], 'This is a test info with an exception') self.assertTrue('stacktrace'", "test ValueError') self.assertTrue('param_message' in event) msg = event['param_message'] self.assertEquals(msg['message'], 'This", "self.client.events.pop(0) self.assertEquals(event['culprit'], 'foo.bar') def test_logger_exception(self): try: raise ValueError('This is a", "self.assertEquals(msg['params'], ()) class LoggingHandlerTest(TestCase): def test_client_arg(self): client = get_tempstoreclient(include_paths=['tests']) handler", "iter_stack_frames from tests.helpers import get_tempstoreclient from tests.utils.compat import TestCase class", "test of stacks', extra={'stack': True}) self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0)", "event, event) self.assertEquals(event['culprit'], 'tests.handlers.logging.logging_tests.test_explicit_stack') self.assertTrue('message' in event, event) self.assertEquals(event['message'], 'This", "()) def test_logger_exc_info(self): try: raise ValueError('This is a test ValueError')", "self.assertTrue('exception' in event) exc = event['exception'] self.assertEquals(exc['type'], 'ValueError') self.assertEquals(exc['value'], 'This", "of %s') self.assertEquals(msg['params'], ('args',)) def test_record_stack(self): self.logger.info('This is a test", "test error') self.assertEquals(msg['params'], ()) def test_logger_warning(self): self.logger.warning('This is a test", "\"warning\") self.assertFalse('stacktrace' in event) self.assertFalse('exception' in event) self.assertTrue('param_message' in event)", "self.assertTrue('stacktrace' in event) def test_extra_culprit(self): self.logger.info('This is a test of", "LoggingHandlerTest(TestCase): def test_client_arg(self): client = get_tempstoreclient(include_paths=['tests']) handler = 
OpbeatHandler(client) self.assertEquals(handler.client,", "stacks', extra={'stack': False}) self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) self.assertEquals(event.get('culprit'), None)", "is a test of %s', 'args') self.assertEquals(len(self.client.events), 1) event =", "msg = event['param_message'] self.assertEquals(msg['message'], 'This is a test warning') self.assertEquals(msg['params'],", "event = self.client.events.pop(0) # self.assertEquals(event['message'], 'This is a test info", "is a test ValueError') self.assertTrue('param_message' in event) msg = event['param_message']", "of stacks', extra={'culprit': 'foo.bar'}) self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) self.assertEquals(event['culprit'],", "self.assertEquals(msg['message'], 'This is a test of %s') self.assertEquals(msg['params'], ('args',)) def", "ValueError('This is a test ValueError') except ValueError: self.logger.exception('This is a", "self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) self.assertTrue('stacktrace' in event) frames =", "self.assertEquals(msg['params'], ()) def test_logger_exc_info(self): try: raise ValueError('This is a test", "= OpbeatHandler(self.client) self.logger = logging.getLogger(__name__) self.logger.handlers = [] self.logger.addHandler(self.handler) def", "= self.client.events.pop(0) self.assertTrue('culprit' in event, event) self.assertEquals(event['culprit'], 'tests.handlers.logging.logging_tests.test_explicit_stack') self.assertTrue('message' in", "extra={'culprit': 'foo.bar'}) self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) self.assertEquals(event['culprit'], 'foo.bar') def", "self.assertEquals(frame['module'], __name__) self.assertFalse('exception' in event) self.assertTrue('param_message' in event) msg =", "warning') self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) self.assertEquals(event['logger'], 
__name__) self.assertEquals(event['level'], \"warning\")", "test info with an exception') self.assertEquals(msg['params'], ()) def test_message_params(self): self.logger.info('This", "event) msg = event['param_message'] self.assertEquals(msg['message'], 'This is a test with", "test of no stacks') self.assertFalse('stacktrace' in event) self.assertFalse('exception' in event)", "1) event = self.client.events.pop(0) self.assertTrue('culprit' in event, event) self.assertEquals(event['culprit'], 'tests.handlers.logging.logging_tests.test_explicit_stack')", "is a test of args') # print event.keys() self.assertFalse('stacktrace' in", "self.handler = OpbeatHandler(self.client) self.logger = logging.getLogger(__name__) self.logger.handlers = [] self.logger.addHandler(self.handler)", "a test of stacks', extra={'stack': iter_stack_frames()}) self.assertEquals(len(self.client.events), 1) event =", "from opbeat.utils.stacks import iter_stack_frames from tests.helpers import get_tempstoreclient from tests.utils.compat", "event['param_message'] self.assertEquals(msg['message'], 'This is a test info with an exception')", "test of stacks') def test_no_record_stack(self): self.logger.info('This is a test of", "'This is a test info with an exception') self.assertTrue('stacktrace' in", "= self.client.events.pop(0) # self.assertEquals(event['message'], 'This is a test of args')", "event = self.client.events.pop(0) # self.assertEquals(event['message'], 'This is a test of", "= self.client.events.pop(0) self.assertEquals(event.get('culprit'), None) self.assertEquals(event['message'], 'This is a test of", "self.logger.error('This is a test error') self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0)", "# print event.keys() self.assertFalse('stacktrace' in event) self.assertFalse('exception' in event) self.assertTrue('param_message'", "self.assertEquals(event['message'], 'This is a test of stacks') self.assertFalse('exception' in event)", "def 
test_logger_warning(self): self.logger.warning('This is a test warning') self.assertEquals(len(self.client.events), 1) event", "is a test info with an exception') self.assertEquals(msg['params'], ()) def", "<reponame>rupeshparab/techscan import logging from opbeat.handlers.logging import OpbeatHandler from opbeat.utils.stacks import", "an exception') self.assertEquals(msg['params'], ()) def test_message_params(self): self.logger.info('This is a test", "self.assertEquals(event['level'], \"error\") self.assertEquals(event['message'], 'This is a test error') self.assertFalse('stacktrace' in", "of stacks', extra={'stack': True}) self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) self.assertTrue('stacktrace'", "except ValueError: self.logger.exception('This is a test with an exception') self.assertEquals(len(self.client.events),", "test with an exception') self.assertTrue('stacktrace' in event) self.assertTrue('exception' in event)", "test_client_arg(self): client = get_tempstoreclient(include_paths=['tests']) handler = OpbeatHandler(client) self.assertEquals(handler.client, client) def", "in event) frames = event['stacktrace']['frames'] self.assertNotEquals(len(frames), 1) frame = frames[0]", "a test of stacks') self.assertEquals(msg['params'], ()) self.assertEquals(event['culprit'], 'tests.handlers.logging.logging_tests.test_record_stack') self.assertEquals(event['message'], 'This", "test_no_record_stack(self): self.logger.info('This is a test of no stacks', extra={'stack': False})", "event = self.client.events.pop(0) self.assertEquals(event['message'], 'This is a test with an", "test of stacks') self.assertEquals(msg['params'], ()) self.assertEquals(event['culprit'], 'tests.handlers.logging.logging_tests.test_record_stack') self.assertEquals(event['message'], 'This is", "self.client.events.pop(0) # self.assertEquals(event['message'], 'This is a test of args') #", "self.logger.info('This is a test info with a url', extra=dict( data=dict(", 
"OpbeatHandler(self.client) self.logger = logging.getLogger(__name__) self.logger.handlers = [] self.logger.addHandler(self.handler) def test_logger_basic(self):", "info with an exception') self.assertEquals(msg['params'], ()) def test_message_params(self): self.logger.info('This is", "1) event = self.client.events.pop(0) self.assertEquals(event.get('culprit'), None) self.assertEquals(event['message'], 'This is a", "event = self.client.events.pop(0) self.assertEquals(event['culprit'], 'foo.bar') def test_logger_exception(self): try: raise ValueError('This", "()) class LoggingHandlerTest(TestCase): def test_client_arg(self): client = get_tempstoreclient(include_paths=['tests']) handler =", "'ValueError') self.assertEquals(exc['value'], 'This is a test ValueError') self.assertTrue('param_message' in event)", "a test of args') # print event.keys() self.assertFalse('stacktrace' in event)", "print event.keys() self.assertFalse('stacktrace' in event) self.assertFalse('exception' in event) self.assertTrue('param_message' in", "self.assertNotEquals(len(frames), 1) frame = frames[0] self.assertEquals(frame['module'], __name__) self.assertFalse('exception' in event)", "self.assertEquals(msg['message'], 'This is a test info with a url') self.assertEquals(msg['params'],", "event['param_message'] self.assertEquals(msg['message'], 'This is a test of stacks') self.assertEquals(msg['params'], ())", "= event['param_message'] self.assertEquals(msg['message'], 'This is a test error') self.assertEquals(msg['params'], ())", "'This is a test of stacks') def test_no_record_stack(self): self.logger.info('This is", "extra={'stack': False}) self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) self.assertEquals(event.get('culprit'), None) self.assertEquals(event['message'],", "test error') self.assertFalse('stacktrace' in event) self.assertFalse('exception' in event) self.assertTrue('param_message' in", "self.assertEquals(event.get('culprit'), None) 
self.assertEquals(event['message'], 'This is a test of no stacks')", "'tests.handlers.logging.logging_tests.test_explicit_stack') self.assertTrue('message' in event, event) self.assertEquals(event['message'], 'This is a test", "= self.client.events.pop(0) self.assertEquals(event['culprit'], 'foo.bar') def test_logger_exception(self): try: raise ValueError('This is", "def test_logger_exc_info(self): try: raise ValueError('This is a test ValueError') except", "# self.assertEquals(event['message'], 'This is a test info with an exception')", "try: raise ValueError('This is a test ValueError') except ValueError: self.logger.info('This", "is a test warning') self.assertEquals(msg['params'], ()) def test_logger_extra_data(self): self.logger.info('This is", "of no stacks') self.assertEquals(msg['params'], ()) def test_explicit_stack(self): self.logger.info('This is a", "'foo.bar') def test_logger_exception(self): try: raise ValueError('This is a test ValueError')", "= event['stacktrace']['frames'] self.assertNotEquals(len(frames), 1) frame = frames[0] self.assertEquals(frame['module'], __name__) self.assertFalse('exception'", "test with an exception') self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) self.assertEquals(event['message'],", "an exception') self.assertEquals(msg['params'], ()) class LoggingHandlerTest(TestCase): def test_client_arg(self): client =", "import OpbeatHandler from opbeat.utils.stacks import iter_stack_frames from tests.helpers import get_tempstoreclient", "self.logger.warning('This is a test warning') self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0)", "event) self.assertFalse('exception' in event) self.assertTrue('param_message' in event) msg = event['param_message']", "), )) self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) self.assertEquals(event['extra']['url'], 'http://example.com') self.assertFalse('stacktrace'", "test_explicit_stack(self): 
self.logger.info('This is a test of stacks', extra={'stack': iter_stack_frames()}) self.assertEquals(len(self.client.events),", "a test of stacks') self.assertEquals(msg['params'], ()) self.assertTrue('stacktrace' in event) def", "()) self.assertTrue('stacktrace' in event) def test_extra_culprit(self): self.logger.info('This is a test", "event) msg = event['param_message'] self.assertEquals(msg['message'], 'This is a test info", "self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) self.assertEquals(event['extra']['url'], 'http://example.com') self.assertFalse('stacktrace' in event)", "with an exception') self.assertEquals(msg['params'], ()) class LoggingHandlerTest(TestCase): def test_client_arg(self): client", "self.client.events.pop(0) self.assertEquals(event['logger'], __name__) self.assertEquals(event['level'], \"error\") self.assertEquals(event['message'], 'This is a test", "is a test info with a url') self.assertEquals(msg['params'], ()) def", "exc_info=True) self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) # self.assertEquals(event['message'], 'This is", "test info with a url') self.assertEquals(msg['params'], ()) def test_logger_exc_info(self): try:", "is a test of stacks') self.assertEquals(msg['params'], ()) self.assertEquals(event['culprit'], 'tests.handlers.logging.logging_tests.test_record_stack') self.assertEquals(event['message'],", "test_logger_exception(self): try: raise ValueError('This is a test ValueError') except ValueError:", "is a test error') self.assertEquals(msg['params'], ()) def test_logger_warning(self): self.logger.warning('This is", "OpbeatHandler from opbeat.utils.stacks import iter_stack_frames from tests.helpers import get_tempstoreclient from", "stacks', extra={'culprit': 'foo.bar'}) self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) self.assertEquals(event['culprit'], 'foo.bar')", "event['exception'] self.assertEquals(exc['type'], 
'ValueError') self.assertEquals(exc['value'], 'This is a test ValueError') self.assertTrue('param_message'", "a test warning') self.assertEquals(msg['params'], ()) def test_logger_extra_data(self): self.logger.info('This is a", "1) event = self.client.events.pop(0) self.assertEquals(event['culprit'], 'foo.bar') def test_logger_exception(self): try: raise", "a test of no stacks') self.assertFalse('stacktrace' in event) self.assertFalse('exception' in", "'This is a test of no stacks') self.assertEquals(msg['params'], ()) def", "OpbeatHandler(client) self.assertEquals(handler.client, client) def test_client_kwarg(self): client = get_tempstoreclient(include_paths=['tests']) handler =", "from tests.helpers import get_tempstoreclient from tests.utils.compat import TestCase class LoggingIntegrationTest(TestCase):", "= frames[0] self.assertEquals(frame['module'], __name__) self.assertFalse('exception' in event) self.assertTrue('param_message' in event)", "self.client.events.pop(0) self.assertEquals(event['message'], 'This is a test with an exception') self.assertTrue('stacktrace'", "1) event = self.client.events.pop(0) self.assertEquals(event['logger'], __name__) self.assertEquals(event['level'], \"warning\") self.assertFalse('stacktrace' in", "def test_no_record_stack(self): self.logger.info('This is a test of no stacks', extra={'stack':", "self.client.events.pop(0) self.assertEquals(event['extra']['url'], 'http://example.com') self.assertFalse('stacktrace' in event) self.assertFalse('exception' in event) self.assertTrue('param_message'", "event) msg = event['param_message'] self.assertEquals(msg['message'], 'This is a test of", "a test of %s', 'args') self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0)", "test_logger_extra_data(self): self.logger.info('This is a test info with a url', extra=dict(", "self.assertEquals(msg['params'], ()) def test_explicit_stack(self): self.logger.info('This is a test of stacks',", "= event['param_message'] 
self.assertEquals(msg['message'], 'This is a test of stacks') self.assertEquals(msg['params'],", "in event) def test_extra_culprit(self): self.logger.info('This is a test of stacks',", "'This is a test ValueError') self.assertTrue('param_message' in event) msg =", "ValueError') self.assertTrue('param_message' in event) msg = event['param_message'] self.assertEquals(msg['message'], 'This is", "True}) self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) self.assertTrue('stacktrace' in event) frames", "raise ValueError('This is a test ValueError') except ValueError: self.logger.exception('This is", "except ValueError: self.logger.info('This is a test info with an exception',", "in event) self.assertTrue('exception' in event) exc = event['exception'] self.assertEquals(exc['type'], 'ValueError')", "of stacks', extra={'stack': iter_stack_frames()}) self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) self.assertTrue('culprit'", "self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) self.assertEquals(event['culprit'], 'foo.bar') def test_logger_exception(self): try:", "handler = OpbeatHandler(client=client) self.assertEquals(handler.client, client) def test_invalid_first_arg_type(self): self.assertRaises(ValueError, OpbeatHandler, object)", "test of stacks') self.assertFalse('exception' in event) self.assertTrue('param_message' in event) msg", "get_tempstoreclient(include_paths=['tests']) handler = OpbeatHandler(client=client) self.assertEquals(handler.client, client) def test_invalid_first_arg_type(self): self.assertRaises(ValueError, OpbeatHandler,", "is a test of stacks', extra={'stack': iter_stack_frames()}) self.assertEquals(len(self.client.events), 1) event", "with an exception') self.assertEquals(msg['params'], ()) def test_message_params(self): self.logger.info('This is a", "a test with an exception') self.assertEquals(msg['params'], ()) class LoggingHandlerTest(TestCase): def", 
"frames = event['stacktrace']['frames'] self.assertNotEquals(len(frames), 1) frame = frames[0] self.assertEquals(frame['module'], __name__)", "an exception') self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) self.assertEquals(event['message'], 'This is", "test info with an exception', exc_info=True) self.assertEquals(len(self.client.events), 1) event =", "= event['param_message'] self.assertEquals(msg['message'], 'This is a test info with an", "data=dict( url='http://example.com', ), )) self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) self.assertEquals(event['extra']['url'],", "event = self.client.events.pop(0) self.assertTrue('stacktrace' in event) frames = event['stacktrace']['frames'] self.assertNotEquals(len(frames),", "info with an exception', exc_info=True) self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0)", "event) exc = event['exception'] self.assertEquals(exc['type'], 'ValueError') self.assertEquals(exc['value'], 'This is a", "test of %s') self.assertEquals(msg['params'], ('args',)) def test_record_stack(self): self.logger.info('This is a", "'This is a test of stacks') self.assertEquals(msg['params'], ()) self.assertEquals(event['culprit'], 'tests.handlers.logging.logging_tests.test_record_stack')", "def test_extra_culprit(self): self.logger.info('This is a test of stacks', extra={'culprit': 'foo.bar'})", "is a test ValueError') except ValueError: self.logger.exception('This is a test", "self.assertEquals(exc['value'], 'This is a test ValueError') self.assertTrue('param_message' in event) msg", "'This is a test with an exception') self.assertEquals(msg['params'], ()) class", "with an exception') self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) self.assertEquals(event['message'], 'This", "= self.client.events.pop(0) self.assertTrue('stacktrace' in event) frames = event['stacktrace']['frames'] self.assertNotEquals(len(frames), 1)", "= 
event['param_message'] self.assertEquals(msg['message'], 'This is a test warning') self.assertEquals(msg['params'], ())", "stacks', extra={'stack': True}) self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) self.assertTrue('stacktrace' in", "%s', 'args') self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) # self.assertEquals(event['message'], 'This", "= event['param_message'] self.assertEquals(msg['message'], 'This is a test of no stacks')", "of stacks') self.assertEquals(msg['params'], ()) self.assertTrue('stacktrace' in event) def test_extra_culprit(self): self.logger.info('This", "url='http://example.com', ), )) self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) self.assertEquals(event['extra']['url'], 'http://example.com')", "= logging.getLogger(__name__) self.logger.handlers = [] self.logger.addHandler(self.handler) def test_logger_basic(self): self.logger.error('This is", "opbeat.utils.stacks import iter_stack_frames from tests.helpers import get_tempstoreclient from tests.utils.compat import", "test error') self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) self.assertEquals(event['logger'], __name__) self.assertEquals(event['level'],", "event) self.assertTrue('param_message' in event) msg = event['param_message'] self.assertEquals(msg['message'], 'This is", "self.assertEquals(event['logger'], __name__) self.assertEquals(event['level'], \"warning\") self.assertFalse('stacktrace' in event) self.assertFalse('exception' in event)", "error') self.assertFalse('stacktrace' in event) self.assertFalse('exception' in event) self.assertTrue('param_message' in event)", "self.logger.info('This is a test of stacks', extra={'stack': True}) self.assertEquals(len(self.client.events), 1)", "test of stacks') self.assertEquals(msg['params'], ()) self.assertTrue('stacktrace' in event) def test_extra_culprit(self):", "stacks') self.assertFalse('exception' in event) 
self.assertTrue('param_message' in event) msg = event['param_message']", "self.assertEquals(event['culprit'], 'foo.bar') def test_logger_exception(self): try: raise ValueError('This is a test", "self.logger.info('This is a test info with an exception', exc_info=True) self.assertEquals(len(self.client.events),", "is a test info with a url', extra=dict( data=dict( url='http://example.com',", "= event['exception'] self.assertEquals(exc['type'], 'ValueError') self.assertEquals(exc['value'], 'This is a test ValueError')", "is a test of stacks') def test_no_record_stack(self): self.logger.info('This is a", "()) def test_logger_warning(self): self.logger.warning('This is a test warning') self.assertEquals(len(self.client.events), 1)", "a test of no stacks') self.assertEquals(msg['params'], ()) def test_explicit_stack(self): self.logger.info('This", "a test info with an exception', exc_info=True) self.assertEquals(len(self.client.events), 1) event", "msg = event['param_message'] self.assertEquals(msg['message'], 'This is a test of stacks')", "def test_client_kwarg(self): client = get_tempstoreclient(include_paths=['tests']) handler = OpbeatHandler(client=client) self.assertEquals(handler.client, client)", "event['stacktrace']['frames'] self.assertNotEquals(len(frames), 1) frame = frames[0] self.assertEquals(frame['module'], __name__) self.assertFalse('exception' in", "in event) msg = event['param_message'] self.assertEquals(msg['message'], 'This is a test", "import iter_stack_frames from tests.helpers import get_tempstoreclient from tests.utils.compat import TestCase", "self.assertEquals(msg['message'], 'This is a test of stacks') self.assertEquals(msg['params'], ()) self.assertEquals(event['culprit'],", "a test info with an exception') self.assertEquals(msg['params'], ()) def test_message_params(self):", "test info with a url', extra=dict( data=dict( url='http://example.com', ), ))", "event) self.assertEquals(event['message'], 'This is a test of stacks') 
self.assertFalse('exception' in", "a test with an exception') self.assertTrue('stacktrace' in event) self.assertTrue('exception' in", "exception') self.assertEquals(msg['params'], ()) def test_message_params(self): self.logger.info('This is a test of", "stacks') self.assertEquals(msg['params'], ()) self.assertEquals(event['culprit'], 'tests.handlers.logging.logging_tests.test_record_stack') self.assertEquals(event['message'], 'This is a test", "a test error') self.assertEquals(msg['params'], ()) def test_logger_warning(self): self.logger.warning('This is a", "event) msg = event['param_message'] self.assertEquals(msg['message'], 'This is a test warning')", "a test of stacks', extra={'culprit': 'foo.bar'}) self.assertEquals(len(self.client.events), 1) event =", "in event) self.assertTrue('param_message' in event) msg = event['param_message'] self.assertEquals(msg['message'], 'This", "'This is a test error') self.assertFalse('stacktrace' in event) self.assertFalse('exception' in", "self.assertEquals(msg['message'], 'This is a test warning') self.assertEquals(msg['params'], ()) def test_logger_extra_data(self):", "self.assertEquals(event['level'], \"warning\") self.assertFalse('stacktrace' in event) self.assertFalse('exception' in event) self.assertTrue('param_message' in", "'This is a test warning') self.assertEquals(msg['params'], ()) def test_logger_extra_data(self): self.logger.info('This", "is a test ValueError') except ValueError: self.logger.info('This is a test", "exception') self.assertEquals(msg['params'], ()) class LoggingHandlerTest(TestCase): def test_client_arg(self): client = get_tempstoreclient(include_paths=['tests'])", "url', extra=dict( data=dict( url='http://example.com', ), )) self.assertEquals(len(self.client.events), 1) event =", "setUp(self): self.client = get_tempstoreclient(include_paths=['tests', 'opbeat']) self.handler = OpbeatHandler(self.client) self.logger =", "is a test warning') self.assertEquals(len(self.client.events), 1) event = 
self.client.events.pop(0) self.assertEquals(event['logger'],", "error') self.assertEquals(msg['params'], ()) def test_logger_warning(self): self.logger.warning('This is a test warning')", "stacks') self.assertEquals(msg['params'], ()) def test_explicit_stack(self): self.logger.info('This is a test of", "ValueError: self.logger.exception('This is a test with an exception') self.assertEquals(len(self.client.events), 1)", "'This is a test with an exception') self.assertTrue('stacktrace' in event)", "ValueError('This is a test ValueError') except ValueError: self.logger.info('This is a", "test ValueError') except ValueError: self.logger.info('This is a test info with", "is a test error') self.assertFalse('stacktrace' in event) self.assertFalse('exception' in event)", "msg = event['param_message'] self.assertEquals(msg['message'], 'This is a test info with", "self.assertEquals(msg['params'], ()) def test_logger_extra_data(self): self.logger.info('This is a test info with", "stacks') def test_no_record_stack(self): self.logger.info('This is a test of no stacks',", "self.assertEquals(msg['params'], ()) self.assertTrue('stacktrace' in event) def test_extra_culprit(self): self.logger.info('This is a", "()) self.assertEquals(event['culprit'], 'tests.handlers.logging.logging_tests.test_record_stack') self.assertEquals(event['message'], 'This is a test of stacks')", "1) event = self.client.events.pop(0) self.assertEquals(event['logger'], __name__) self.assertEquals(event['level'], \"error\") self.assertEquals(event['message'], 'This", "ValueError: self.logger.info('This is a test info with an exception', exc_info=True)", "self.logger.info('This is a test of stacks', extra={'culprit': 'foo.bar'}) self.assertEquals(len(self.client.events), 1)", "class LoggingHandlerTest(TestCase): def test_client_arg(self): client = get_tempstoreclient(include_paths=['tests']) handler = OpbeatHandler(client)", "is a test info with an exception') self.assertTrue('stacktrace' in event)", "test of 
no stacks') self.assertEquals(msg['params'], ()) def test_explicit_stack(self): self.logger.info('This is", "a url', extra=dict( data=dict( url='http://example.com', ), )) self.assertEquals(len(self.client.events), 1) event", "= get_tempstoreclient(include_paths=['tests']) handler = OpbeatHandler(client=client) self.assertEquals(handler.client, client) def test_invalid_first_arg_type(self): self.assertRaises(ValueError,", "= get_tempstoreclient(include_paths=['tests']) handler = OpbeatHandler(client) self.assertEquals(handler.client, client) def test_client_kwarg(self): client", "exception', exc_info=True) self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) # self.assertEquals(event['message'], 'This", "self.logger.exception('This is a test with an exception') self.assertEquals(len(self.client.events), 1) event", "tests.utils.compat import TestCase class LoggingIntegrationTest(TestCase): def setUp(self): self.client = get_tempstoreclient(include_paths=['tests',", "def test_logger_extra_data(self): self.logger.info('This is a test info with a url',", "= event['param_message'] self.assertEquals(msg['message'], 'This is a test of %s') self.assertEquals(msg['params'],", "with an exception') self.assertTrue('stacktrace' in event) self.assertTrue('exception' in event) exc", "self.assertEquals(event['message'], 'This is a test of stacks') def test_no_record_stack(self): self.logger.info('This", "in event) exc = event['exception'] self.assertEquals(exc['type'], 'ValueError') self.assertEquals(exc['value'], 'This is", "self.assertEquals(msg['params'], ('args',)) def test_record_stack(self): self.logger.info('This is a test of stacks',", "self.logger.info('This is a test of no stacks', extra={'stack': False}) self.assertEquals(len(self.client.events),", "msg = event['param_message'] self.assertEquals(msg['message'], 'This is a test of %s')", "'This is a test of %s') self.assertEquals(msg['params'], ('args',)) def test_record_stack(self):", "= 
event['param_message'] self.assertEquals(msg['message'], 'This is a test info with a", "a test of stacks', extra={'stack': True}) self.assertEquals(len(self.client.events), 1) event =", "a test of stacks') def test_no_record_stack(self): self.logger.info('This is a test", "test_logger_basic(self): self.logger.error('This is a test error') self.assertEquals(len(self.client.events), 1) event =", "is a test of stacks') self.assertFalse('exception' in event) self.assertTrue('param_message' in", "\"error\") self.assertEquals(event['message'], 'This is a test error') self.assertFalse('stacktrace' in event)", "event = self.client.events.pop(0) self.assertTrue('culprit' in event, event) self.assertEquals(event['culprit'], 'tests.handlers.logging.logging_tests.test_explicit_stack') self.assertTrue('message'", "from tests.utils.compat import TestCase class LoggingIntegrationTest(TestCase): def setUp(self): self.client =", "def test_logger_exception(self): try: raise ValueError('This is a test ValueError') except", "self.client.events.pop(0) self.assertEquals(event['logger'], __name__) self.assertEquals(event['level'], \"warning\") self.assertFalse('stacktrace' in event) self.assertFalse('exception' in", "with a url', extra=dict( data=dict( url='http://example.com', ), )) self.assertEquals(len(self.client.events), 1)", "no stacks', extra={'stack': False}) self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) self.assertEquals(event.get('culprit'),", "msg = event['param_message'] self.assertEquals(msg['message'], 'This is a test with an", "event['param_message'] self.assertEquals(msg['message'], 'This is a test with an exception') self.assertEquals(msg['params'],", "is a test of no stacks') self.assertEquals(msg['params'], ()) def test_explicit_stack(self):", "event = self.client.events.pop(0) self.assertEquals(event['logger'], __name__) self.assertEquals(event['level'], \"warning\") self.assertFalse('stacktrace' in event)", "test_record_stack(self): 
self.logger.info('This is a test of stacks', extra={'stack': True}) self.assertEquals(len(self.client.events),", "self.assertEquals(event['logger'], __name__) self.assertEquals(event['level'], \"error\") self.assertEquals(event['message'], 'This is a test error')", "self.assertEquals(event['extra']['url'], 'http://example.com') self.assertFalse('stacktrace' in event) self.assertFalse('exception' in event) self.assertTrue('param_message' in", "of no stacks', extra={'stack': False}) self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0)", "test warning') self.assertEquals(msg['params'], ()) def test_logger_extra_data(self): self.logger.info('This is a test", "%s') self.assertEquals(msg['params'], ('args',)) def test_record_stack(self): self.logger.info('This is a test of", "self.assertEquals(event['message'], 'This is a test error') self.assertFalse('stacktrace' in event) self.assertFalse('exception'", "import get_tempstoreclient from tests.utils.compat import TestCase class LoggingIntegrationTest(TestCase): def setUp(self):", "'This is a test info with an exception') self.assertEquals(msg['params'], ())", "an exception') self.assertTrue('stacktrace' in event) self.assertTrue('exception' in event) exc =", "in event, event) self.assertEquals(event['culprit'], 'tests.handlers.logging.logging_tests.test_explicit_stack') self.assertTrue('message' in event, event) self.assertEquals(event['message'],", "'args') self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) # self.assertEquals(event['message'], 'This is", "LoggingIntegrationTest(TestCase): def setUp(self): self.client = get_tempstoreclient(include_paths=['tests', 'opbeat']) self.handler = OpbeatHandler(self.client)", "info with a url') self.assertEquals(msg['params'], ()) def test_logger_exc_info(self): try: raise", "self.assertEquals(exc['type'], 'ValueError') self.assertEquals(exc['value'], 'This is a test ValueError') self.assertTrue('param_message' in", "a url') 
self.assertEquals(msg['params'], ()) def test_logger_exc_info(self): try: raise ValueError('This is", "self.assertEquals(msg['message'], 'This is a test info with an exception') self.assertEquals(msg['params'],", "is a test of stacks', extra={'stack': True}) self.assertEquals(len(self.client.events), 1) event", "of stacks') self.assertFalse('exception' in event) self.assertTrue('param_message' in event) msg =", "event) self.assertTrue('exception' in event) exc = event['exception'] self.assertEquals(exc['type'], 'ValueError') self.assertEquals(exc['value'],", "self.assertTrue('param_message' in event) msg = event['param_message'] self.assertEquals(msg['message'], 'This is a", "self.assertEquals(event['message'], 'This is a test of no stacks') self.assertFalse('stacktrace' in", "'foo.bar'}) self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) self.assertEquals(event['culprit'], 'foo.bar') def test_logger_exception(self):", "class LoggingIntegrationTest(TestCase): def setUp(self): self.client = get_tempstoreclient(include_paths=['tests', 'opbeat']) self.handler =", "'opbeat']) self.handler = OpbeatHandler(self.client) self.logger = logging.getLogger(__name__) self.logger.handlers = []", "is a test with an exception') self.assertEquals(msg['params'], ()) class LoggingHandlerTest(TestCase):", "'This is a test info with a url') self.assertEquals(msg['params'], ())", "msg = event['param_message'] self.assertEquals(msg['message'], 'This is a test of no", "self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) self.assertEquals(event['message'], 'This is a test", "with a url') self.assertEquals(msg['params'], ()) def test_logger_exc_info(self): try: raise ValueError('This", "args') # print event.keys() self.assertFalse('stacktrace' in event) self.assertFalse('exception' in event)", "in event, event) self.assertEquals(event['message'], 'This is a test of stacks')", "self.logger.addHandler(self.handler) def 
test_logger_basic(self): self.logger.error('This is a test error') self.assertEquals(len(self.client.events), 1)", "def setUp(self): self.client = get_tempstoreclient(include_paths=['tests', 'opbeat']) self.handler = OpbeatHandler(self.client) self.logger", "event['param_message'] self.assertEquals(msg['message'], 'This is a test of %s') self.assertEquals(msg['params'], ('args',))", "__name__) self.assertEquals(event['level'], \"warning\") self.assertFalse('stacktrace' in event) self.assertFalse('exception' in event) self.assertTrue('param_message'", "get_tempstoreclient(include_paths=['tests']) handler = OpbeatHandler(client) self.assertEquals(handler.client, client) def test_client_kwarg(self): client =", "self.logger = logging.getLogger(__name__) self.logger.handlers = [] self.logger.addHandler(self.handler) def test_logger_basic(self): self.logger.error('This", "in event) self.assertFalse('exception' in event) self.assertTrue('param_message' in event) msg =", "url') self.assertEquals(msg['params'], ()) def test_logger_exc_info(self): try: raise ValueError('This is a", "event = self.client.events.pop(0) self.assertEquals(event['logger'], __name__) self.assertEquals(event['level'], \"error\") self.assertEquals(event['message'], 'This is", "a test info with a url', extra=dict( data=dict( url='http://example.com', ),", "a test of stacks') self.assertFalse('exception' in event) self.assertTrue('param_message' in event)", "of args') # print event.keys() self.assertFalse('stacktrace' in event) self.assertFalse('exception' in", "test of no stacks', extra={'stack': False}) self.assertEquals(len(self.client.events), 1) event =", "self.assertEquals(event['message'], 'This is a test of args') # print event.keys()", "self.assertEquals(msg['message'], 'This is a test with an exception') self.assertEquals(msg['params'], ())", "event) msg = event['param_message'] self.assertEquals(msg['message'], 'This is a test error')", "event = self.client.events.pop(0) 
self.assertEquals(event.get('culprit'), None) self.assertEquals(event['message'], 'This is a test", "= self.client.events.pop(0) self.assertEquals(event['extra']['url'], 'http://example.com') self.assertFalse('stacktrace' in event) self.assertFalse('exception' in event)", "extra={'stack': True}) self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) self.assertTrue('stacktrace' in event)", "iter_stack_frames()}) self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) self.assertTrue('culprit' in event, event)", "self.logger.info('This is a test of %s', 'args') self.assertEquals(len(self.client.events), 1) event", "test_logger_warning(self): self.logger.warning('This is a test warning') self.assertEquals(len(self.client.events), 1) event =", "ValueError') except ValueError: self.logger.info('This is a test info with an", "try: raise ValueError('This is a test ValueError') except ValueError: self.logger.exception('This", "self.assertEquals(event['culprit'], 'tests.handlers.logging.logging_tests.test_explicit_stack') self.assertTrue('message' in event, event) self.assertEquals(event['message'], 'This is a", "tests.helpers import get_tempstoreclient from tests.utils.compat import TestCase class LoggingIntegrationTest(TestCase): def", "test of args') # print event.keys() self.assertFalse('stacktrace' in event) self.assertFalse('exception'", "'This is a test of stacks') self.assertFalse('exception' in event) self.assertTrue('param_message'", "self.client = get_tempstoreclient(include_paths=['tests', 'opbeat']) self.handler = OpbeatHandler(self.client) self.logger = logging.getLogger(__name__)", "()) def test_explicit_stack(self): self.logger.info('This is a test of stacks', extra={'stack':", "self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) self.assertEquals(event.get('culprit'), None) self.assertEquals(event['message'], 'This is", "get_tempstoreclient(include_paths=['tests', 'opbeat']) 
self.handler = OpbeatHandler(self.client) self.logger = logging.getLogger(__name__) self.logger.handlers =", "of stacks') self.assertEquals(msg['params'], ()) self.assertEquals(event['culprit'], 'tests.handlers.logging.logging_tests.test_record_stack') self.assertEquals(event['message'], 'This is a", "('args',)) def test_record_stack(self): self.logger.info('This is a test of stacks', extra={'stack':", "get_tempstoreclient from tests.utils.compat import TestCase class LoggingIntegrationTest(TestCase): def setUp(self): self.client", "test_message_params(self): self.logger.info('This is a test of %s', 'args') self.assertEquals(len(self.client.events), 1)", "= OpbeatHandler(client) self.assertEquals(handler.client, client) def test_client_kwarg(self): client = get_tempstoreclient(include_paths=['tests']) handler", "__name__) self.assertFalse('exception' in event) self.assertTrue('param_message' in event) msg = event['param_message']", "a test error') self.assertFalse('stacktrace' in event) self.assertFalse('exception' in event) self.assertTrue('param_message'", "test_logger_exc_info(self): try: raise ValueError('This is a test ValueError') except ValueError:", "is a test of no stacks') self.assertFalse('stacktrace' in event) self.assertFalse('exception'", "extra={'stack': iter_stack_frames()}) self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) self.assertTrue('culprit' in event,", "event['param_message'] self.assertEquals(msg['message'], 'This is a test error') self.assertEquals(msg['params'], ()) def", "no stacks') self.assertEquals(msg['params'], ()) def test_explicit_stack(self): self.logger.info('This is a test", "self.client.events.pop(0) self.assertTrue('culprit' in event, event) self.assertEquals(event['culprit'], 'tests.handlers.logging.logging_tests.test_explicit_stack') self.assertTrue('message' in event,", "self.assertFalse('stacktrace' in event) self.assertFalse('exception' in event) self.assertTrue('param_message' in event) 
msg", "test warning') self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) self.assertEquals(event['logger'], __name__) self.assertEquals(event['level'],", "exception') self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) self.assertEquals(event['message'], 'This is a", "a test ValueError') self.assertTrue('param_message' in event) msg = event['param_message'] self.assertEquals(msg['message'],", "= [] self.logger.addHandler(self.handler) def test_logger_basic(self): self.logger.error('This is a test error')", "self.assertEquals(handler.client, client) def test_client_kwarg(self): client = get_tempstoreclient(include_paths=['tests']) handler = OpbeatHandler(client=client)", "a test of %s') self.assertEquals(msg['params'], ('args',)) def test_record_stack(self): self.logger.info('This is", "an exception', exc_info=True) self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) # self.assertEquals(event['message'],", "self.assertTrue('message' in event, event) self.assertEquals(event['message'], 'This is a test of", "of no stacks') self.assertFalse('stacktrace' in event) self.assertFalse('exception' in event) self.assertTrue('param_message'", "test of stacks', extra={'stack': iter_stack_frames()}) self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0)", "1) event = self.client.events.pop(0) # self.assertEquals(event['message'], 'This is a test", "test info with an exception') self.assertTrue('stacktrace' in event) self.assertTrue('exception' in", "self.assertTrue('stacktrace' in event) self.assertTrue('exception' in event) exc = event['exception'] self.assertEquals(exc['type'],", "opbeat.handlers.logging import OpbeatHandler from opbeat.utils.stacks import iter_stack_frames from tests.helpers import", "()) def test_message_params(self): self.logger.info('This is a test of %s', 'args')", "1) frame = frames[0] self.assertEquals(frame['module'], __name__) 
self.assertFalse('exception' in event) self.assertTrue('param_message'", "exception') self.assertTrue('stacktrace' in event) self.assertTrue('exception' in event) exc = event['exception']", "event = self.client.events.pop(0) self.assertEquals(event['extra']['url'], 'http://example.com') self.assertFalse('stacktrace' in event) self.assertFalse('exception' in", "is a test info with an exception', exc_info=True) self.assertEquals(len(self.client.events), 1)", "self.assertEquals(msg['params'], ()) def test_message_params(self): self.logger.info('This is a test of %s',", "# self.assertEquals(event['message'], 'This is a test of args') # print", "is a test error') self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) self.assertEquals(event['logger'],", "= event['param_message'] self.assertEquals(msg['message'], 'This is a test with an exception')", "def test_record_stack(self): self.logger.info('This is a test of stacks', extra={'stack': True})", "= self.client.events.pop(0) self.assertEquals(event['logger'], __name__) self.assertEquals(event['level'], \"error\") self.assertEquals(event['message'], 'This is a", "self.assertEquals(msg['params'], ()) self.assertEquals(event['culprit'], 'tests.handlers.logging.logging_tests.test_record_stack') self.assertEquals(event['message'], 'This is a test of", "test_client_kwarg(self): client = get_tempstoreclient(include_paths=['tests']) handler = OpbeatHandler(client=client) self.assertEquals(handler.client, client) def", "self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) # self.assertEquals(event['message'], 'This is a", "event.keys() self.assertFalse('stacktrace' in event) self.assertFalse('exception' in event) self.assertTrue('param_message' in event)", "= self.client.events.pop(0) self.assertEquals(event['logger'], __name__) self.assertEquals(event['level'], \"warning\") self.assertFalse('stacktrace' in event) self.assertFalse('exception'", "from opbeat.handlers.logging 
import OpbeatHandler from opbeat.utils.stacks import iter_stack_frames from tests.helpers", "ValueError') except ValueError: self.logger.exception('This is a test with an exception')", "raise ValueError('This is a test ValueError') except ValueError: self.logger.info('This is", "import TestCase class LoggingIntegrationTest(TestCase): def setUp(self): self.client = get_tempstoreclient(include_paths=['tests', 'opbeat'])", "of stacks') def test_no_record_stack(self): self.logger.info('This is a test of no", "1) event = self.client.events.pop(0) self.assertTrue('stacktrace' in event) frames = event['stacktrace']['frames']", "client) def test_client_kwarg(self): client = get_tempstoreclient(include_paths=['tests']) handler = OpbeatHandler(client=client) self.assertEquals(handler.client,", "warning') self.assertEquals(msg['params'], ()) def test_logger_extra_data(self): self.logger.info('This is a test info", "handler = OpbeatHandler(client) self.assertEquals(handler.client, client) def test_client_kwarg(self): client = get_tempstoreclient(include_paths=['tests'])", "def test_client_arg(self): client = get_tempstoreclient(include_paths=['tests']) handler = OpbeatHandler(client) self.assertEquals(handler.client, client)", "test of stacks', extra={'culprit': 'foo.bar'}) self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0)", "a test ValueError') except ValueError: self.logger.exception('This is a test with", "self.assertEquals(msg['message'], 'This is a test of no stacks') self.assertEquals(msg['params'], ())", "()) def test_logger_extra_data(self): self.logger.info('This is a test info with a", "msg = event['param_message'] self.assertEquals(msg['message'], 'This is a test error') self.assertEquals(msg['params'],", "a test info with a url') self.assertEquals(msg['params'], ()) def test_logger_exc_info(self):", "'tests.handlers.logging.logging_tests.test_record_stack') self.assertEquals(event['message'], 'This is a test of stacks') def 
test_no_record_stack(self):", "event) frames = event['stacktrace']['frames'] self.assertNotEquals(len(frames), 1) frame = frames[0] self.assertEquals(frame['module'],", "a test error') self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) self.assertEquals(event['logger'], __name__)", "no stacks') self.assertFalse('stacktrace' in event) self.assertFalse('exception' in event) self.assertTrue('param_message' in", "info with an exception') self.assertTrue('stacktrace' in event) self.assertTrue('exception' in event)", "1) event = self.client.events.pop(0) self.assertEquals(event['extra']['url'], 'http://example.com') self.assertFalse('stacktrace' in event) self.assertFalse('exception'", "self.client.events.pop(0) # self.assertEquals(event['message'], 'This is a test info with an", "is a test with an exception') self.assertTrue('stacktrace' in event) self.assertTrue('exception'", "= get_tempstoreclient(include_paths=['tests', 'opbeat']) self.handler = OpbeatHandler(self.client) self.logger = logging.getLogger(__name__) self.logger.handlers", "a test with an exception') self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0)", "event['param_message'] self.assertEquals(msg['message'], 'This is a test info with a url')", "self.assertTrue('culprit' in event, event) self.assertEquals(event['culprit'], 'tests.handlers.logging.logging_tests.test_explicit_stack') self.assertTrue('message' in event, event)", "False}) self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) self.assertEquals(event.get('culprit'), None) self.assertEquals(event['message'], 'This", "client = get_tempstoreclient(include_paths=['tests']) handler = OpbeatHandler(client=client) self.assertEquals(handler.client, client) def test_invalid_first_arg_type(self):", "self.assertEquals(msg['message'], 'This is a test of stacks') self.assertEquals(msg['params'], ()) self.assertTrue('stacktrace'", "frame = frames[0] 
self.assertEquals(frame['module'], __name__) self.assertFalse('exception' in event) self.assertTrue('param_message' in", "is a test of no stacks', extra={'stack': False}) self.assertEquals(len(self.client.events), 1)", "stacks') self.assertEquals(msg['params'], ()) self.assertTrue('stacktrace' in event) def test_extra_culprit(self): self.logger.info('This is", "frames[0] self.assertEquals(frame['module'], __name__) self.assertFalse('exception' in event) self.assertTrue('param_message' in event) msg", "event) self.assertEquals(event['culprit'], 'tests.handlers.logging.logging_tests.test_explicit_stack') self.assertTrue('message' in event, event) self.assertEquals(event['message'], 'This is", "test_extra_culprit(self): self.logger.info('This is a test of stacks', extra={'culprit': 'foo.bar'}) self.assertEquals(len(self.client.events),", "is a test of stacks') self.assertEquals(msg['params'], ()) self.assertTrue('stacktrace' in event)", "event['param_message'] self.assertEquals(msg['message'], 'This is a test warning') self.assertEquals(msg['params'], ()) def", "self.assertTrue('stacktrace' in event) frames = event['stacktrace']['frames'] self.assertNotEquals(len(frames), 1) frame =", "import logging from opbeat.handlers.logging import OpbeatHandler from opbeat.utils.stacks import iter_stack_frames", "logging from opbeat.handlers.logging import OpbeatHandler from opbeat.utils.stacks import iter_stack_frames from", "'http://example.com') self.assertFalse('stacktrace' in event) self.assertFalse('exception' in event) self.assertTrue('param_message' in event)", "'This is a test of stacks') self.assertEquals(msg['params'], ()) self.assertTrue('stacktrace' in", "= self.client.events.pop(0) self.assertEquals(event['message'], 'This is a test with an exception')", "1) event = self.client.events.pop(0) self.assertEquals(event['message'], 'This is a test with", "self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) self.assertEquals(event['logger'], 
__name__) self.assertEquals(event['level'], \"error\") self.assertEquals(event['message'],", "stacks', extra={'stack': iter_stack_frames()}) self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) self.assertTrue('culprit' in", "logging.getLogger(__name__) self.logger.handlers = [] self.logger.addHandler(self.handler) def test_logger_basic(self): self.logger.error('This is a", "of %s', 'args') self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) # self.assertEquals(event['message'],", "self.assertEquals(event['message'], 'This is a test with an exception') self.assertTrue('stacktrace' in", "self.assertEquals(event['culprit'], 'tests.handlers.logging.logging_tests.test_record_stack') self.assertEquals(event['message'], 'This is a test of stacks') def", "self.logger.handlers = [] self.logger.addHandler(self.handler) def test_logger_basic(self): self.logger.error('This is a test", "self.logger.info('This is a test of stacks', extra={'stack': iter_stack_frames()}) self.assertEquals(len(self.client.events), 1)", "info with a url', extra=dict( data=dict( url='http://example.com', ), )) self.assertEquals(len(self.client.events),", "self.client.events.pop(0) self.assertTrue('stacktrace' in event) frames = event['stacktrace']['frames'] self.assertNotEquals(len(frames), 1) frame", "stacks') self.assertFalse('stacktrace' in event) self.assertFalse('exception' in event) self.assertTrue('param_message' in event)", "extra=dict( data=dict( url='http://example.com', ), )) self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0)", "event['param_message'] self.assertEquals(msg['message'], 'This is a test of no stacks') self.assertEquals(msg['params'],", "def test_message_params(self): self.logger.info('This is a test of %s', 'args') self.assertEquals(len(self.client.events)," ]
[ "major): super().__init__() self.name = name self.template = template self.major =", "q_major_minor(dbsession, major, minor): cond = sql.and_( MimeMajor.name == major, Mime.name", "if minor and minor != '*': cond.append(Mime.name == minor) return", "dbsession.execute( sql.select(Mime).join(Mime.major).options( orm.contains_eager(Mime.major) ).filter(cond) ).scalar_one_or_none() return result ########### # Filters", "class Mime(Base): def __init__(self, name, template, major): super().__init__() self.name =", "@classmethod def filter_mime(cls, value): (major, minor) = value.split('/') cond =", "# http://www.iana.org/assignments/media-types/media-types.xhtml class MimeMajor(Base): \"\"\"Mime major\"\"\" def __init__(self, name): super().__init__()", "from sqlalchemy import sql from sqlalchemy import orm from sqlalchemy.orm.exc", "self.major = major @property def full(self): return '{0}/{1}'.format(self.major.name, self.name) @staticmethod", "# ########### @classmethod def filter_mime(cls, value): (major, minor) = value.split('/')", ").filter(cond) ).scalar_one_or_none() return result ########### # Filters # ########### @classmethod", "minor): cond = sql.and_( MimeMajor.name == major, Mime.name == minor", "########### # Filters # ########### @classmethod def filter_mime(cls, value): (major,", "from sqlalchemy import orm from sqlalchemy.orm.exc import NoResultFound from ..", "coding: utf-8 -*- # pylint: disable=E1101 from sqlalchemy import sql", "########### @classmethod def filter_mime(cls, value): (major, minor) = value.split('/') cond", "orm.contains_eager(Mime.major) ).filter(cond) ).scalar_one_or_none() return result ########### # Filters # ###########", "== major) if minor and minor != '*': cond.append(Mime.name ==", "self.template = template self.major = major @property def full(self): return", "return result ########### # Filters # ########### @classmethod def filter_mime(cls,", "class MimeMajor(Base): \"\"\"Mime major\"\"\" def __init__(self, name): 
super().__init__() self.name =", "name, template, major): super().__init__() self.name = name self.template = template", "def full(self): return '{0}/{1}'.format(self.major.name, self.name) @staticmethod def q_major_minor(dbsession, major, minor):", "-*- coding: utf-8 -*- # pylint: disable=E1101 from sqlalchemy import", "@staticmethod def q_major_minor(dbsession, major, minor): cond = sql.and_( MimeMajor.name ==", "result = dbsession.execute( sql.select(Mime).join(Mime.major).options( orm.contains_eager(Mime.major) ).filter(cond) ).scalar_one_or_none() return result ###########", "Base # http://www.iana.org/assignments/media-types/media-types.xhtml class MimeMajor(Base): \"\"\"Mime major\"\"\" def __init__(self, name):", "minor ) result = dbsession.execute( sql.select(Mime).join(Mime.major).options( orm.contains_eager(Mime.major) ).filter(cond) ).scalar_one_or_none() return", "__init__(self, name): super().__init__() self.name = name class Mime(Base): def __init__(self,", "template self.major = major @property def full(self): return '{0}/{1}'.format(self.major.name, self.name)", "= value.split('/') cond = sql.and_() cond.append(MimeMajor.name == major) if minor", "from sqlalchemy.orm.exc import NoResultFound from .. 
import Base # http://www.iana.org/assignments/media-types/media-types.xhtml", "= name self.template = template self.major = major @property def", "value.split('/') cond = sql.and_() cond.append(MimeMajor.name == major) if minor and", "= major @property def full(self): return '{0}/{1}'.format(self.major.name, self.name) @staticmethod def", "filter_mime(cls, value): (major, minor) = value.split('/') cond = sql.and_() cond.append(MimeMajor.name", "major, Mime.name == minor ) result = dbsession.execute( sql.select(Mime).join(Mime.major).options( orm.contains_eager(Mime.major)", "= dbsession.execute( sql.select(Mime).join(Mime.major).options( orm.contains_eager(Mime.major) ).filter(cond) ).scalar_one_or_none() return result ########### #", "disable=E1101 from sqlalchemy import sql from sqlalchemy import orm from", "-*- # pylint: disable=E1101 from sqlalchemy import sql from sqlalchemy", "def q_major_minor(dbsession, major, minor): cond = sql.and_( MimeMajor.name == major,", "super().__init__() self.name = name self.template = template self.major = major", "major\"\"\" def __init__(self, name): super().__init__() self.name = name class Mime(Base):", "def filter_mime(cls, value): (major, minor) = value.split('/') cond = sql.and_()", "utf-8 -*- # pylint: disable=E1101 from sqlalchemy import sql from", "import sql from sqlalchemy import orm from sqlalchemy.orm.exc import NoResultFound", "template, major): super().__init__() self.name = name self.template = template self.major", "= sql.and_( MimeMajor.name == major, Mime.name == minor ) result", "minor and minor != '*': cond.append(Mime.name == minor) return cond", "sqlalchemy.orm.exc import NoResultFound from .. 
import Base # http://www.iana.org/assignments/media-types/media-types.xhtml class", "full(self): return '{0}/{1}'.format(self.major.name, self.name) @staticmethod def q_major_minor(dbsession, major, minor): cond", "= template self.major = major @property def full(self): return '{0}/{1}'.format(self.major.name,", "# -*- coding: utf-8 -*- # pylint: disable=E1101 from sqlalchemy", "= name class Mime(Base): def __init__(self, name, template, major): super().__init__()", "# Filters # ########### @classmethod def filter_mime(cls, value): (major, minor)", "value): (major, minor) = value.split('/') cond = sql.and_() cond.append(MimeMajor.name ==", "def __init__(self, name, template, major): super().__init__() self.name = name self.template", "sql.and_( MimeMajor.name == major, Mime.name == minor ) result =", "sql.select(Mime).join(Mime.major).options( orm.contains_eager(Mime.major) ).filter(cond) ).scalar_one_or_none() return result ########### # Filters #", "name self.template = template self.major = major @property def full(self):", "sqlalchemy import sql from sqlalchemy import orm from sqlalchemy.orm.exc import", "import NoResultFound from .. import Base # http://www.iana.org/assignments/media-types/media-types.xhtml class MimeMajor(Base):", "cond = sql.and_( MimeMajor.name == major, Mime.name == minor )", "cond = sql.and_() cond.append(MimeMajor.name == major) if minor and minor", "major) if minor and minor != '*': cond.append(Mime.name == minor)", "import Base # http://www.iana.org/assignments/media-types/media-types.xhtml class MimeMajor(Base): \"\"\"Mime major\"\"\" def __init__(self,", "from .. 
import Base # http://www.iana.org/assignments/media-types/media-types.xhtml class MimeMajor(Base): \"\"\"Mime major\"\"\"", "cond.append(MimeMajor.name == major) if minor and minor != '*': cond.append(Mime.name", "(major, minor) = value.split('/') cond = sql.and_() cond.append(MimeMajor.name == major)", "Filters # ########### @classmethod def filter_mime(cls, value): (major, minor) =", "= sql.and_() cond.append(MimeMajor.name == major) if minor and minor !=", "name): super().__init__() self.name = name class Mime(Base): def __init__(self, name,", "def __init__(self, name): super().__init__() self.name = name class Mime(Base): def", "pylint: disable=E1101 from sqlalchemy import sql from sqlalchemy import orm", ".. import Base # http://www.iana.org/assignments/media-types/media-types.xhtml class MimeMajor(Base): \"\"\"Mime major\"\"\" def", "NoResultFound from .. import Base # http://www.iana.org/assignments/media-types/media-types.xhtml class MimeMajor(Base): \"\"\"Mime", "orm from sqlalchemy.orm.exc import NoResultFound from .. import Base #", "== major, Mime.name == minor ) result = dbsession.execute( sql.select(Mime).join(Mime.major).options(", "self.name = name self.template = template self.major = major @property", ").scalar_one_or_none() return result ########### # Filters # ########### @classmethod def", "@property def full(self): return '{0}/{1}'.format(self.major.name, self.name) @staticmethod def q_major_minor(dbsession, major,", "\"\"\"Mime major\"\"\" def __init__(self, name): super().__init__() self.name = name class", "import orm from sqlalchemy.orm.exc import NoResultFound from .. import Base", "sqlalchemy import orm from sqlalchemy.orm.exc import NoResultFound from .. 
import", "major @property def full(self): return '{0}/{1}'.format(self.major.name, self.name) @staticmethod def q_major_minor(dbsession,", "self.name) @staticmethod def q_major_minor(dbsession, major, minor): cond = sql.and_( MimeMajor.name", "MimeMajor(Base): \"\"\"Mime major\"\"\" def __init__(self, name): super().__init__() self.name = name", "major, minor): cond = sql.and_( MimeMajor.name == major, Mime.name ==", ") result = dbsession.execute( sql.select(Mime).join(Mime.major).options( orm.contains_eager(Mime.major) ).filter(cond) ).scalar_one_or_none() return result", "Mime.name == minor ) result = dbsession.execute( sql.select(Mime).join(Mime.major).options( orm.contains_eager(Mime.major) ).filter(cond)", "MimeMajor.name == major, Mime.name == minor ) result = dbsession.execute(", "Mime(Base): def __init__(self, name, template, major): super().__init__() self.name = name", "result ########### # Filters # ########### @classmethod def filter_mime(cls, value):", "'{0}/{1}'.format(self.major.name, self.name) @staticmethod def q_major_minor(dbsession, major, minor): cond = sql.and_(", "self.name = name class Mime(Base): def __init__(self, name, template, major):", "name class Mime(Base): def __init__(self, name, template, major): super().__init__() self.name", "http://www.iana.org/assignments/media-types/media-types.xhtml class MimeMajor(Base): \"\"\"Mime major\"\"\" def __init__(self, name): super().__init__() self.name", "sql from sqlalchemy import orm from sqlalchemy.orm.exc import NoResultFound from", "== minor ) result = dbsession.execute( sql.select(Mime).join(Mime.major).options( orm.contains_eager(Mime.major) ).filter(cond) ).scalar_one_or_none()", "__init__(self, name, template, major): super().__init__() self.name = name self.template =", "# pylint: disable=E1101 from sqlalchemy import sql from sqlalchemy import", "return '{0}/{1}'.format(self.major.name, self.name) @staticmethod def q_major_minor(dbsession, major, minor): cond =", "minor) = 
value.split('/') cond = sql.and_() cond.append(MimeMajor.name == major) if", "super().__init__() self.name = name class Mime(Base): def __init__(self, name, template,", "sql.and_() cond.append(MimeMajor.name == major) if minor and minor != '*':" ]
[ "= Goods.objects.all()[:10] # for good in goods: # json_dict =", "goods: # json_dict = {} # json_dict[\"name\"] = good.name #", "good.add_time # json_list.append(json_dict) # from django.http import HttpResponse # import", "from django.http import HttpResponse, JsonResponse # jsonResponse做的工作也就是加上了dumps和content_type # return HttpResponse(json.dumps(json_data),", "json_dict = {} # json_dict[\"name\"] = good.name # json_dict[\"category\"] =", "model_to_dict for good in goods: json_dict = model_to_dict(good) json_list.append(json_dict) import", "json_dict[\"add_time\"] = good.add_time # json_list.append(json_dict) # from django.http import HttpResponse", "HttpResponse, JsonResponse # jsonResponse做的工作也就是加上了dumps和content_type # return HttpResponse(json.dumps(json_data), content_type=\"application/json\") # 注释掉loads,下面语句正常", "from django.views.generic.base import View class GoodsListView(View): def get(self, request): \"\"\"", "json_dict[\"name\"] = good.name # json_dict[\"category\"] = good.category.name # json_dict[\"market_price\"] =", "good.market_price # json_dict[\"add_time\"] = good.add_time # json_list.append(json_dict) # from django.http", "serializers json_data = serializers.serialize('json', goods) json_data = json.loads(json_data) from django.http", "goods: json_dict = model_to_dict(good) json_list.append(json_dict) import json from django.core import", "import json # return HttpResponse(json.dumps(json_list),content_type=\"application/json\") from django.forms.models import model_to_dict for", "json_list.append(json_dict) import json from django.core import serializers json_data = serializers.serialize('json',", "# import json # return HttpResponse(json.dumps(json_list),content_type=\"application/json\") from django.forms.models import model_to_dict", "from django.core import serializers json_data = serializers.serialize('json', goods) json_data =", "django.http import HttpResponse # import json # return 
HttpResponse(json.dumps(json_list),content_type=\"application/json\") from", "Goods.objects.all()[:10] # for good in goods: # json_dict = {}", "# from django.http import HttpResponse # import json # return", "return HttpResponse(json.dumps(json_list),content_type=\"application/json\") from django.forms.models import model_to_dict for good in goods:", "{} # json_dict[\"name\"] = good.name # json_dict[\"category\"] = good.category.name #", "for good in goods: # json_dict = {} # json_dict[\"name\"]", "json_dict[\"market_price\"] = good.market_price # json_dict[\"add_time\"] = good.add_time # json_list.append(json_dict) #", "from django.http import HttpResponse # import json # return HttpResponse(json.dumps(json_list),content_type=\"application/json\")", "return HttpResponse(json.dumps(json_data), content_type=\"application/json\") # 注释掉loads,下面语句正常 # return HttpResponse(json_data, content_type=\"application/json\") return", "= good.category.name # json_dict[\"market_price\"] = good.market_price # json_dict[\"add_time\"] = good.add_time", "HttpResponse # import json # return HttpResponse(json.dumps(json_list),content_type=\"application/json\") from django.forms.models import", "通过django的view实现商品列表页 \"\"\" json_list = [] goods = Goods.objects.all()[:10] # for", "django.forms.models import model_to_dict for good in goods: json_dict = model_to_dict(good)", "JsonResponse # jsonResponse做的工作也就是加上了dumps和content_type # return HttpResponse(json.dumps(json_data), content_type=\"application/json\") # 注释掉loads,下面语句正常 #", "= good.add_time # json_list.append(json_dict) # from django.http import HttpResponse #", "encoding: utf-8 from goods.models import Goods from django.views.generic.base import View", "import json from django.core import serializers json_data = serializers.serialize('json', goods)", "for good in goods: json_dict = model_to_dict(good) json_list.append(json_dict) import json", "json.loads(json_data) from django.http import HttpResponse, JsonResponse # 
jsonResponse做的工作也就是加上了dumps和content_type # return", "json_dict = model_to_dict(good) json_list.append(json_dict) import json from django.core import serializers", "good.name # json_dict[\"category\"] = good.category.name # json_dict[\"market_price\"] = good.market_price #", "HttpResponse(json.dumps(json_data), content_type=\"application/json\") # 注释掉loads,下面语句正常 # return HttpResponse(json_data, content_type=\"application/json\") return JsonResponse(json_data,", "= model_to_dict(good) json_list.append(json_dict) import json from django.core import serializers json_data", "= json.loads(json_data) from django.http import HttpResponse, JsonResponse # jsonResponse做的工作也就是加上了dumps和content_type #", "django.core import serializers json_data = serializers.serialize('json', goods) json_data = json.loads(json_data)", "= good.market_price # json_dict[\"add_time\"] = good.add_time # json_list.append(json_dict) # from", "# for good in goods: # json_dict = {} #", "# json_dict[\"name\"] = good.name # json_dict[\"category\"] = good.category.name # json_dict[\"market_price\"]", "goods.models import Goods from django.views.generic.base import View class GoodsListView(View): def", "json_dict[\"category\"] = good.category.name # json_dict[\"market_price\"] = good.market_price # json_dict[\"add_time\"] =", "def get(self, request): \"\"\" 通过django的view实现商品列表页 \"\"\" json_list = [] goods", "= [] goods = Goods.objects.all()[:10] # for good in goods:", "# json_dict[\"market_price\"] = good.market_price # json_dict[\"add_time\"] = good.add_time # json_list.append(json_dict)", "# json_list.append(json_dict) # from django.http import HttpResponse # import json", "import HttpResponse, JsonResponse # jsonResponse做的工作也就是加上了dumps和content_type # return HttpResponse(json.dumps(json_data), content_type=\"application/json\") #", "[] goods = Goods.objects.all()[:10] # for good in goods: #", "from django.forms.models import model_to_dict for good in goods: json_dict =", "# encoding: utf-8 from 
goods.models import Goods from django.views.generic.base import", "json # return HttpResponse(json.dumps(json_list),content_type=\"application/json\") from django.forms.models import model_to_dict for good", "goods = Goods.objects.all()[:10] # for good in goods: # json_dict", "good in goods: json_dict = model_to_dict(good) json_list.append(json_dict) import json from", "# json_dict[\"category\"] = good.category.name # json_dict[\"market_price\"] = good.market_price # json_dict[\"add_time\"]", "get(self, request): \"\"\" 通过django的view实现商品列表页 \"\"\" json_list = [] goods =", "content_type=\"application/json\") # 注释掉loads,下面语句正常 # return HttpResponse(json_data, content_type=\"application/json\") return JsonResponse(json_data, safe=False)", "# json_dict = {} # json_dict[\"name\"] = good.name # json_dict[\"category\"]", "django.http import HttpResponse, JsonResponse # jsonResponse做的工作也就是加上了dumps和content_type # return HttpResponse(json.dumps(json_data), content_type=\"application/json\")", "in goods: json_dict = model_to_dict(good) json_list.append(json_dict) import json from django.core", "# jsonResponse做的工作也就是加上了dumps和content_type # return HttpResponse(json.dumps(json_data), content_type=\"application/json\") # 注释掉loads,下面语句正常 # return", "class GoodsListView(View): def get(self, request): \"\"\" 通过django的view实现商品列表页 \"\"\" json_list =", "= good.name # json_dict[\"category\"] = good.category.name # json_dict[\"market_price\"] = good.market_price", "# return HttpResponse(json.dumps(json_list),content_type=\"application/json\") from django.forms.models import model_to_dict for good in", "View class GoodsListView(View): def get(self, request): \"\"\" 通过django的view实现商品列表页 \"\"\" json_list", "jsonResponse做的工作也就是加上了dumps和content_type # return HttpResponse(json.dumps(json_data), content_type=\"application/json\") # 注释掉loads,下面语句正常 # return HttpResponse(json_data,", "Goods from django.views.generic.base import View class GoodsListView(View): def get(self, request):", "json_list 
= [] goods = Goods.objects.all()[:10] # for good in", "good.category.name # json_dict[\"market_price\"] = good.market_price # json_dict[\"add_time\"] = good.add_time #", "good in goods: # json_dict = {} # json_dict[\"name\"] =", "request): \"\"\" 通过django的view实现商品列表页 \"\"\" json_list = [] goods = Goods.objects.all()[:10]", "= {} # json_dict[\"name\"] = good.name # json_dict[\"category\"] = good.category.name", "goods) json_data = json.loads(json_data) from django.http import HttpResponse, JsonResponse #", "json_data = serializers.serialize('json', goods) json_data = json.loads(json_data) from django.http import", "json_data = json.loads(json_data) from django.http import HttpResponse, JsonResponse # jsonResponse做的工作也就是加上了dumps和content_type", "django.views.generic.base import View class GoodsListView(View): def get(self, request): \"\"\" 通过django的view实现商品列表页", "GoodsListView(View): def get(self, request): \"\"\" 通过django的view实现商品列表页 \"\"\" json_list = []", "json_list.append(json_dict) # from django.http import HttpResponse # import json #", "import Goods from django.views.generic.base import View class GoodsListView(View): def get(self,", "model_to_dict(good) json_list.append(json_dict) import json from django.core import serializers json_data =", "HttpResponse(json.dumps(json_list),content_type=\"application/json\") from django.forms.models import model_to_dict for good in goods: json_dict", "serializers.serialize('json', goods) json_data = json.loads(json_data) from django.http import HttpResponse, JsonResponse", "= serializers.serialize('json', goods) json_data = json.loads(json_data) from django.http import HttpResponse,", "import View class GoodsListView(View): def get(self, request): \"\"\" 通过django的view实现商品列表页 \"\"\"", "\"\"\" 通过django的view实现商品列表页 \"\"\" json_list = [] goods = Goods.objects.all()[:10] #", "from goods.models import Goods from django.views.generic.base import View class GoodsListView(View):", "import HttpResponse # import json # return 
HttpResponse(json.dumps(json_list),content_type=\"application/json\") from django.forms.models", "in goods: # json_dict = {} # json_dict[\"name\"] = good.name", "import model_to_dict for good in goods: json_dict = model_to_dict(good) json_list.append(json_dict)", "# return HttpResponse(json.dumps(json_data), content_type=\"application/json\") # 注释掉loads,下面语句正常 # return HttpResponse(json_data, content_type=\"application/json\")", "utf-8 from goods.models import Goods from django.views.generic.base import View class", "\"\"\" json_list = [] goods = Goods.objects.all()[:10] # for good", "# json_dict[\"add_time\"] = good.add_time # json_list.append(json_dict) # from django.http import", "json from django.core import serializers json_data = serializers.serialize('json', goods) json_data", "import serializers json_data = serializers.serialize('json', goods) json_data = json.loads(json_data) from" ]
[ "len(args) == 0: parser.error('command name not specified') else: parser.error('too many", "if len(process_name) > 0: system = platform.system() + '-' +", "process.write_pid(os.getpid()) redirect_stdin_to_devnull() os.execvpe(args[0], args, env) def start(process, options): if process.alive():", "= realpath(options.etc_dir or pathjoin(o.install_path, 'etc')) o.node_config = realpath(options.node_config or pathjoin(o.etc_dir,", "commands = 'Commands: ' + ', '.join(COMMANDS) parser = OptionParser(prog='launcher',", "mode): \"\"\"Open file in read/write mode (without truncating it)\"\"\" return", "% process.read_pid()) return create_app_symlinks(options) args, env = build_java_execution(options, True) makedirs(dirname(options.launcher_log))", "is locked by us' self.pid_file.seek(0) line = self.pid_file.readline().strip() if len(line)", "options.server_log properties['log.enable-console'] = 'false' jvm_properties = load_lines(options.jvm_config) launcher_properties = load_properties(options.launcher_config)", "%s' % arg) key, value = [i.strip() for i in", "(message, pid)) def stop(process): terminate(process, SIGTERM, 'Stopped') def kill(process): terminate(process,", "v.strip() return properties def load_lines(f): \"\"\"Load lines from a file,", "pid %s failed: %s' % (pid, e)) def read_pid(self): assert", "exclusive lock (inheritable) on a file\"\"\" try: flock(f, LOCK_EX |", "--config)') if key == 'log.output-file': parser.error('cannot specify server log using", "% p) return True except OSError as e: if e.errno", "ETC_DIR/config.properties') parser.add_option('--log-levels-file', metavar='FILE', help='Defaults to ETC_DIR/log.properties') parser.add_option('--data-dir', metavar='DIR', help='Defaults to", "% options.jvm_config) if not exists(options.launcher_config): raise Exception('Launcher config file is", "'%s'\" % (f, basename(f))) p = realpath(dirname(f)) if basename(p) !=", "def open_read_write(f, mode): \"\"\"Open file in read/write mode (without 
truncating", "return True except OSError as e: raise Exception('Signaling pid %s", "and all intermediate ones\"\"\" try: os.makedirs(p) except OSError as e:", "parser.error('cannot specify config using -D option (use --config)') if key", "terminate(process, SIGTERM, 'Stopped') def kill(process): terminate(process, SIGKILL, 'Killed') def status(process):", "run(process, options): if process.alive(): print('Already running as %s' % process.read_pid())", "is missing: %s' % options.launcher_config) if options.log_levels_set and not exists(options.log_levels):", "env = build_java_execution(options, True) makedirs(dirname(options.launcher_log)) log = open_append(options.launcher_log) makedirs(options.data_dir) os.chdir(options.data_dir)", "if not exists(options.config_path): raise Exception('Config file is missing: %s' %", "lock (inheritable) on a file\"\"\" try: flock(f, LOCK_EX | LOCK_NB)", "create_parser() (options, args) = parser.parse_args() if len(args) != 1: if", "% options.log_levels) properties = options.properties.copy() if exists(options.log_levels): properties['log.levels-file'] = options.log_levels", "try: install_path = find_install_path(sys.argv[0]) except Exception as e: print('ERROR: %s'", "%s' % options.launcher_config) if options.log_levels_set and not exists(options.log_levels): raise Exception('Log", "> 0: process.write_pid(pid) print('Started as %s' % pid) return if", "o.arguments = options.arguments or [] if o.verbose: print_options(o) try: handle_command(command,", "self.pid_file.truncate() def write_pid(self, pid): self.clear_pid() self.pid_file.write(str(pid) + '\\n') self.pid_file.flush() def", "os import platform import sys import traceback from fcntl import", "to DATA_DIR/var/log/launcher.log (only in daemon mode)') parser.add_option('--server-log-file', metavar='FILE', help='Defaults to", "SIGTERM, SIGKILL from stat import S_ISLNK from time import sleep", "intermediate ones\"\"\" try: os.makedirs(p) except OSError as e: if e.errno", 
"O_WRONLY, O_APPEND from os.path import basename, dirname, exists, realpath from", "e.errno != errno.ENOENT: raise return False def create_symlink(source, target): \"\"\"Create", "exists(o.node_config): node_properties = load_properties(o.node_config) data_dir = node_properties.get('node.data-dir') o.data_dir = realpath(options.data_dir", "pathjoin(options.data_dir, 'etc')) if options.install_path != options.data_dir: create_symlink( pathjoin(options.install_path, 'plugin'), pathjoin(options.data_dir,", "command += options.arguments if options.verbose: print(command) print(\"\") env = os.environ.copy()", "'Killed') def status(process): if not process.alive(): print('Not running') sys.exit(LSB_NOT_RUNNING) print('Running", "another type of file exists\"\"\" try: st = os.lstat(p) if", "v o.arguments = options.arguments or [] if o.verbose: print_options(o) try:", "= realpath(options.node_config or pathjoin(o.etc_dir, 'node.properties')) o.jvm_config = realpath(options.jvm_config or pathjoin(o.etc_dir,", "in properties.items()] classpath = pathjoin(options.install_path, 'lib', '*') command = ['java',", "support programs that reference 'etc/xyz' from within their config files:", "os.environ.copy() # set process name: https://github.com/electrum/procname process_name = launcher_properties.get('process-name', '')", "% pid) return if hasattr(os, \"set_inheritable\"): # See https://docs.python.org/3/library/os.html#inheritance-of-file-descriptors #", "self.pid_file.write(str(pid) + '\\n') self.pid_file.flush() def alive(self): self.refresh() if self.locked: return", "'etc/xyz' from within their config files: log.levels-file=etc/log.properties \"\"\" if options.etc_dir", "daemon: properties['log.output-file'] = options.server_log properties['log.enable-console'] = 'false' jvm_properties = load_lines(options.jvm_config)", "log levels using -D option (use --log-levels-file)') properties[key] = value", "makedirs(dirname(path)) self.path = path self.pid_file = 
open_read_write(path, 0o600) self.refresh() def", "import errno import os import platform import sys import traceback", "{} for arg in args: if '=' not in arg:", "if pid <= 0: raise Exception(\"Pid file '%s' contains an", "process name: https://github.com/electrum/procname process_name = launcher_properties.get('process-name', '') if len(process_name) >", "in append mode\"\"\" # noinspection PyTypeChecker return os.open(f, O_WRONLY |", "> 0 and not line.startswith('#'): lines.append(line) return lines def try_lock(f):", "# Since Python 3.4 os.set_inheritable(process.pid_file.fileno(), True) os.setsid() redirect_stdin_to_devnull() redirect_output(log) os.close(log)", "not self.locked, 'pid file is locked by us' self.pid_file.seek(0) line", "properties['log.output-file'] = options.server_log properties['log.enable-console'] = 'false' jvm_properties = load_lines(options.jvm_config) launcher_properties", "os.dup2(fd, sys.stdout.fileno()) os.dup2(fd, sys.stderr.fileno()) def symlink_exists(p): \"\"\"Check if symlink exists", "if self.locked: return False pid = self.read_pid() try: os.kill(pid, 0)", "line in load_lines(f): k, v = line.split('=', 1) properties[k.strip()] =", "\"\"\"Open a raw file descriptor in append mode\"\"\" # noinspection", "(inheritable) on a file\"\"\" try: flock(f, LOCK_EX | LOCK_NB) return", "process.alive(): print('Already running as %s' % process.read_pid()) return create_app_symlinks(options) args,", "# noinspection PyTypeChecker return os.open(f, O_WRONLY | O_APPEND | O_CREAT,", "'jvm.config')) o.config_path = realpath(options.config or pathjoin(o.etc_dir, 'config.properties')) o.log_levels = realpath(options.log_levels_file", "file is missing: %s' % options.config_path) if not exists(options.jvm_config): raise", "a file descriptor\"\"\" os.dup2(fd, sys.stdout.fileno()) os.dup2(fd, sys.stderr.fileno()) def symlink_exists(p): \"\"\"Check", "O_APPEND | O_CREAT, 0o644) def redirect_output(fd): \"\"\"Redirect stdout and stderr", 
"redirect_stdin_to_devnull(): \"\"\"Redirect stdin to /dev/null\"\"\" fd = os.open(os.devnull, O_RDWR) os.dup2(fd,", "property\") properties['config'] = options.config_path system_properties = ['-D%s=%s' % i for", "signal import SIGTERM, SIGKILL from stat import S_ISLNK from time", "raise if another type of file exists\"\"\" try: st =", "O_CREAT, 0o644) def redirect_output(fd): \"\"\"Redirect stdout and stderr to a", "o.data_dir = realpath(options.data_dir or data_dir or o.install_path) o.pid_file = realpath(options.pid_file", "= options.properties.copy() if exists(options.log_levels): properties['log.levels-file'] = options.log_levels if daemon: properties['log.output-file']", "def terminate(process, signal, message): if not process.alive(): print('Not running') return", "directory. This is needed to support programs that reference 'etc/xyz'", "import os import platform import sys import traceback from fcntl", "True: try: os.kill(pid, signal) except OSError as e: if e.errno", "if not S_ISLNK(st.st_mode): raise Exception('Path exists and is not a", "= load_properties(options.launcher_config) try: main_class = launcher_properties['main-class'] except KeyError: raise Exception(\"Launcher", "command: %s' % command) try: install_path = find_install_path(sys.argv[0]) except Exception", "self.locked, 'pid file not locked by us' self.pid_file.seek(0) self.pid_file.truncate() def", "options): if process.alive(): print('Already running as %s' % process.read_pid()) return", "SIGTERM, 'Stopped') def kill(process): terminate(process, SIGKILL, 'Killed') def status(process): if", "not a symlink: %s' % p) return True except OSError", "or pathjoin(o.data_dir, 'var/run/launcher.pid')) o.launcher_log = realpath(options.launcher_log_file or pathjoin(o.data_dir, 'var/log/launcher.log')) o.server_log", "os.close(fd) def open_append(f): \"\"\"Open a raw file descriptor in append", "= launcher_properties.get('process-name', '') if len(process_name) > 0: system = platform.system()", "def 
create_symlink(source, target): \"\"\"Create a symlink, removing the target first", "if key == 'log.levels-file': parser.error('cannot specify log levels using -D", "import OptionParser from os import O_RDWR, O_CREAT, O_WRONLY, O_APPEND from", "in read/write mode (without truncating it)\"\"\" return os.fdopen(os.open(f, O_RDWR |", "options.config_path system_properties = ['-D%s=%s' % i for i in properties.items()]", "invalid pid: %s\" % (self.path, pid)) return pid def redirect_stdin_to_devnull():", "%s' % (message, pid)) def stop(process): terminate(process, SIGTERM, 'Stopped') def", "Options() o.verbose = options.verbose o.install_path = install_path o.launcher_config = realpath(options.launcher_config", "platform.machine() shim = pathjoin(options.install_path, 'bin', 'procname', system, 'libprocname.so') if exists(shim):", "raise Exception(\"Pid file '%s' contains garbage: %s\" % (self.path, line))", "stderr to a file descriptor\"\"\" os.dup2(fd, sys.stdout.fileno()) os.dup2(fd, sys.stderr.fileno()) def", "'status': status(process) else: raise AssertionError('Unhandled command: ' + command) def", "property') return parser def parse_properties(parser, args): properties = {} for", "class Options: pass def main(): parser = create_parser() (options, args)", "'stop', 'restart', 'kill', 'status'] LSB_NOT_RUNNING = 3 LSB_STATUS_UNKNOWN = 4", "O_WRONLY | O_APPEND | O_CREAT, 0o644) def redirect_output(fd): \"\"\"Redirect stdout", "arg: parser.error('property is malformed: %s' % arg) key, value =", "try: handle_command(command, o) except SystemExit: raise except Exception as e:", "sys.stdout.fileno()) os.dup2(fd, sys.stderr.fileno()) def symlink_exists(p): \"\"\"Check if symlink exists and", "process.read_pid() while True: try: os.kill(pid, signal) except OSError as e:", "'--verbose', action='store_true', default=False, help='Run verbosely') parser.add_option('--etc-dir', metavar='DIR', help='Defaults to INSTALL_PATH/etc')", "install_path = find_install_path(sys.argv[0]) 
except Exception as e: print('ERROR: %s' %", "stdout and stderr to a file descriptor\"\"\" os.dup2(fd, sys.stdout.fileno()) os.dup2(fd,", "= create_parser() (options, args) = parser.parse_args() if len(args) != 1:", "self.clear_pid() self.pid_file.write(str(pid) + '\\n') self.pid_file.flush() def alive(self): self.refresh() if self.locked:", "pathjoin(options.data_dir, 'etc'): create_symlink( options.etc_dir, pathjoin(options.data_dir, 'etc')) if options.install_path != options.data_dir:", "= try_lock(self.pid_file) def clear_pid(self): assert self.locked, 'pid file not locked", "OptionParser from os import O_RDWR, O_CREAT, O_WRONLY, O_APPEND from os.path", "fd = os.open(os.devnull, O_RDWR) os.dup2(fd, sys.stdin.fileno()) os.close(fd) def open_append(f): \"\"\"Open", "in arg: parser.error('property is malformed: %s' % arg) key, value", "exists(source): os.symlink(source, target) def create_app_symlinks(options): \"\"\" Symlink the 'etc' and", "options.node_config and not exists(o.node_config): parser.error('Node config file is missing: %s'", "metavar='FILE', help='Defaults to DATA_DIR/var/log/launcher.log (only in daemon mode)') parser.add_option('--server-log-file', metavar='FILE',", "= options.log_levels if daemon: properties['log.output-file'] = options.server_log properties['log.enable-console'] = 'false'", "ones\"\"\" try: os.makedirs(p) except OSError as e: if e.errno !=", "Exception('Signaling pid %s failed: %s' % (pid, e)) def read_pid(self):", "as pathjoin from signal import SIGTERM, SIGKILL from stat import", "not exists(o.node_config): parser.error('Node config file is missing: %s' % o.node_config)", "'%s' contains an invalid pid: %s\" % (self.path, pid)) return", "o.properties[k] = v o.arguments = options.arguments or [] if o.verbose:", "'log.levels-file': parser.error('cannot specify log levels using -D option (use --log-levels-file)')", "properties = {} for line in load_lines(f): k, v =", "'%s' to be 'launcher.py' not '%s'\" % (f, basename(f))) p", 
"dest='arguments', help='Add a program argument of the Java application') parser.add_option('--launcher-log-file',", "args[0] if command not in COMMANDS: parser.error('unsupported command: %s' %", "as %s' % pid) return if hasattr(os, \"set_inheritable\"): # See", "not specified') else: parser.error('too many arguments') command = args[0] if", "'kill': kill(process) elif command == 'status': status(process) else: raise AssertionError('Unhandled", "symlink: %s' % p) return True except OSError as e:", "if options.log_levels_set and not exists(options.log_levels): raise Exception('Log levels file is", "by us' self.pid_file.seek(0) line = self.pid_file.readline().strip() if len(line) == 0:", "options.verbose: print(command) print(\"\") env = os.environ.copy() # set process name:", "try: pid = int(line) except ValueError: raise Exception(\"Pid file '%s'", "is missing: %s' % options.jvm_config) if not exists(options.launcher_config): raise Exception('Launcher", "failed: %s' % (pid, e)) def read_pid(self): assert not self.locked,", "for arg in args: if '=' not in arg: parser.error('property", "load_lines(f): \"\"\"Load lines from a file, ignoring blank or comment", "process.read_pid()) return create_app_symlinks(options) args, env = build_java_execution(options, False) makedirs(options.data_dir) os.chdir(options.data_dir)", "options.data_dir: create_symlink( pathjoin(options.install_path, 'plugin'), pathjoin(options.data_dir, 'plugin')) def build_java_execution(options, daemon): if", "self.pid_file.seek(0) line = self.pid_file.readline().strip() if len(line) == 0: raise Exception(\"Pid", "(f, basename(f))) p = realpath(dirname(f)) if basename(p) != 'bin': raise", "if k not in o.properties: o.properties[k] = v o.arguments =", "pathjoin(o.etc_dir, 'jvm.config')) o.config_path = realpath(options.config or pathjoin(o.etc_dir, 'config.properties')) o.log_levels =", "if e.errno != errno.ENOENT: raise return False def create_symlink(source, target):", "from signal import SIGTERM, 
SIGKILL from stat import S_ISLNK from", "Process: def __init__(self, path): makedirs(dirname(path)) self.path = path self.pid_file =", "is malformed: %s' % arg) key, value = [i.strip() for", "1) properties[k.strip()] = v.strip() return properties def load_lines(f): \"\"\"Load lines", "PyTypeChecker return os.open(f, O_WRONLY | O_APPEND | O_CREAT, 0o644) def", "lines\"\"\" lines = [] for line in open(f, 'r').readlines(): line", "= line.strip() if len(line) > 0 and not line.startswith('#'): lines.append(line)", "% (self.path, line)) if pid <= 0: raise Exception(\"Pid file", "%s' % (pid, e)) if not process.alive(): process.clear_pid() break sleep(0.1)", "arg) key, value = [i.strip() for i in arg.split('=', 1)]", "system_properties command += [main_class] command += options.arguments if options.verbose: print(command)", "parser.error('command name not specified') else: parser.error('too many arguments') command =", "verbosely') parser.add_option('--etc-dir', metavar='DIR', help='Defaults to INSTALL_PATH/etc') parser.add_option('--launcher-config', metavar='FILE', help='Defaults to", "symlink\"\"\" if symlink_exists(target): os.remove(target) if exists(source): os.symlink(source, target) def create_app_symlinks(options):", "system_properties = ['-D%s=%s' % i for i in properties.items()] classpath", "find_install_path(sys.argv[0]) except Exception as e: print('ERROR: %s' % e) sys.exit(LSB_STATUS_UNKNOWN)", "0: raise Exception(\"Pid file '%s' is empty\" % self.path) try:", "'etc'): create_symlink( options.etc_dir, pathjoin(options.data_dir, 'etc')) if options.install_path != options.data_dir: create_symlink(", "print(\"\") env = os.environ.copy() # set process name: https://github.com/electrum/procname process_name", "self.pid_file.seek(0) self.pid_file.truncate() def write_pid(self, pid): self.clear_pid() self.pid_file.write(str(pid) + '\\n') self.pid_file.flush()", "sleep COMMANDS = ['run', 'start', 'stop', 'restart', 'kill', 'status'] LSB_NOT_RUNNING", 
"os.open(os.devnull, O_RDWR) os.dup2(fd, sys.stdin.fileno()) os.close(fd) def open_append(f): \"\"\"Open a raw", "os.setsid() redirect_stdin_to_devnull() redirect_output(log) os.close(log) os.execvpe(args[0], args, env) def terminate(process, signal,", "if symlink_exists(target): os.remove(target) if exists(source): os.symlink(source, target) def create_app_symlinks(options): \"\"\"", "raise Exception('Signaling pid %s failed: %s' % (pid, e)) if", "= int(line) except ValueError: raise Exception(\"Pid file '%s' contains garbage:", "in args: if '=' not in arg: parser.error('property is malformed:", "%prog [options] command', description=commands) parser.add_option('-v', '--verbose', action='store_true', default=False, help='Run verbosely')", "args: if '=' not in arg: parser.error('property is malformed: %s'", "0 and not line.startswith('#'): lines.append(line) return lines def try_lock(f): \"\"\"Try", "file descriptor in append mode\"\"\" # noinspection PyTypeChecker return os.open(f,", "not exists(options.jvm_config): raise Exception('JVM config file is missing: %s' %", "'libprocname.so') if exists(shim): env['LD_PRELOAD'] = (env.get('LD_PRELOAD', '') + ':' +", "not process.alive(): process.clear_pid() break sleep(0.1) print('%s %s' % (message, pid))", "pid <= 0: raise Exception(\"Pid file '%s' contains an invalid", "parser.add_option('--jvm-config', metavar='FILE', help='Defaults to ETC_DIR/jvm.config') parser.add_option('--config', metavar='FILE', help='Defaults to ETC_DIR/config.properties')", "realpath(options.launcher_log_file or pathjoin(o.data_dir, 'var/log/launcher.log')) o.server_log = realpath(options.server_log_file or pathjoin(o.data_dir, 'var/log/server.log'))", "0: process.write_pid(pid) print('Started as %s' % pid) return if hasattr(os,", "options.verbose o.install_path = install_path o.launcher_config = realpath(options.launcher_config or pathjoin(o.install_path, 'bin/launcher.properties'))", "\"\"\"Check if symlink exists and raise if another type 
of", "levels file is missing: %s' % options.log_levels) properties = options.properties.copy()", "contains an invalid pid: %s\" % (self.path, pid)) return pid", "if options.verbose: print(command) print(\"\") env = os.environ.copy() # set process", "dest='properties', help='Set a Java system property') return parser def parse_properties(parser,", "directory to be 'bin' not '%s\" % (f, basename(p))) return", "%s' % options.jvm_config) if not exists(options.launcher_config): raise Exception('Launcher config file", "+= [main_class] command += options.arguments if options.verbose: print(command) print(\"\") env", "break sleep(0.1) print('%s %s' % (message, pid)) def stop(process): terminate(process,", "os.symlink(source, target) def create_app_symlinks(options): \"\"\" Symlink the 'etc' and 'plugin'", "INSTALL_PATH/bin/launcher.properties') parser.add_option('--node-config', metavar='FILE', help='Defaults to ETC_DIR/node.properties') parser.add_option('--jvm-config', metavar='FILE', help='Defaults to", "command == 'run': run(process, options) elif command == 'start': start(process,", "'restart': stop(process) start(process, options) elif command == 'kill': kill(process) elif", "if len(line) == 0: raise Exception(\"Pid file '%s' is empty\"", "if e.errno != errno.ESRCH: raise Exception('Signaling pid %s failed: %s'", "open_read_write(path, 0o600) self.refresh() def refresh(self): self.locked = try_lock(self.pid_file) def clear_pid(self):", "write_pid(self, pid): self.clear_pid() self.pid_file.write(str(pid) + '\\n') self.pid_file.flush() def alive(self): self.refresh()", "to support programs that reference 'etc/xyz' from within their config", "S_ISLNK(st.st_mode): raise Exception('Path exists and is not a symlink: %s'", "try_lock(self.pid_file) def clear_pid(self): assert self.locked, 'pid file not locked by", "command) def create_parser(): commands = 'Commands: ' + ', '.join(COMMANDS)", "process_name = launcher_properties.get('process-name', '') if len(process_name) > 
0: system =", "= %s\" % (i, getattr(options, i))) print(\"\") class Options: pass", "sys.stdin.fileno()) os.close(fd) def open_append(f): \"\"\"Open a raw file descriptor in", "help='Run verbosely') parser.add_option('--etc-dir', metavar='DIR', help='Defaults to INSTALL_PATH/etc') parser.add_option('--launcher-config', metavar='FILE', help='Defaults", "try: flock(f, LOCK_EX | LOCK_NB) return True except (IOError, OSError):", "file descriptor\"\"\" os.dup2(fd, sys.stdout.fileno()) os.dup2(fd, sys.stderr.fileno()) def symlink_exists(p): \"\"\"Check if", "system, 'libprocname.so') if exists(shim): env['LD_PRELOAD'] = (env.get('LD_PRELOAD', '') + ':'", "e: raise Exception('Signaling pid %s failed: %s' % (pid, e))", "int(line) except ValueError: raise Exception(\"Pid file '%s' contains garbage: %s\"", "% (pid, e)) def read_pid(self): assert not self.locked, 'pid file", "Python 3.4 os.set_inheritable(process.pid_file.fileno(), True) os.setsid() redirect_stdin_to_devnull() redirect_output(log) os.close(log) os.execvpe(args[0], args,", "files: log.levels-file=etc/log.properties \"\"\" if options.etc_dir != pathjoin(options.data_dir, 'etc'): create_symlink( options.etc_dir,", "parser.error('unsupported command: %s' % command) try: install_path = find_install_path(sys.argv[0]) except", "os.path import basename, dirname, exists, realpath from os.path import join", "'Commands: ' + ', '.join(COMMANDS) parser = OptionParser(prog='launcher', usage='usage: %prog", "def try_lock(f): \"\"\"Try to open an exclusive lock (inheritable) on", "LSB_STATUS_UNKNOWN = 4 def find_install_path(f): \"\"\"Find canonical parent of bin/launcher.py\"\"\"", "%s' % process.read_pid()) return create_app_symlinks(options) args, env = build_java_execution(options, True)", "= v.strip() return properties def load_lines(f): \"\"\"Load lines from a", "value = [i.strip() for i in arg.split('=', 1)] if key", "status(process) else: raise AssertionError('Unhandled command: ' + command) def create_parser():", 
"o.verbose: traceback.print_exc() else: print('ERROR: %s' % e) sys.exit(LSB_STATUS_UNKNOWN) if __name__", "def load_properties(f): \"\"\"Load key/value pairs from a file\"\"\" properties =", "start(process, options) elif command == 'kill': kill(process) elif command ==", "options) elif command == 'stop': stop(process) elif command == 'restart':", "to DATA_DIR/var/run/launcher.pid') parser.add_option('--arg', action='append', metavar='ARG', dest='arguments', help='Add a program argument", "SIGKILL, 'Killed') def status(process): if not process.alive(): print('Not running') sys.exit(LSB_NOT_RUNNING)", "parser.error('property is malformed: %s' % arg) key, value = [i.strip()", "not S_ISLNK(st.st_mode): raise Exception('Path exists and is not a symlink:", "if len(args) != 1: if len(args) == 0: parser.error('command name", "\"\"\"Create a symlink, removing the target first if it is", "properties def print_options(options): if options.verbose: for i in sorted(vars(options)): print(\"%-15s", "or pathjoin(o.etc_dir, 'log.properties')) o.log_levels_set = bool(options.log_levels_file) if options.node_config and not", "%s failed: %s' % (pid, e)) def read_pid(self): assert not", "process.write_pid(pid) print('Started as %s' % pid) return if hasattr(os, \"set_inheritable\"):", "v in node_properties.items(): if k not in o.properties: o.properties[k] =", "metavar='FILE', help='Defaults to ETC_DIR/jvm.config') parser.add_option('--config', metavar='FILE', help='Defaults to ETC_DIR/config.properties') parser.add_option('--log-levels-file',", "e)) if not process.alive(): process.clear_pid() break sleep(0.1) print('%s %s' %", "stop(process) elif command == 'restart': stop(process) start(process, options) elif command", "LOCK_EX, LOCK_NB from optparse import OptionParser from os import O_RDWR,", "file '%s' contains an invalid pid: %s\" % (self.path, pid))", "%s' % options.config_path) if not exists(options.jvm_config): raise Exception('JVM config file", "if symlink exists and raise if 
another type of file", "exists\"\"\" try: st = os.lstat(p) if not S_ISLNK(st.st_mode): raise Exception('Path", "exists(options.launcher_config): raise Exception('Launcher config file is missing: %s' % options.launcher_config)", "or {}) for k, v in node_properties.items(): if k not", "argument of the Java application') parser.add_option('--launcher-log-file', metavar='FILE', help='Defaults to DATA_DIR/var/log/launcher.log", "self.locked = try_lock(self.pid_file) def clear_pid(self): assert self.locked, 'pid file not", "bool(options.log_levels_file) if options.node_config and not exists(o.node_config): parser.error('Node config file is", "LOCK_EX | LOCK_NB) return True except (IOError, OSError): # IOError", "[main_class] command += options.arguments if options.verbose: print(command) print(\"\") env =", "failed: %s' % (pid, e)) if not process.alive(): process.clear_pid() break", "pathjoin(o.install_path, 'bin/launcher.properties')) o.etc_dir = realpath(options.etc_dir or pathjoin(o.install_path, 'etc')) o.node_config =", "makedirs(options.data_dir) os.chdir(options.data_dir) process.write_pid(os.getpid()) redirect_stdin_to_devnull() os.execvpe(args[0], args, env) def start(process, options):", "'r').readlines(): line = line.strip() if len(line) > 0 and not", "for k, v in node_properties.items(): if k not in o.properties:", "target) def create_app_symlinks(options): \"\"\" Symlink the 'etc' and 'plugin' directory", "if options.verbose: for i in sorted(vars(options)): print(\"%-15s = %s\" %", "metavar='NAME=VALUE', dest='properties', help='Set a Java system property') return parser def", "except SystemExit: raise except Exception as e: if o.verbose: traceback.print_exc()", "options.launcher_config) if options.log_levels_set and not exists(options.log_levels): raise Exception('Log levels file", "# See https://docs.python.org/3/library/os.html#inheritance-of-file-descriptors # Since Python 3.4 os.set_inheritable(process.pid_file.fileno(), True) os.setsid()", "Java 
application') parser.add_option('--launcher-log-file', metavar='FILE', help='Defaults to DATA_DIR/var/log/launcher.log (only in daemon", "i))) print(\"\") class Options: pass def main(): parser = create_parser()", "# IOError in Python 2, OSError in Python 3. return", "args, env = build_java_execution(options, True) makedirs(dirname(options.launcher_log)) log = open_append(options.launcher_log) makedirs(options.data_dir)", "'lib', '*') command = ['java', '-cp', classpath] command += jvm_properties", "ETC_DIR/jvm.config') parser.add_option('--config', metavar='FILE', help='Defaults to ETC_DIR/config.properties') parser.add_option('--log-levels-file', metavar='FILE', help='Defaults to", "return lines def try_lock(f): \"\"\"Try to open an exclusive lock", "--log-levels-file)') properties[key] = value return properties def print_options(options): if options.verbose:", "from within their config files: log.levels-file=etc/log.properties \"\"\" if options.etc_dir !=", "= realpath(options.pid_file or pathjoin(o.data_dir, 'var/run/launcher.pid')) o.launcher_log = realpath(options.launcher_log_file or pathjoin(o.data_dir,", "descriptor in append mode\"\"\" # noinspection PyTypeChecker return os.open(f, O_WRONLY", "options.install_path != options.data_dir: create_symlink( pathjoin(options.install_path, 'plugin'), pathjoin(options.data_dir, 'plugin')) def build_java_execution(options,", "options.log_levels) properties = options.properties.copy() if exists(options.log_levels): properties['log.levels-file'] = options.log_levels if", "print('%s %s' % (message, pid)) def stop(process): terminate(process, SIGTERM, 'Stopped')", "file is missing: %s' % options.log_levels) properties = options.properties.copy() if", "if len(line) > 0 and not line.startswith('#'): lines.append(line) return lines", "try: st = os.lstat(p) if not S_ISLNK(st.st_mode): raise Exception('Path exists", "blank or comment lines\"\"\" lines = [] for line in", "self.refresh() def refresh(self): self.locked = 
try_lock(self.pid_file) def clear_pid(self): assert self.locked,", "\"\"\" Symlink the 'etc' and 'plugin' directory into the data", "time import sleep COMMANDS = ['run', 'start', 'stop', 'restart', 'kill',", "for line in open(f, 'r').readlines(): line = line.strip() if len(line)", "'*') command = ['java', '-cp', classpath] command += jvm_properties +", "'launcher.py' not '%s'\" % (f, basename(f))) p = realpath(dirname(f)) if", "'etc')) o.node_config = realpath(options.node_config or pathjoin(o.etc_dir, 'node.properties')) o.jvm_config = realpath(options.jvm_config", "pid)) return pid def redirect_stdin_to_devnull(): \"\"\"Redirect stdin to /dev/null\"\"\" fd", "| O_CREAT, 0o644) def redirect_output(fd): \"\"\"Redirect stdout and stderr to", "file is missing: %s' % options.launcher_config) if options.log_levels_set and not", "def symlink_exists(p): \"\"\"Check if symlink exists and raise if another", "{}) for k, v in node_properties.items(): if k not in", "file '%s' is empty\" % self.path) try: pid = int(line)", "import S_ISLNK from time import sleep COMMANDS = ['run', 'start',", "except OSError as e: if e.errno != errno.ESRCH: raise Exception('Signaling", "def status(process): if not process.alive(): print('Not running') sys.exit(LSB_NOT_RUNNING) print('Running as", "['java', '-cp', classpath] command += jvm_properties + system_properties command +=", "= launcher_properties['main-class'] except KeyError: raise Exception(\"Launcher config is missing 'main-class'", "if not process.alive(): print('Not running') return pid = process.read_pid() while", "while True: try: os.kill(pid, signal) except OSError as e: if", "key, value = [i.strip() for i in arg.split('=', 1)] if", "options.log_levels_set and not exists(options.log_levels): raise Exception('Log levels file is missing:", "or pathjoin(o.etc_dir, 'jvm.config')) o.config_path = realpath(options.config or pathjoin(o.etc_dir, 'config.properties')) o.log_levels", "p) return True except OSError as e: if e.errno !=", 
"O_RDWR | O_CREAT, mode), 'r+') class Process: def __init__(self, path):", "stdin to /dev/null\"\"\" fd = os.open(os.devnull, O_RDWR) os.dup2(fd, sys.stdin.fileno()) os.close(fd)", "raise Exception(\"Launcher config is missing 'main-class' property\") properties['config'] = options.config_path", "OSError as e: raise Exception('Signaling pid %s failed: %s' %", "def kill(process): terminate(process, SIGKILL, 'Killed') def status(process): if not process.alive():", "!= 1: if len(args) == 0: parser.error('command name not specified')", "terminate(process, signal, message): if not process.alive(): print('Not running') return pid", "missing: %s' % options.log_levels) properties = options.properties.copy() if exists(options.log_levels): properties['log.levels-file']", "ignoring blank or comment lines\"\"\" lines = [] for line", "\"\"\"Try to open an exclusive lock (inheritable) on a file\"\"\"", "args, env) def start(process, options): if process.alive(): print('Already running as", "print('ERROR: %s' % e) sys.exit(LSB_STATUS_UNKNOWN) if __name__ == '__main__': main()", "Exception(\"Expected file '%s' directory to be 'bin' not '%s\" %", "process.alive(): print('Not running') sys.exit(LSB_NOT_RUNNING) print('Running as %s' % process.read_pid()) def", "mode)') parser.add_option('--server-log-file', metavar='FILE', help='Defaults to DATA_DIR/var/log/server.log (only in daemon mode)')", "parse_properties(parser, args): properties = {} for arg in args: if", "else: print('ERROR: %s' % e) sys.exit(LSB_STATUS_UNKNOWN) if __name__ == '__main__':", "realpath(options.log_levels_file or pathjoin(o.etc_dir, 'log.properties')) o.log_levels_set = bool(options.log_levels_file) if options.node_config and", "= args[0] if command not in COMMANDS: parser.error('unsupported command: %s'", "if another type of file exists\"\"\" try: st = os.lstat(p)", "= os.open(os.devnull, O_RDWR) os.dup2(fd, sys.stdin.fileno()) os.close(fd) def open_append(f): \"\"\"Open a", "config file is missing: %s' % 
o.node_config) node_properties = {}", "not exists(options.config_path): raise Exception('Config file is missing: %s' % options.config_path)", "and stderr to a file descriptor\"\"\" os.dup2(fd, sys.stdout.fileno()) os.dup2(fd, sys.stderr.fileno())", "['run', 'start', 'stop', 'restart', 'kill', 'status'] LSB_NOT_RUNNING = 3 LSB_STATUS_UNKNOWN", "path): makedirs(dirname(path)) self.path = path self.pid_file = open_read_write(path, 0o600) self.refresh()", "Exception as e: print('ERROR: %s' % e) sys.exit(LSB_STATUS_UNKNOWN) o =", "daemon mode)') parser.add_option('-D', action='append', metavar='NAME=VALUE', dest='properties', help='Set a Java system", "%s' % options.log_levels) properties = options.properties.copy() if exists(options.log_levels): properties['log.levels-file'] =", "k, v in node_properties.items(): if k not in o.properties: o.properties[k]", "os.open(f, O_WRONLY | O_APPEND | O_CREAT, 0o644) def redirect_output(fd): \"\"\"Redirect", "import platform import sys import traceback from fcntl import flock,", "command = ['java', '-cp', classpath] command += jvm_properties + system_properties", "exists(o.node_config): parser.error('Node config file is missing: %s' % o.node_config) node_properties", "+ ', '.join(COMMANDS) parser = OptionParser(prog='launcher', usage='usage: %prog [options] command',", "set process name: https://github.com/electrum/procname process_name = launcher_properties.get('process-name', '') if len(process_name)", "return False def open_read_write(f, mode): \"\"\"Open file in read/write mode", "from os.path import basename, dirname, exists, realpath from os.path import", "help='Defaults to DATA_DIR/var/log/server.log (only in daemon mode)') parser.add_option('-D', action='append', metavar='NAME=VALUE',", "config file is missing: %s' % options.launcher_config) if options.log_levels_set and", "missing 'main-class' property\") properties['config'] = options.config_path system_properties = ['-D%s=%s' %", "to a file descriptor\"\"\" os.dup2(fd, 
sys.stdout.fileno()) os.dup2(fd, sys.stderr.fileno()) def symlink_exists(p):", "= {} for arg in args: if '=' not in", "to ETC_DIR/jvm.config') parser.add_option('--config', metavar='FILE', help='Defaults to ETC_DIR/config.properties') parser.add_option('--log-levels-file', metavar='FILE', help='Defaults", "command == 'start': start(process, options) elif command == 'stop': stop(process)", "lines = [] for line in open(f, 'r').readlines(): line =", "pid): self.clear_pid() self.pid_file.write(str(pid) + '\\n') self.pid_file.flush() def alive(self): self.refresh() if", "parser.add_option('--node-config', metavar='FILE', help='Defaults to ETC_DIR/node.properties') parser.add_option('--jvm-config', metavar='FILE', help='Defaults to ETC_DIR/jvm.config')", "system = platform.system() + '-' + platform.machine() shim = pathjoin(options.install_path,", "or pathjoin(o.install_path, 'etc')) o.node_config = realpath(options.node_config or pathjoin(o.etc_dir, 'node.properties')) o.jvm_config", "symlink_exists(target): os.remove(target) if exists(source): os.symlink(source, target) def create_app_symlinks(options): \"\"\" Symlink", "if basename(f) != 'launcher.py': raise Exception(\"Expected file '%s' to be", "in open(f, 'r').readlines(): line = line.strip() if len(line) > 0", "def start(process, options): if process.alive(): print('Already running as %s' %", "pid %s failed: %s' % (pid, e)) if not process.alive():", "+ '-' + platform.machine() shim = pathjoin(options.install_path, 'bin', 'procname', system,", "Symlink the 'etc' and 'plugin' directory into the data directory.", "not in COMMANDS: parser.error('unsupported command: %s' % command) try: install_path", "> 0: system = platform.system() + '-' + platform.machine() shim", "COMMANDS = ['run', 'start', 'stop', 'restart', 'kill', 'status'] LSB_NOT_RUNNING =", "pid = os.fork() if pid > 0: process.write_pid(pid) print('Started as", "signal, message): if not process.alive(): print('Not running') return pid =", 
"parser.add_option('--data-dir', metavar='DIR', help='Defaults to INSTALL_PATH') parser.add_option('--pid-file', metavar='FILE', help='Defaults to DATA_DIR/var/run/launcher.pid')", "programs that reference 'etc/xyz' from within their config files: log.levels-file=etc/log.properties", "if exists(shim): env['LD_PRELOAD'] = (env.get('LD_PRELOAD', '') + ':' + shim).strip()", "env) def terminate(process, signal, message): if not process.alive(): print('Not running')", "open_append(options.launcher_log) makedirs(options.data_dir) os.chdir(options.data_dir) pid = os.fork() if pid > 0:", "if daemon: properties['log.output-file'] = options.server_log properties['log.enable-console'] = 'false' jvm_properties =", "raise Exception('JVM config file is missing: %s' % options.jvm_config) if", "!= options.data_dir: create_symlink( pathjoin(options.install_path, 'plugin'), pathjoin(options.data_dir, 'plugin')) def build_java_execution(options, daemon):", "print(\"%-15s = %s\" % (i, getattr(options, i))) print(\"\") class Options:", "1: if len(args) == 0: parser.error('command name not specified') else:", "False def open_read_write(f, mode): \"\"\"Open file in read/write mode (without", "to be 'bin' not '%s\" % (f, basename(p))) return dirname(p)", "symlink_exists(p): \"\"\"Check if symlink exists and raise if another type", "except OSError as e: raise Exception('Signaling pid %s failed: %s'", "= find_install_path(sys.argv[0]) except Exception as e: print('ERROR: %s' % e)", "node_properties.get('node.data-dir') o.data_dir = realpath(options.data_dir or data_dir or o.install_path) o.pid_file =", "See https://docs.python.org/3/library/os.html#inheritance-of-file-descriptors # Since Python 3.4 os.set_inheritable(process.pid_file.fileno(), True) os.setsid() redirect_stdin_to_devnull()", "% e) sys.exit(LSB_STATUS_UNKNOWN) o = Options() o.verbose = options.verbose o.install_path", "= {} for line in load_lines(f): k, v = line.split('=',", "or data_dir or o.install_path) o.pid_file = 
realpath(options.pid_file or pathjoin(o.data_dir, 'var/run/launcher.pid'))", "args, env = build_java_execution(options, False) makedirs(options.data_dir) os.chdir(options.data_dir) process.write_pid(os.getpid()) redirect_stdin_to_devnull() os.execvpe(args[0],", "print(command) print(\"\") env = os.environ.copy() # set process name: https://github.com/electrum/procname", "i in sorted(vars(options)): print(\"%-15s = %s\" % (i, getattr(options, i)))", "| LOCK_NB) return True except (IOError, OSError): # IOError in", "3. return False def open_read_write(f, mode): \"\"\"Open file in read/write", "directory into the data directory. This is needed to support", "file is locked by us' self.pid_file.seek(0) line = self.pid_file.readline().strip() if", "\"\"\"Load key/value pairs from a file\"\"\" properties = {} for", "= open_read_write(path, 0o600) self.refresh() def refresh(self): self.locked = try_lock(self.pid_file) def", "def redirect_stdin_to_devnull(): \"\"\"Redirect stdin to /dev/null\"\"\" fd = os.open(os.devnull, O_RDWR)", "raise Exception(\"Pid file '%s' is empty\" % self.path) try: pid", "\"\"\"Load lines from a file, ignoring blank or comment lines\"\"\"", "command == 'stop': stop(process) elif command == 'restart': stop(process) start(process,", "sorted(vars(options)): print(\"%-15s = %s\" % (i, getattr(options, i))) print(\"\") class", "self.pid_file.flush() def alive(self): self.refresh() if self.locked: return False pid =", "launcher_properties = load_properties(options.launcher_config) try: main_class = launcher_properties['main-class'] except KeyError: raise", "in daemon mode)') parser.add_option('--server-log-file', metavar='FILE', help='Defaults to DATA_DIR/var/log/server.log (only in", "o.jvm_config = realpath(options.jvm_config or pathjoin(o.etc_dir, 'jvm.config')) o.config_path = realpath(options.config or", "e.errno != errno.EEXIST: raise def load_properties(f): \"\"\"Load key/value pairs from", "or pathjoin(o.data_dir, 'var/log/server.log')) 
o.properties = parse_properties(parser, options.properties or {}) for", "', '.join(COMMANDS) parser = OptionParser(prog='launcher', usage='usage: %prog [options] command', description=commands)", "'start': start(process, options) elif command == 'stop': stop(process) elif command", "locked by us' self.pid_file.seek(0) line = self.pid_file.readline().strip() if len(line) ==", "-D option (use --server-log-file)') if key == 'log.levels-file': parser.error('cannot specify", "pathjoin(o.etc_dir, 'node.properties')) o.jvm_config = realpath(options.jvm_config or pathjoin(o.etc_dir, 'jvm.config')) o.config_path =", "if key == 'log.output-file': parser.error('cannot specify server log using -D", "metavar='FILE', help='Defaults to DATA_DIR/var/log/server.log (only in daemon mode)') parser.add_option('-D', action='append',", "is missing: %s' % o.node_config) node_properties = {} if exists(o.node_config):", "== 0: parser.error('command name not specified') else: parser.error('too many arguments')", "data_dir or o.install_path) o.pid_file = realpath(options.pid_file or pathjoin(o.data_dir, 'var/run/launcher.pid')) o.launcher_log", "== 0: raise Exception(\"Pid file '%s' is empty\" % self.path)", "return properties def load_lines(f): \"\"\"Load lines from a file, ignoring", "print('ERROR: %s' % e) sys.exit(LSB_STATUS_UNKNOWN) o = Options() o.verbose =", "load_lines(options.jvm_config) launcher_properties = load_properties(options.launcher_config) try: main_class = launcher_properties['main-class'] except KeyError:", "signal) except OSError as e: if e.errno != errno.ESRCH: raise", "arg.split('=', 1)] if key == 'config': parser.error('cannot specify config using", "os.fork() if pid > 0: process.write_pid(pid) print('Started as %s' %", "elif command == 'status': status(process) else: raise AssertionError('Unhandled command: '", "log using -D option (use --server-log-file)') if key == 'log.levels-file':", "pathjoin(o.etc_dir, 'log.properties')) o.log_levels_set = 
bool(options.log_levels_file) if options.node_config and not exists(o.node_config):", "= realpath(options.launcher_config or pathjoin(o.install_path, 'bin/launcher.properties')) o.etc_dir = realpath(options.etc_dir or pathjoin(o.install_path,", "open_append(f): \"\"\"Open a raw file descriptor in append mode\"\"\" #", "kill(process): terminate(process, SIGKILL, 'Killed') def status(process): if not process.alive(): print('Not", "'start', 'stop', 'restart', 'kill', 'status'] LSB_NOT_RUNNING = 3 LSB_STATUS_UNKNOWN =", "a symlink, removing the target first if it is a", "def find_install_path(f): \"\"\"Find canonical parent of bin/launcher.py\"\"\" if basename(f) !=", "%s' % o.node_config) node_properties = {} if exists(o.node_config): node_properties =", "args): properties = {} for arg in args: if '='", "= options.config_path system_properties = ['-D%s=%s' % i for i in", "mode\"\"\" # noinspection PyTypeChecker return os.open(f, O_WRONLY | O_APPEND |", "exists(options.log_levels): raise Exception('Log levels file is missing: %s' % options.log_levels)", "dirname(p) def makedirs(p): \"\"\"Create directory and all intermediate ones\"\"\" try:", "pathjoin(options.install_path, 'bin', 'procname', system, 'libprocname.so') if exists(shim): env['LD_PRELOAD'] = (env.get('LD_PRELOAD',", "comment lines\"\"\" lines = [] for line in open(f, 'r').readlines():", "try: os.kill(pid, signal) except OSError as e: if e.errno !=", "self.pid_file.readline().strip() if len(line) == 0: raise Exception(\"Pid file '%s' is", "raise Exception('Signaling pid %s failed: %s' % (pid, e)) def", "raise Exception(\"Pid file '%s' contains an invalid pid: %s\" %", "basename(f))) p = realpath(dirname(f)) if basename(p) != 'bin': raise Exception(\"Expected", "option (use --config)') if key == 'log.output-file': parser.error('cannot specify server", "empty\" % self.path) try: pid = int(line) except ValueError: raise", "start(process, options): if process.alive(): print('Already running as %s' % 
process.read_pid())", "else: parser.error('too many arguments') command = args[0] if command not", "Exception('Path exists and is not a symlink: %s' % p)", "%s' % (pid, e)) def read_pid(self): assert not self.locked, 'pid", "import O_RDWR, O_CREAT, O_WRONLY, O_APPEND from os.path import basename, dirname,", "(self.path, pid)) return pid def redirect_stdin_to_devnull(): \"\"\"Redirect stdin to /dev/null\"\"\"", "env = os.environ.copy() # set process name: https://github.com/electrum/procname process_name =", "locked by us' self.pid_file.seek(0) self.pid_file.truncate() def write_pid(self, pid): self.clear_pid() self.pid_file.write(str(pid)", "'=' not in arg: parser.error('property is malformed: %s' % arg)", "o.log_levels = realpath(options.log_levels_file or pathjoin(o.etc_dir, 'log.properties')) o.log_levels_set = bool(options.log_levels_file) if", "'bin/launcher.properties')) o.etc_dir = realpath(options.etc_dir or pathjoin(o.install_path, 'etc')) o.node_config = realpath(options.node_config", "o.node_config = realpath(options.node_config or pathjoin(o.etc_dir, 'node.properties')) o.jvm_config = realpath(options.jvm_config or", "%s' % command) try: install_path = find_install_path(sys.argv[0]) except Exception as", "message): if not process.alive(): print('Not running') return pid = process.read_pid()", "help='Add a program argument of the Java application') parser.add_option('--launcher-log-file', metavar='FILE',", "or pathjoin(o.install_path, 'bin/launcher.properties')) o.etc_dir = realpath(options.etc_dir or pathjoin(o.install_path, 'etc')) o.node_config", "options.etc_dir, pathjoin(options.data_dir, 'etc')) if options.install_path != options.data_dir: create_symlink( pathjoin(options.install_path, 'plugin'),", "in sorted(vars(options)): print(\"%-15s = %s\" % (i, getattr(options, i))) print(\"\")", "except OSError as e: if e.errno != errno.ENOENT: raise return", "raise except Exception as e: if o.verbose: traceback.print_exc() else: print('ERROR:", "if command 
== 'run': run(process, options) elif command == 'start':", "o = Options() o.verbose = options.verbose o.install_path = install_path o.launcher_config", "create_parser(): commands = 'Commands: ' + ', '.join(COMMANDS) parser =", "option (use --server-log-file)') if key == 'log.levels-file': parser.error('cannot specify log", "be 'launcher.py' not '%s'\" % (f, basename(f))) p = realpath(dirname(f))", "key == 'log.levels-file': parser.error('cannot specify log levels using -D option", "missing: %s' % options.launcher_config) if options.log_levels_set and not exists(options.log_levels): raise", "p = realpath(dirname(f)) if basename(p) != 'bin': raise Exception(\"Expected file", "Java system property') return parser def parse_properties(parser, args): properties =", "or o.install_path) o.pid_file = realpath(options.pid_file or pathjoin(o.data_dir, 'var/run/launcher.pid')) o.launcher_log =", "pathjoin(options.install_path, 'lib', '*') command = ['java', '-cp', classpath] command +=", "options.arguments or [] if o.verbose: print_options(o) try: handle_command(command, o) except", "on a file\"\"\" try: flock(f, LOCK_EX | LOCK_NB) return True", "properties['log.enable-console'] = 'false' jvm_properties = load_lines(options.jvm_config) launcher_properties = load_properties(options.launcher_config) try:", "% o.node_config) node_properties = {} if exists(o.node_config): node_properties = load_properties(o.node_config)", "OptionParser(prog='launcher', usage='usage: %prog [options] command', description=commands) parser.add_option('-v', '--verbose', action='store_true', default=False,", "all intermediate ones\"\"\" try: os.makedirs(p) except OSError as e: if", "for i in properties.items()] classpath = pathjoin(options.install_path, 'lib', '*') command", "the data directory. 
This is needed to support programs that", "if e.errno != errno.EEXIST: raise def load_properties(f): \"\"\"Load key/value pairs", "os.execvpe(args[0], args, env) def terminate(process, signal, message): if not process.alive():", "import basename, dirname, exists, realpath from os.path import join as", "O_CREAT, O_WRONLY, O_APPEND from os.path import basename, dirname, exists, realpath", "os.chdir(options.data_dir) process.write_pid(os.getpid()) redirect_stdin_to_devnull() os.execvpe(args[0], args, env) def start(process, options): if", "in daemon mode)') parser.add_option('-D', action='append', metavar='NAME=VALUE', dest='properties', help='Set a Java", "e) sys.exit(LSB_STATUS_UNKNOWN) o = Options() o.verbose = options.verbose o.install_path =", "'%s' is empty\" % self.path) try: pid = int(line) except", "if o.verbose: print_options(o) try: handle_command(command, o) except SystemExit: raise except", "self.locked, 'pid file is locked by us' self.pid_file.seek(0) line =", "% options.config_path) if not exists(options.jvm_config): raise Exception('JVM config file is", "garbage: %s\" % (self.path, line)) if pid <= 0: raise", "= ['-D%s=%s' % i for i in properties.items()] classpath =", "[options] command', description=commands) parser.add_option('-v', '--verbose', action='store_true', default=False, help='Run verbosely') parser.add_option('--etc-dir',", "args, env) def terminate(process, signal, message): if not process.alive(): print('Not", "i for i in properties.items()] classpath = pathjoin(options.install_path, 'lib', '*')", "o.server_log = realpath(options.server_log_file or pathjoin(o.data_dir, 'var/log/server.log')) o.properties = parse_properties(parser, options.properties", "if '=' not in arg: parser.error('property is malformed: %s' %", "-D option (use --log-levels-file)') properties[key] = value return properties def", "platform.system() + '-' + platform.machine() shim = pathjoin(options.install_path, 'bin', 'procname',", "= os.fork() if pid > 0: 
process.write_pid(pid) print('Started as %s'", "to ETC_DIR/node.properties') parser.add_option('--jvm-config', metavar='FILE', help='Defaults to ETC_DIR/jvm.config') parser.add_option('--config', metavar='FILE', help='Defaults", "% (i, getattr(options, i))) print(\"\") class Options: pass def main():", "e: print('ERROR: %s' % e) sys.exit(LSB_STATUS_UNKNOWN) o = Options() o.verbose", "== 'run': run(process, options) elif command == 'start': start(process, options)", "[] for line in open(f, 'r').readlines(): line = line.strip() if", "an exclusive lock (inheritable) on a file\"\"\" try: flock(f, LOCK_EX", "python import errno import os import platform import sys import", "o.properties: o.properties[k] = v o.arguments = options.arguments or [] if", "realpath(options.launcher_config or pathjoin(o.install_path, 'bin/launcher.properties')) o.etc_dir = realpath(options.etc_dir or pathjoin(o.install_path, 'etc'))", "\"\"\"Redirect stdin to /dev/null\"\"\" fd = os.open(os.devnull, O_RDWR) os.dup2(fd, sys.stdin.fileno())", "'node.properties')) o.jvm_config = realpath(options.jvm_config or pathjoin(o.etc_dir, 'jvm.config')) o.config_path = realpath(options.config", "redirect_output(log) os.close(log) os.execvpe(args[0], args, env) def terminate(process, signal, message): if", "create_symlink( pathjoin(options.install_path, 'plugin'), pathjoin(options.data_dir, 'plugin')) def build_java_execution(options, daemon): if not", "os.close(log) os.execvpe(args[0], args, env) def terminate(process, signal, message): if not", "append mode\"\"\" # noinspection PyTypeChecker return os.open(f, O_WRONLY | O_APPEND", "os.kill(pid, signal) except OSError as e: if e.errno != errno.ESRCH:", "reference 'etc/xyz' from within their config files: log.levels-file=etc/log.properties \"\"\" if", "(pid, e)) if not process.alive(): process.clear_pid() break sleep(0.1) print('%s %s'", "exists(options.config_path): raise Exception('Config file is missing: %s' % options.config_path) if", "env['PROCNAME'] = 
process_name return command, env def run(process, options): if", "LOCK_NB) return True except (IOError, OSError): # IOError in Python", "DATA_DIR/var/log/launcher.log (only in daemon mode)') parser.add_option('--server-log-file', metavar='FILE', help='Defaults to DATA_DIR/var/log/server.log", "a file, ignoring blank or comment lines\"\"\" lines = []", "OSError as e: if e.errno != errno.ENOENT: raise return False", "os.remove(target) if exists(source): os.symlink(source, target) def create_app_symlinks(options): \"\"\" Symlink the", "= [i.strip() for i in arg.split('=', 1)] if key ==", "in node_properties.items(): if k not in o.properties: o.properties[k] = v", "print('Started as %s' % pid) return if hasattr(os, \"set_inheritable\"): #", "%s' % pid) return if hasattr(os, \"set_inheritable\"): # See https://docs.python.org/3/library/os.html#inheritance-of-file-descriptors", "o.log_levels_set = bool(options.log_levels_file) if options.node_config and not exists(o.node_config): parser.error('Node config", "errno import os import platform import sys import traceback from", "lines def try_lock(f): \"\"\"Try to open an exclusive lock (inheritable)", "{} if exists(o.node_config): node_properties = load_properties(o.node_config) data_dir = node_properties.get('node.data-dir') o.data_dir", "metavar='FILE', help='Defaults to ETC_DIR/config.properties') parser.add_option('--log-levels-file', metavar='FILE', help='Defaults to ETC_DIR/log.properties') parser.add_option('--data-dir',", "os.dup2(fd, sys.stderr.fileno()) def symlink_exists(p): \"\"\"Check if symlink exists and raise", "\"\"\"Find canonical parent of bin/launcher.py\"\"\" if basename(f) != 'launcher.py': raise", "parser.add_option('--etc-dir', metavar='DIR', help='Defaults to INSTALL_PATH/etc') parser.add_option('--launcher-config', metavar='FILE', help='Defaults to INSTALL_PATH/bin/launcher.properties')", "node_properties.items(): if k not in o.properties: o.properties[k] = v o.arguments", "return 
os.fdopen(os.open(f, O_RDWR | O_CREAT, mode), 'r+') class Process: def", "'%s' contains garbage: %s\" % (self.path, line)) if pid <=", "def write_pid(self, pid): self.clear_pid() self.pid_file.write(str(pid) + '\\n') self.pid_file.flush() def alive(self):", "= line.split('=', 1) properties[k.strip()] = v.strip() return properties def load_lines(f):", "log.levels-file=etc/log.properties \"\"\" if options.etc_dir != pathjoin(options.data_dir, 'etc'): create_symlink( options.etc_dir, pathjoin(options.data_dir,", "except Exception as e: if o.verbose: traceback.print_exc() else: print('ERROR: %s'", "of file exists\"\"\" try: st = os.lstat(p) if not S_ISLNK(st.st_mode):", "metavar='FILE', help='Defaults to ETC_DIR/log.properties') parser.add_option('--data-dir', metavar='DIR', help='Defaults to INSTALL_PATH') parser.add_option('--pid-file',", "parser.add_option('--launcher-config', metavar='FILE', help='Defaults to INSTALL_PATH/bin/launcher.properties') parser.add_option('--node-config', metavar='FILE', help='Defaults to ETC_DIR/node.properties')", "find_install_path(f): \"\"\"Find canonical parent of bin/launcher.py\"\"\" if basename(f) != 'launcher.py':", "'bin', 'procname', system, 'libprocname.so') if exists(shim): env['LD_PRELOAD'] = (env.get('LD_PRELOAD', '')", "['-D%s=%s' % i for i in properties.items()] classpath = pathjoin(options.install_path,", "node_properties = load_properties(o.node_config) data_dir = node_properties.get('node.data-dir') o.data_dir = realpath(options.data_dir or", "os.chdir(options.data_dir) pid = os.fork() if pid > 0: process.write_pid(pid) print('Started", "= realpath(options.jvm_config or pathjoin(o.etc_dir, 'jvm.config')) o.config_path = realpath(options.config or pathjoin(o.etc_dir,", "command = args[0] if command not in COMMANDS: parser.error('unsupported command:", "'var/log/server.log')) o.properties = parse_properties(parser, options.properties or {}) for k, v", "noinspection PyTypeChecker return os.open(f, O_WRONLY | O_APPEND | 
O_CREAT, 0o644)", "lines from a file, ignoring blank or comment lines\"\"\" lines", "redirect_output(fd): \"\"\"Redirect stdout and stderr to a file descriptor\"\"\" os.dup2(fd,", "0) return True except OSError as e: raise Exception('Signaling pid", "in o.properties: o.properties[k] = v o.arguments = options.arguments or []", "raise Exception(\"Expected file '%s' to be 'launcher.py' not '%s'\" %", "raise Exception('Launcher config file is missing: %s' % options.launcher_config) if", "COMMANDS: parser.error('unsupported command: %s' % command) try: install_path = find_install_path(sys.argv[0])", "is missing: %s' % options.config_path) if not exists(options.jvm_config): raise Exception('JVM", "% command) try: install_path = find_install_path(sys.argv[0]) except Exception as e:", "= parse_properties(parser, options.properties or {}) for k, v in node_properties.items():", "read/write mode (without truncating it)\"\"\" return os.fdopen(os.open(f, O_RDWR | O_CREAT,", "basename(p) != 'bin': raise Exception(\"Expected file '%s' directory to be", "def redirect_output(fd): \"\"\"Redirect stdout and stderr to a file descriptor\"\"\"", "an invalid pid: %s\" % (self.path, pid)) return pid def", "parser.error('cannot specify server log using -D option (use --server-log-file)') if", "parser.add_option('--config', metavar='FILE', help='Defaults to ETC_DIR/config.properties') parser.add_option('--log-levels-file', metavar='FILE', help='Defaults to ETC_DIR/log.properties')", "= parser.parse_args() if len(args) != 1: if len(args) == 0:", "pid = self.read_pid() try: os.kill(pid, 0) return True except OSError", "hasattr(os, \"set_inheritable\"): # See https://docs.python.org/3/library/os.html#inheritance-of-file-descriptors # Since Python 3.4 os.set_inheritable(process.pid_file.fileno(),", "o.verbose = options.verbose o.install_path = install_path o.launcher_config = realpath(options.launcher_config or", "Python 2, OSError in Python 3. 
return False def open_read_write(f,", "\"set_inheritable\"): # See https://docs.python.org/3/library/os.html#inheritance-of-file-descriptors # Since Python 3.4 os.set_inheritable(process.pid_file.fileno(), True)", "import traceback from fcntl import flock, LOCK_EX, LOCK_NB from optparse", "not locked by us' self.pid_file.seek(0) self.pid_file.truncate() def write_pid(self, pid): self.clear_pid()", "program argument of the Java application') parser.add_option('--launcher-log-file', metavar='FILE', help='Defaults to", "create_symlink(source, target): \"\"\"Create a symlink, removing the target first if", "pathjoin from signal import SIGTERM, SIGKILL from stat import S_ISLNK", "jvm_properties = load_lines(options.jvm_config) launcher_properties = load_properties(options.launcher_config) try: main_class = launcher_properties['main-class']", "from os import O_RDWR, O_CREAT, O_WRONLY, O_APPEND from os.path import", "properties.items()] classpath = pathjoin(options.install_path, 'lib', '*') command = ['java', '-cp',", "= realpath(options.launcher_log_file or pathjoin(o.data_dir, 'var/log/launcher.log')) o.server_log = realpath(options.server_log_file or pathjoin(o.data_dir,", "os.lstat(p) if not S_ISLNK(st.st_mode): raise Exception('Path exists and is not", "try: os.kill(pid, 0) return True except OSError as e: raise", "their config files: log.levels-file=etc/log.properties \"\"\" if options.etc_dir != pathjoin(options.data_dir, 'etc'):", "elif command == 'stop': stop(process) elif command == 'restart': stop(process)", "+= options.arguments if options.verbose: print(command) print(\"\") env = os.environ.copy() #", "us' self.pid_file.seek(0) line = self.pid_file.readline().strip() if len(line) == 0: raise", "return parser def parse_properties(parser, args): properties = {} for arg", "= build_java_execution(options, False) makedirs(options.data_dir) os.chdir(options.data_dir) process.write_pid(os.getpid()) redirect_stdin_to_devnull() os.execvpe(args[0], args, env)", 
"metavar='FILE', help='Defaults to ETC_DIR/node.properties') parser.add_option('--jvm-config', metavar='FILE', help='Defaults to ETC_DIR/jvm.config') parser.add_option('--config',", "command not in COMMANDS: parser.error('unsupported command: %s' % command) try:", "'config.properties')) o.log_levels = realpath(options.log_levels_file or pathjoin(o.etc_dir, 'log.properties')) o.log_levels_set = bool(options.log_levels_file)", "from a file, ignoring blank or comment lines\"\"\" lines =", "0o644) def redirect_output(fd): \"\"\"Redirect stdout and stderr to a file", "using -D option (use --log-levels-file)') properties[key] = value return properties", "exists, realpath from os.path import join as pathjoin from signal", "description=commands) parser.add_option('-v', '--verbose', action='store_true', default=False, help='Run verbosely') parser.add_option('--etc-dir', metavar='DIR', help='Defaults", "(f, basename(p))) return dirname(p) def makedirs(p): \"\"\"Create directory and all", "load_properties(o.node_config) data_dir = node_properties.get('node.data-dir') o.data_dir = realpath(options.data_dir or data_dir or", "help='Defaults to ETC_DIR/config.properties') parser.add_option('--log-levels-file', metavar='FILE', help='Defaults to ETC_DIR/log.properties') parser.add_option('--data-dir', metavar='DIR',", "process = Process(options.pid_file) if command == 'run': run(process, options) elif", "sys.stderr.fileno()) def symlink_exists(p): \"\"\"Check if symlink exists and raise if", "options.verbose: for i in sorted(vars(options)): print(\"%-15s = %s\" % (i,", "# set process name: https://github.com/electrum/procname process_name = launcher_properties.get('process-name', '') if", "canonical parent of bin/launcher.py\"\"\" if basename(f) != 'launcher.py': raise Exception(\"Expected", "exists and is not a symlink: %s' % p) return", "True except OSError as e: raise Exception('Signaling pid %s failed:", "main(): parser = create_parser() (options, args) = parser.parse_args() if 
len(args)", "0o600) self.refresh() def refresh(self): self.locked = try_lock(self.pid_file) def clear_pid(self): assert", "redirect_stdin_to_devnull() redirect_output(log) os.close(log) os.execvpe(args[0], args, env) def terminate(process, signal, message):", "2, OSError in Python 3. return False def open_read_write(f, mode):", "within their config files: log.levels-file=etc/log.properties \"\"\" if options.etc_dir != pathjoin(options.data_dir,", "a symlink\"\"\" if symlink_exists(target): os.remove(target) if exists(source): os.symlink(source, target) def", "realpath(options.data_dir or data_dir or o.install_path) o.pid_file = realpath(options.pid_file or pathjoin(o.data_dir,", "== 'config': parser.error('cannot specify config using -D option (use --config)')", "!= errno.ESRCH: raise Exception('Signaling pid %s failed: %s' % (pid,", "% (pid, e)) if not process.alive(): process.clear_pid() break sleep(0.1) print('%s", "open_read_write(f, mode): \"\"\"Open file in read/write mode (without truncating it)\"\"\"", "command == 'kill': kill(process) elif command == 'status': status(process) else:", "shim = pathjoin(options.install_path, 'bin', 'procname', system, 'libprocname.so') if exists(shim): env['LD_PRELOAD']", "st = os.lstat(p) if not S_ISLNK(st.st_mode): raise Exception('Path exists and", "if options.install_path != options.data_dir: create_symlink( pathjoin(options.install_path, 'plugin'), pathjoin(options.data_dir, 'plugin')) def", "'procname', system, 'libprocname.so') if exists(shim): env['LD_PRELOAD'] = (env.get('LD_PRELOAD', '') +", "'%s' directory to be 'bin' not '%s\" % (f, basename(p)))", "def handle_command(command, options): process = Process(options.pid_file) if command == 'run':", "o.config_path = realpath(options.config or pathjoin(o.etc_dir, 'config.properties')) o.log_levels = realpath(options.log_levels_file or", "parser = create_parser() (options, args) = parser.parse_args() if len(args) !=", "be 'bin' not '%s\" % (f, basename(p))) return 
dirname(p) def", "a program argument of the Java application') parser.add_option('--launcher-log-file', metavar='FILE', help='Defaults", "+ command) def create_parser(): commands = 'Commands: ' + ',", "from optparse import OptionParser from os import O_RDWR, O_CREAT, O_WRONLY,", "'r+') class Process: def __init__(self, path): makedirs(dirname(path)) self.path = path", "def run(process, options): if process.alive(): print('Already running as %s' %", "for i in arg.split('=', 1)] if key == 'config': parser.error('cannot", "realpath(options.pid_file or pathjoin(o.data_dir, 'var/run/launcher.pid')) o.launcher_log = realpath(options.launcher_log_file or pathjoin(o.data_dir, 'var/log/launcher.log'))", "if basename(p) != 'bin': raise Exception(\"Expected file '%s' directory to", "%s' % e) sys.exit(LSB_STATUS_UNKNOWN) o = Options() o.verbose = options.verbose", "return dirname(p) def makedirs(p): \"\"\"Create directory and all intermediate ones\"\"\"", "IOError in Python 2, OSError in Python 3. return False", "'launcher.py': raise Exception(\"Expected file '%s' to be 'launcher.py' not '%s'\"", "= realpath(options.server_log_file or pathjoin(o.data_dir, 'var/log/server.log')) o.properties = parse_properties(parser, options.properties or", "help='Defaults to ETC_DIR/node.properties') parser.add_option('--jvm-config', metavar='FILE', help='Defaults to ETC_DIR/jvm.config') parser.add_option('--config', metavar='FILE',", "config files: log.levels-file=etc/log.properties \"\"\" if options.etc_dir != pathjoin(options.data_dir, 'etc'): create_symlink(", "import join as pathjoin from signal import SIGTERM, SIGKILL from", "return create_app_symlinks(options) args, env = build_java_execution(options, True) makedirs(dirname(options.launcher_log)) log =", "'config': parser.error('cannot specify config using -D option (use --config)') if", "env['LD_PRELOAD'] = (env.get('LD_PRELOAD', '') + ':' + shim).strip() env['PROCNAME'] =", "AssertionError('Unhandled command: ' + command) def 
create_parser(): commands = 'Commands:", "e.errno != errno.ESRCH: raise Exception('Signaling pid %s failed: %s' %", "% self.path) try: pid = int(line) except ValueError: raise Exception(\"Pid", "4 def find_install_path(f): \"\"\"Find canonical parent of bin/launcher.py\"\"\" if basename(f)", "first if it is a symlink\"\"\" if symlink_exists(target): os.remove(target) if", "main_class = launcher_properties['main-class'] except KeyError: raise Exception(\"Launcher config is missing", "os.path import join as pathjoin from signal import SIGTERM, SIGKILL", "Exception as e: if o.verbose: traceback.print_exc() else: print('ERROR: %s' %", "%s' % p) return True except OSError as e: if", "file '%s' contains garbage: %s\" % (self.path, line)) if pid", "a file\"\"\" properties = {} for line in load_lines(f): k,", "except (IOError, OSError): # IOError in Python 2, OSError in", "errno.ESRCH: raise Exception('Signaling pid %s failed: %s' % (pid, e))", "as %s' % process.read_pid()) def handle_command(command, options): process = Process(options.pid_file)", "'stop': stop(process) elif command == 'restart': stop(process) start(process, options) elif", "help='Defaults to INSTALL_PATH/bin/launcher.properties') parser.add_option('--node-config', metavar='FILE', help='Defaults to ETC_DIR/node.properties') parser.add_option('--jvm-config', metavar='FILE',", "self.pid_file = open_read_write(path, 0o600) self.refresh() def refresh(self): self.locked = try_lock(self.pid_file)", "'pid file is locked by us' self.pid_file.seek(0) line = self.pid_file.readline().strip()", "def read_pid(self): assert not self.locked, 'pid file is locked by", "launcher_properties.get('process-name', '') if len(process_name) > 0: system = platform.system() +", "process.alive(): process.clear_pid() break sleep(0.1) print('%s %s' % (message, pid)) def", "from stat import S_ISLNK from time import sleep COMMANDS =", "Process(options.pid_file) if command == 'run': run(process, options) elif command ==", "#!/usr/bin/env 
python import errno import os import platform import sys", "fcntl import flock, LOCK_EX, LOCK_NB from optparse import OptionParser from", "env) def start(process, options): if process.alive(): print('Already running as %s'", "INSTALL_PATH/etc') parser.add_option('--launcher-config', metavar='FILE', help='Defaults to INSTALL_PATH/bin/launcher.properties') parser.add_option('--node-config', metavar='FILE', help='Defaults to", "o.verbose: print_options(o) try: handle_command(command, o) except SystemExit: raise except Exception", "\"\"\"Redirect stdout and stderr to a file descriptor\"\"\" os.dup2(fd, sys.stdout.fileno())", "(self.path, line)) if pid <= 0: raise Exception(\"Pid file '%s'", "pid) return if hasattr(os, \"set_inheritable\"): # See https://docs.python.org/3/library/os.html#inheritance-of-file-descriptors # Since", "'pid file not locked by us' self.pid_file.seek(0) self.pid_file.truncate() def write_pid(self,", "parser.add_option('--arg', action='append', metavar='ARG', dest='arguments', help='Add a program argument of the", "realpath(options.config or pathjoin(o.etc_dir, 'config.properties')) o.log_levels = realpath(options.log_levels_file or pathjoin(o.etc_dir, 'log.properties'))", "ETC_DIR/node.properties') parser.add_option('--jvm-config', metavar='FILE', help='Defaults to ETC_DIR/jvm.config') parser.add_option('--config', metavar='FILE', help='Defaults to", "print(\"\") class Options: pass def main(): parser = create_parser() (options,", "0: system = platform.system() + '-' + platform.machine() shim =", "os.dup2(fd, sys.stdin.fileno()) os.close(fd) def open_append(f): \"\"\"Open a raw file descriptor", "metavar='DIR', help='Defaults to INSTALL_PATH') parser.add_option('--pid-file', metavar='FILE', help='Defaults to DATA_DIR/var/run/launcher.pid') parser.add_option('--arg',", "classpath] command += jvm_properties + system_properties command += [main_class] command", "= value return properties def print_options(options): if options.verbose: for i", 
"print('Not running') sys.exit(LSB_NOT_RUNNING) print('Running as %s' % process.read_pid()) def handle_command(command,", "'false' jvm_properties = load_lines(options.jvm_config) launcher_properties = load_properties(options.launcher_config) try: main_class =", "sys.exit(LSB_NOT_RUNNING) print('Running as %s' % process.read_pid()) def handle_command(command, options): process", "the target first if it is a symlink\"\"\" if symlink_exists(target):", "':' + shim).strip() env['PROCNAME'] = process_name return command, env def", "== 'status': status(process) else: raise AssertionError('Unhandled command: ' + command)", "in Python 3. return False def open_read_write(f, mode): \"\"\"Open file", "pathjoin(o.install_path, 'etc')) o.node_config = realpath(options.node_config or pathjoin(o.etc_dir, 'node.properties')) o.jvm_config =", "DATA_DIR/var/log/server.log (only in daemon mode)') parser.add_option('-D', action='append', metavar='NAME=VALUE', dest='properties', help='Set", "% process.read_pid()) def handle_command(command, options): process = Process(options.pid_file) if command", "pid def redirect_stdin_to_devnull(): \"\"\"Redirect stdin to /dev/null\"\"\" fd = os.open(os.devnull,", "return True except (IOError, OSError): # IOError in Python 2,", "| O_CREAT, mode), 'r+') class Process: def __init__(self, path): makedirs(dirname(path))", "0: parser.error('command name not specified') else: parser.error('too many arguments') command", "shim).strip() env['PROCNAME'] = process_name return command, env def run(process, options):", "= {} if exists(o.node_config): node_properties = load_properties(o.node_config) data_dir = node_properties.get('node.data-dir')", "'bin': raise Exception(\"Expected file '%s' directory to be 'bin' not", "exists(shim): env['LD_PRELOAD'] = (env.get('LD_PRELOAD', '') + ':' + shim).strip() env['PROCNAME']", "= realpath(options.log_levels_file or pathjoin(o.etc_dir, 'log.properties')) o.log_levels_set = bool(options.log_levels_file) if 
options.node_config", "e: if e.errno != errno.EEXIST: raise def load_properties(f): \"\"\"Load key/value", "as e: raise Exception('Signaling pid %s failed: %s' % (pid,", "o.install_path) o.pid_file = realpath(options.pid_file or pathjoin(o.data_dir, 'var/run/launcher.pid')) o.launcher_log = realpath(options.launcher_log_file", "(use --server-log-file)') if key == 'log.levels-file': parser.error('cannot specify log levels", "if o.verbose: traceback.print_exc() else: print('ERROR: %s' % e) sys.exit(LSB_STATUS_UNKNOWN) if", "\"\"\"Create directory and all intermediate ones\"\"\" try: os.makedirs(p) except OSError", "help='Defaults to DATA_DIR/var/run/launcher.pid') parser.add_option('--arg', action='append', metavar='ARG', dest='arguments', help='Add a program", "help='Defaults to ETC_DIR/jvm.config') parser.add_option('--config', metavar='FILE', help='Defaults to ETC_DIR/config.properties') parser.add_option('--log-levels-file', metavar='FILE',", "is not a symlink: %s' % p) return True except", "help='Defaults to INSTALL_PATH/etc') parser.add_option('--launcher-config', metavar='FILE', help='Defaults to INSTALL_PATH/bin/launcher.properties') parser.add_option('--node-config', metavar='FILE',", "= process_name return command, env def run(process, options): if process.alive():", "type of file exists\"\"\" try: st = os.lstat(p) if not", "and raise if another type of file exists\"\"\" try: st", "= 4 def find_install_path(f): \"\"\"Find canonical parent of bin/launcher.py\"\"\" if", "i in arg.split('=', 1)] if key == 'config': parser.error('cannot specify", "removing the target first if it is a symlink\"\"\" if", "OSError as e: if e.errno != errno.ESRCH: raise Exception('Signaling pid", "is missing 'main-class' property\") properties['config'] = options.config_path system_properties = ['-D%s=%s'", "'log.properties')) o.log_levels_set = bool(options.log_levels_file) if options.node_config and not exists(o.node_config): parser.error('Node", 
"parser.add_option('--launcher-log-file', metavar='FILE', help='Defaults to DATA_DIR/var/log/launcher.log (only in daemon mode)') parser.add_option('--server-log-file',", "!= errno.EEXIST: raise def load_properties(f): \"\"\"Load key/value pairs from a", "= open_append(options.launcher_log) makedirs(options.data_dir) os.chdir(options.data_dir) pid = os.fork() if pid >", "options.arguments if options.verbose: print(command) print(\"\") env = os.environ.copy() # set", "not in o.properties: o.properties[k] = v o.arguments = options.arguments or", "elif command == 'start': start(process, options) elif command == 'stop':", "0: raise Exception(\"Pid file '%s' contains an invalid pid: %s\"", "'-' + platform.machine() shim = pathjoin(options.install_path, 'bin', 'procname', system, 'libprocname.so')", "file\"\"\" properties = {} for line in load_lines(f): k, v", "os.execvpe(args[0], args, env) def start(process, options): if process.alive(): print('Already running", "file in read/write mode (without truncating it)\"\"\" return os.fdopen(os.open(f, O_RDWR", "parser.error('Node config file is missing: %s' % o.node_config) node_properties =", "o.pid_file = realpath(options.pid_file or pathjoin(o.data_dir, 'var/run/launcher.pid')) o.launcher_log = realpath(options.launcher_log_file or", "= 3 LSB_STATUS_UNKNOWN = 4 def find_install_path(f): \"\"\"Find canonical parent", "len(process_name) > 0: system = platform.system() + '-' + platform.machine()", "malformed: %s' % arg) key, value = [i.strip() for i", "def refresh(self): self.locked = try_lock(self.pid_file) def clear_pid(self): assert self.locked, 'pid", "(use --config)') if key == 'log.output-file': parser.error('cannot specify server log", "status(process): if not process.alive(): print('Not running') sys.exit(LSB_NOT_RUNNING) print('Running as %s'", "!= pathjoin(options.data_dir, 'etc'): create_symlink( options.etc_dir, pathjoin(options.data_dir, 'etc')) if options.install_path !=", 
"load_properties(options.launcher_config) try: main_class = launcher_properties['main-class'] except KeyError: raise Exception(\"Launcher config", "def makedirs(p): \"\"\"Create directory and all intermediate ones\"\"\" try: os.makedirs(p)", "parser.add_option('-D', action='append', metavar='NAME=VALUE', dest='properties', help='Set a Java system property') return", "symlink, removing the target first if it is a symlink\"\"\"", "platform import sys import traceback from fcntl import flock, LOCK_EX,", "'var/run/launcher.pid')) o.launcher_log = realpath(options.launcher_log_file or pathjoin(o.data_dir, 'var/log/launcher.log')) o.server_log = realpath(options.server_log_file", "build_java_execution(options, daemon): if not exists(options.config_path): raise Exception('Config file is missing:", "it is a symlink\"\"\" if symlink_exists(target): os.remove(target) if exists(source): os.symlink(source,", "'') if len(process_name) > 0: system = platform.system() + '-'", "= node_properties.get('node.data-dir') o.data_dir = realpath(options.data_dir or data_dir or o.install_path) o.pid_file", "o) except SystemExit: raise except Exception as e: if o.verbose:", "log = open_append(options.launcher_log) makedirs(options.data_dir) os.chdir(options.data_dir) pid = os.fork() if pid", "not '%s\" % (f, basename(p))) return dirname(p) def makedirs(p): \"\"\"Create", "default=False, help='Run verbosely') parser.add_option('--etc-dir', metavar='DIR', help='Defaults to INSTALL_PATH/etc') parser.add_option('--launcher-config', metavar='FILE',", "target first if it is a symlink\"\"\" if symlink_exists(target): os.remove(target)", "pathjoin(o.data_dir, 'var/run/launcher.pid')) o.launcher_log = realpath(options.launcher_log_file or pathjoin(o.data_dir, 'var/log/launcher.log')) o.server_log =", "os.kill(pid, 0) return True except OSError as e: raise Exception('Signaling", "exists(options.log_levels): properties['log.levels-file'] = options.log_levels if daemon: properties['log.output-file'] = 
options.server_log properties['log.enable-console']", "(only in daemon mode)') parser.add_option('--server-log-file', metavar='FILE', help='Defaults to DATA_DIR/var/log/server.log (only", "env = build_java_execution(options, False) makedirs(options.data_dir) os.chdir(options.data_dir) process.write_pid(os.getpid()) redirect_stdin_to_devnull() os.execvpe(args[0], args,", "/dev/null\"\"\" fd = os.open(os.devnull, O_RDWR) os.dup2(fd, sys.stdin.fileno()) os.close(fd) def open_append(f):", "or pathjoin(o.etc_dir, 'config.properties')) o.log_levels = realpath(options.log_levels_file or pathjoin(o.etc_dir, 'log.properties')) o.log_levels_set", "file, ignoring blank or comment lines\"\"\" lines = [] for", "pathjoin(o.etc_dir, 'config.properties')) o.log_levels = realpath(options.log_levels_file or pathjoin(o.etc_dir, 'log.properties')) o.log_levels_set =", "for i in sorted(vars(options)): print(\"%-15s = %s\" % (i, getattr(options,", "to /dev/null\"\"\" fd = os.open(os.devnull, O_RDWR) os.dup2(fd, sys.stdin.fileno()) os.close(fd) def", "daemon mode)') parser.add_option('--server-log-file', metavar='FILE', help='Defaults to DATA_DIR/var/log/server.log (only in daemon", "pathjoin(o.data_dir, 'var/log/server.log')) o.properties = parse_properties(parser, options.properties or {}) for k,", "the 'etc' and 'plugin' directory into the data directory. 
This", "Exception('Signaling pid %s failed: %s' % (pid, e)) if not", "is empty\" % self.path) try: pid = int(line) except ValueError:", "\"\"\" if options.etc_dir != pathjoin(options.data_dir, 'etc'): create_symlink( options.etc_dir, pathjoin(options.data_dir, 'etc'))", "self.read_pid() try: os.kill(pid, 0) return True except OSError as e:", "levels using -D option (use --log-levels-file)') properties[key] = value return", "+ system_properties command += [main_class] command += options.arguments if options.verbose:", "properties = {} for arg in args: if '=' not", "options.config_path) if not exists(options.jvm_config): raise Exception('JVM config file is missing:", "build_java_execution(options, False) makedirs(options.data_dir) os.chdir(options.data_dir) process.write_pid(os.getpid()) redirect_stdin_to_devnull() os.execvpe(args[0], args, env) def", "kill(process) elif command == 'status': status(process) else: raise AssertionError('Unhandled command:", "https://github.com/electrum/procname process_name = launcher_properties.get('process-name', '') if len(process_name) > 0: system", "def alive(self): self.refresh() if self.locked: return False pid = self.read_pid()", "print('Running as %s' % process.read_pid()) def handle_command(command, options): process =", "parser.add_option('-v', '--verbose', action='store_true', default=False, help='Run verbosely') parser.add_option('--etc-dir', metavar='DIR', help='Defaults to", "file not locked by us' self.pid_file.seek(0) self.pid_file.truncate() def write_pid(self, pid):", "raise AssertionError('Unhandled command: ' + command) def create_parser(): commands =", "application') parser.add_option('--launcher-log-file', metavar='FILE', help='Defaults to DATA_DIR/var/log/launcher.log (only in daemon mode)')", "= self.pid_file.readline().strip() if len(line) == 0: raise Exception(\"Pid file '%s'", "data_dir = node_properties.get('node.data-dir') o.data_dir = realpath(options.data_dir or data_dir or o.install_path)", 
"Exception(\"Pid file '%s' contains garbage: %s\" % (self.path, line)) if", "def main(): parser = create_parser() (options, args) = parser.parse_args() if", "start(process, options) elif command == 'stop': stop(process) elif command ==", "config using -D option (use --config)') if key == 'log.output-file':", "from a file\"\"\" properties = {} for line in load_lines(f):", "using -D option (use --server-log-file)') if key == 'log.levels-file': parser.error('cannot", "import sleep COMMANDS = ['run', 'start', 'stop', 'restart', 'kill', 'status']", "optparse import OptionParser from os import O_RDWR, O_CREAT, O_WRONLY, O_APPEND", "descriptor\"\"\" os.dup2(fd, sys.stdout.fileno()) os.dup2(fd, sys.stderr.fileno()) def symlink_exists(p): \"\"\"Check if symlink", "open an exclusive lock (inheritable) on a file\"\"\" try: flock(f,", "'Stopped') def kill(process): terminate(process, SIGKILL, 'Killed') def status(process): if not", "key/value pairs from a file\"\"\" properties = {} for line", "to INSTALL_PATH/bin/launcher.properties') parser.add_option('--node-config', metavar='FILE', help='Defaults to ETC_DIR/node.properties') parser.add_option('--jvm-config', metavar='FILE', help='Defaults", "INSTALL_PATH') parser.add_option('--pid-file', metavar='FILE', help='Defaults to DATA_DIR/var/run/launcher.pid') parser.add_option('--arg', action='append', metavar='ARG', dest='arguments',", "= realpath(options.data_dir or data_dir or o.install_path) o.pid_file = realpath(options.pid_file or", "True) makedirs(dirname(options.launcher_log)) log = open_append(options.launcher_log) makedirs(options.data_dir) os.chdir(options.data_dir) pid = os.fork()", "of the Java application') parser.add_option('--launcher-log-file', metavar='FILE', help='Defaults to DATA_DIR/var/log/launcher.log (only", "os.fdopen(os.open(f, O_RDWR | O_CREAT, mode), 'r+') class Process: def __init__(self,", "[] if o.verbose: print_options(o) try: handle_command(command, o) except SystemExit: raise", "if len(args) == 0: 
parser.error('command name not specified') else: parser.error('too", "= realpath(options.config or pathjoin(o.etc_dir, 'config.properties')) o.log_levels = realpath(options.log_levels_file or pathjoin(o.etc_dir,", "action='append', metavar='ARG', dest='arguments', help='Add a program argument of the Java", "alive(self): self.refresh() if self.locked: return False pid = self.read_pid() try:", "False pid = self.read_pid() try: os.kill(pid, 0) return True except", "command += [main_class] command += options.arguments if options.verbose: print(command) print(\"\")", "o.etc_dir = realpath(options.etc_dir or pathjoin(o.install_path, 'etc')) o.node_config = realpath(options.node_config or", "% (self.path, pid)) return pid def redirect_stdin_to_devnull(): \"\"\"Redirect stdin to", "return create_app_symlinks(options) args, env = build_java_execution(options, False) makedirs(options.data_dir) os.chdir(options.data_dir) process.write_pid(os.getpid())", "parser.parse_args() if len(args) != 1: if len(args) == 0: parser.error('command", "create_app_symlinks(options) args, env = build_java_execution(options, False) makedirs(options.data_dir) os.chdir(options.data_dir) process.write_pid(os.getpid()) redirect_stdin_to_devnull()", "pid)) def stop(process): terminate(process, SIGTERM, 'Stopped') def kill(process): terminate(process, SIGKILL,", "and not exists(options.log_levels): raise Exception('Log levels file is missing: %s'", "file exists\"\"\" try: st = os.lstat(p) if not S_ISLNK(st.st_mode): raise", "into the data directory. 
This is needed to support programs", "to INSTALL_PATH') parser.add_option('--pid-file', metavar='FILE', help='Defaults to DATA_DIR/var/run/launcher.pid') parser.add_option('--arg', action='append', metavar='ARG',", "join as pathjoin from signal import SIGTERM, SIGKILL from stat", "Exception(\"Expected file '%s' to be 'launcher.py' not '%s'\" % (f,", "%s' % process.read_pid()) return create_app_symlinks(options) args, env = build_java_execution(options, False)", "read_pid(self): assert not self.locked, 'pid file is locked by us'", "process_name return command, env def run(process, options): if process.alive(): print('Already", "| O_APPEND | O_CREAT, 0o644) def redirect_output(fd): \"\"\"Redirect stdout and", "to open an exclusive lock (inheritable) on a file\"\"\" try:", "k, v = line.split('=', 1) properties[k.strip()] = v.strip() return properties", "stat import S_ISLNK from time import sleep COMMANDS = ['run',", "return pid = process.read_pid() while True: try: os.kill(pid, signal) except", "lines.append(line) return lines def try_lock(f): \"\"\"Try to open an exclusive", "bin/launcher.py\"\"\" if basename(f) != 'launcher.py': raise Exception(\"Expected file '%s' to", "ValueError: raise Exception(\"Pid file '%s' contains garbage: %s\" % (self.path,", "class Process: def __init__(self, path): makedirs(dirname(path)) self.path = path self.pid_file", "in arg.split('=', 1)] if key == 'config': parser.error('cannot specify config", "True except (IOError, OSError): # IOError in Python 2, OSError", "makedirs(p): \"\"\"Create directory and all intermediate ones\"\"\" try: os.makedirs(p) except", "makedirs(options.data_dir) os.chdir(options.data_dir) pid = os.fork() if pid > 0: process.write_pid(pid)", "help='Defaults to ETC_DIR/log.properties') parser.add_option('--data-dir', metavar='DIR', help='Defaults to INSTALL_PATH') parser.add_option('--pid-file', metavar='FILE',", "Exception(\"Launcher config is missing 'main-class' property\") properties['config'] = 
options.config_path system_properties", "command: ' + command) def create_parser(): commands = 'Commands: '", "action='append', metavar='NAME=VALUE', dest='properties', help='Set a Java system property') return parser", "This is needed to support programs that reference 'etc/xyz' from", "def clear_pid(self): assert self.locked, 'pid file not locked by us'", "as e: print('ERROR: %s' % e) sys.exit(LSB_STATUS_UNKNOWN) o = Options()", "flock(f, LOCK_EX | LOCK_NB) return True except (IOError, OSError): #", "line = self.pid_file.readline().strip() if len(line) == 0: raise Exception(\"Pid file", "= OptionParser(prog='launcher', usage='usage: %prog [options] command', description=commands) parser.add_option('-v', '--verbose', action='store_true',", "many arguments') command = args[0] if command not in COMMANDS:", "% (f, basename(f))) p = realpath(dirname(f)) if basename(p) != 'bin':", "def __init__(self, path): makedirs(dirname(path)) self.path = path self.pid_file = open_read_write(path,", "command == 'status': status(process) else: raise AssertionError('Unhandled command: ' +", "using -D option (use --config)') if key == 'log.output-file': parser.error('cannot", "stop(process) start(process, options) elif command == 'kill': kill(process) elif command", "missing: %s' % options.config_path) if not exists(options.jvm_config): raise Exception('JVM config", "missing: %s' % options.jvm_config) if not exists(options.launcher_config): raise Exception('Launcher config", "{} for line in load_lines(f): k, v = line.split('=', 1)", "specify log levels using -D option (use --log-levels-file)') properties[key] =", "running') sys.exit(LSB_NOT_RUNNING) print('Running as %s' % process.read_pid()) def handle_command(command, options):", "action='store_true', default=False, help='Run verbosely') parser.add_option('--etc-dir', metavar='DIR', help='Defaults to INSTALL_PATH/etc') parser.add_option('--launcher-config',", "as e: if e.errno != errno.ESRCH: raise Exception('Signaling pid %s", 
"properties = options.properties.copy() if exists(options.log_levels): properties['log.levels-file'] = options.log_levels if daemon:", "= options.server_log properties['log.enable-console'] = 'false' jvm_properties = load_lines(options.jvm_config) launcher_properties =", "basename(p))) return dirname(p) def makedirs(p): \"\"\"Create directory and all intermediate", "or comment lines\"\"\" lines = [] for line in open(f,", "print_options(options): if options.verbose: for i in sorted(vars(options)): print(\"%-15s = %s\"", "traceback from fcntl import flock, LOCK_EX, LOCK_NB from optparse import", "(i, getattr(options, i))) print(\"\") class Options: pass def main(): parser", "except KeyError: raise Exception(\"Launcher config is missing 'main-class' property\") properties['config']", "install_path o.launcher_config = realpath(options.launcher_config or pathjoin(o.install_path, 'bin/launcher.properties')) o.etc_dir = realpath(options.etc_dir", "O_RDWR, O_CREAT, O_WRONLY, O_APPEND from os.path import basename, dirname, exists,", "= Options() o.verbose = options.verbose o.install_path = install_path o.launcher_config =", "(options, args) = parser.parse_args() if len(args) != 1: if len(args)", "if not exists(options.jvm_config): raise Exception('JVM config file is missing: %s'", "contains garbage: %s\" % (self.path, line)) if pid <= 0:", "= ['run', 'start', 'stop', 'restart', 'kill', 'status'] LSB_NOT_RUNNING = 3", "if options.node_config and not exists(o.node_config): parser.error('Node config file is missing:", "us' self.pid_file.seek(0) self.pid_file.truncate() def write_pid(self, pid): self.clear_pid() self.pid_file.write(str(pid) + '\\n')", "try_lock(f): \"\"\"Try to open an exclusive lock (inheritable) on a", "%s' % process.read_pid()) def handle_command(command, options): process = Process(options.pid_file) if", "raw file descriptor in append mode\"\"\" # noinspection PyTypeChecker return", "= bool(options.log_levels_file) if options.node_config and not 
exists(o.node_config): parser.error('Node config file", "%s\" % (self.path, pid)) return pid def redirect_stdin_to_devnull(): \"\"\"Redirect stdin", "that reference 'etc/xyz' from within their config files: log.levels-file=etc/log.properties \"\"\"", "refresh(self): self.locked = try_lock(self.pid_file) def clear_pid(self): assert self.locked, 'pid file", "print('Already running as %s' % process.read_pid()) return create_app_symlinks(options) args, env", "self.refresh() if self.locked: return False pid = self.read_pid() try: os.kill(pid,", "= pathjoin(options.install_path, 'bin', 'procname', system, 'libprocname.so') if exists(shim): env['LD_PRELOAD'] =", "sleep(0.1) print('%s %s' % (message, pid)) def stop(process): terminate(process, SIGTERM,", "line.strip() if len(line) > 0 and not line.startswith('#'): lines.append(line) return", "pid > 0: process.write_pid(pid) print('Started as %s' % pid) return", "% i for i in properties.items()] classpath = pathjoin(options.install_path, 'lib',", "os import O_RDWR, O_CREAT, O_WRONLY, O_APPEND from os.path import basename,", "metavar='FILE', help='Defaults to INSTALL_PATH/bin/launcher.properties') parser.add_option('--node-config', metavar='FILE', help='Defaults to ETC_DIR/node.properties') parser.add_option('--jvm-config',", "properties[k.strip()] = v.strip() return properties def load_lines(f): \"\"\"Load lines from", "== 'log.output-file': parser.error('cannot specify server log using -D option (use", "False) makedirs(options.data_dir) os.chdir(options.data_dir) process.write_pid(os.getpid()) redirect_stdin_to_devnull() os.execvpe(args[0], args, env) def start(process,", "config file is missing: %s' % options.jvm_config) if not exists(options.launcher_config):", "%s failed: %s' % (pid, e)) if not process.alive(): process.clear_pid()", "'\\n') self.pid_file.flush() def alive(self): self.refresh() if self.locked: return False pid", "run(process, options) elif command == 'start': start(process, options) elif command", 
"terminate(process, SIGKILL, 'Killed') def status(process): if not process.alive(): print('Not running')", "file is missing: %s' % o.node_config) node_properties = {} if", "o.properties = parse_properties(parser, options.properties or {}) for k, v in", "+ ':' + shim).strip() env['PROCNAME'] = process_name return command, env", "create_symlink( options.etc_dir, pathjoin(options.data_dir, 'etc')) if options.install_path != options.data_dir: create_symlink( pathjoin(options.install_path,", "= platform.system() + '-' + platform.machine() shim = pathjoin(options.install_path, 'bin',", "len(line) == 0: raise Exception(\"Pid file '%s' is empty\" %", "if not exists(options.launcher_config): raise Exception('Launcher config file is missing: %s'", "+ shim).strip() env['PROCNAME'] = process_name return command, env def run(process,", "!= 'bin': raise Exception(\"Expected file '%s' directory to be 'bin'", "'-cp', classpath] command += jvm_properties + system_properties command += [main_class]", "== 'log.levels-file': parser.error('cannot specify log levels using -D option (use", "name not specified') else: parser.error('too many arguments') command = args[0]", "properties['log.levels-file'] = options.log_levels if daemon: properties['log.output-file'] = options.server_log properties['log.enable-console'] =", "not exists(options.launcher_config): raise Exception('Launcher config file is missing: %s' %", "v = line.split('=', 1) properties[k.strip()] = v.strip() return properties def", "file '%s' directory to be 'bin' not '%s\" % (f,", "O_CREAT, mode), 'r+') class Process: def __init__(self, path): makedirs(dirname(path)) self.path", "print('Not running') return pid = process.read_pid() while True: try: os.kill(pid,", "by us' self.pid_file.seek(0) self.pid_file.truncate() def write_pid(self, pid): self.clear_pid() self.pid_file.write(str(pid) +", "key == 'config': parser.error('cannot specify config using -D option (use", "truncating it)\"\"\" return os.fdopen(os.open(f, O_RDWR | 
O_CREAT, mode), 'r+') class", "== 'kill': kill(process) elif command == 'status': status(process) else: raise", "def create_parser(): commands = 'Commands: ' + ', '.join(COMMANDS) parser", "print_options(o) try: handle_command(command, o) except SystemExit: raise except Exception as", "try: os.makedirs(p) except OSError as e: if e.errno != errno.EEXIST:", "def open_append(f): \"\"\"Open a raw file descriptor in append mode\"\"\"", "parser.add_option('--log-levels-file', metavar='FILE', help='Defaults to ETC_DIR/log.properties') parser.add_option('--data-dir', metavar='DIR', help='Defaults to INSTALL_PATH')", "in COMMANDS: parser.error('unsupported command: %s' % command) try: install_path =", "(env.get('LD_PRELOAD', '') + ':' + shim).strip() env['PROCNAME'] = process_name return", "3.4 os.set_inheritable(process.pid_file.fileno(), True) os.setsid() redirect_stdin_to_devnull() redirect_output(log) os.close(log) os.execvpe(args[0], args, env)", "i in properties.items()] classpath = pathjoin(options.install_path, 'lib', '*') command =", "%s\" % (i, getattr(options, i))) print(\"\") class Options: pass def", "3 LSB_STATUS_UNKNOWN = 4 def find_install_path(f): \"\"\"Find canonical parent of", "if options.etc_dir != pathjoin(options.data_dir, 'etc'): create_symlink( options.etc_dir, pathjoin(options.data_dir, 'etc')) if", "name: https://github.com/electrum/procname process_name = launcher_properties.get('process-name', '') if len(process_name) > 0:", "(use --log-levels-file)') properties[key] = value return properties def print_options(options): if", "<filename>launcher/src/main/scripts/bin/launcher.py #!/usr/bin/env python import errno import os import platform import", "sys import traceback from fcntl import flock, LOCK_EX, LOCK_NB from", "options) elif command == 'start': start(process, options) elif command ==", "if exists(o.node_config): node_properties = load_properties(o.node_config) data_dir = node_properties.get('node.data-dir') o.data_dir =", "def 
load_lines(f): \"\"\"Load lines from a file, ignoring blank or", "!= 'launcher.py': raise Exception(\"Expected file '%s' to be 'launcher.py' not", "exists(options.jvm_config): raise Exception('JVM config file is missing: %s' % options.jvm_config)", "ETC_DIR/log.properties') parser.add_option('--data-dir', metavar='DIR', help='Defaults to INSTALL_PATH') parser.add_option('--pid-file', metavar='FILE', help='Defaults to", "in Python 2, OSError in Python 3. return False def", "arg in args: if '=' not in arg: parser.error('property is", "'') + ':' + shim).strip() env['PROCNAME'] = process_name return command,", "metavar='ARG', dest='arguments', help='Add a program argument of the Java application')", "== 'stop': stop(process) elif command == 'restart': stop(process) start(process, options)", "= os.environ.copy() # set process name: https://github.com/electrum/procname process_name = launcher_properties.get('process-name',", "parse_properties(parser, options.properties or {}) for k, v in node_properties.items(): if", "clear_pid(self): assert self.locked, 'pid file not locked by us' self.pid_file.seek(0)", "parser = OptionParser(prog='launcher', usage='usage: %prog [options] command', description=commands) parser.add_option('-v', '--verbose',", "LSB_NOT_RUNNING = 3 LSB_STATUS_UNKNOWN = 4 def find_install_path(f): \"\"\"Find canonical", "= install_path o.launcher_config = realpath(options.launcher_config or pathjoin(o.install_path, 'bin/launcher.properties')) o.etc_dir =", "load_properties(f): \"\"\"Load key/value pairs from a file\"\"\" properties = {}", "if it is a symlink\"\"\" if symlink_exists(target): os.remove(target) if exists(source):", "= realpath(dirname(f)) if basename(p) != 'bin': raise Exception(\"Expected file '%s'", "DATA_DIR/var/run/launcher.pid') parser.add_option('--arg', action='append', metavar='ARG', dest='arguments', help='Add a program argument of", "return True except OSError as e: if e.errno != errno.ENOENT:", "process.clear_pid() break sleep(0.1) 
print('%s %s' % (message, pid)) def stop(process):", "if command not in COMMANDS: parser.error('unsupported command: %s' % command)", "e: if o.verbose: traceback.print_exc() else: print('ERROR: %s' % e) sys.exit(LSB_STATUS_UNKNOWN)", "o.node_config) node_properties = {} if exists(o.node_config): node_properties = load_properties(o.node_config) data_dir", "arguments') command = args[0] if command not in COMMANDS: parser.error('unsupported", "Since Python 3.4 os.set_inheritable(process.pid_file.fileno(), True) os.setsid() redirect_stdin_to_devnull() redirect_output(log) os.close(log) os.execvpe(args[0],", "= [] for line in open(f, 'r').readlines(): line = line.strip()", "self.locked: return False pid = self.read_pid() try: os.kill(pid, 0) return", "= load_lines(options.jvm_config) launcher_properties = load_properties(options.launcher_config) try: main_class = launcher_properties['main-class'] except", "return properties def print_options(options): if options.verbose: for i in sorted(vars(options)):", "options.properties.copy() if exists(options.log_levels): properties['log.levels-file'] = options.log_levels if daemon: properties['log.output-file'] =", "and not line.startswith('#'): lines.append(line) return lines def try_lock(f): \"\"\"Try to", "return if hasattr(os, \"set_inheritable\"): # See https://docs.python.org/3/library/os.html#inheritance-of-file-descriptors # Since Python", "options.etc_dir != pathjoin(options.data_dir, 'etc'): create_symlink( options.etc_dir, pathjoin(options.data_dir, 'etc')) if options.install_path", "mode)') parser.add_option('-D', action='append', metavar='NAME=VALUE', dest='properties', help='Set a Java system property')", "len(args) != 1: if len(args) == 0: parser.error('command name not", "e: if e.errno != errno.ENOENT: raise return False def create_symlink(source,", "line)) if pid <= 0: raise Exception(\"Pid file '%s' contains", "S_ISLNK from time import sleep COMMANDS = ['run', 'start', 'stop',", "pid: %s\" % (self.path, pid)) 
return pid def redirect_stdin_to_devnull(): \"\"\"Redirect", "the Java application') parser.add_option('--launcher-log-file', metavar='FILE', help='Defaults to DATA_DIR/var/log/launcher.log (only in", "elif command == 'restart': stop(process) start(process, options) elif command ==", "it)\"\"\" return os.fdopen(os.open(f, O_RDWR | O_CREAT, mode), 'r+') class Process:", "if not process.alive(): print('Not running') sys.exit(LSB_NOT_RUNNING) print('Running as %s' %", "O_RDWR) os.dup2(fd, sys.stdin.fileno()) os.close(fd) def open_append(f): \"\"\"Open a raw file", "realpath(options.server_log_file or pathjoin(o.data_dir, 'var/log/server.log')) o.properties = parse_properties(parser, options.properties or {})", "KeyError: raise Exception(\"Launcher config is missing 'main-class' property\") properties['config'] =", "parser.add_option('--server-log-file', metavar='FILE', help='Defaults to DATA_DIR/var/log/server.log (only in daemon mode)') parser.add_option('-D',", "daemon): if not exists(options.config_path): raise Exception('Config file is missing: %s'", "e)) def read_pid(self): assert not self.locked, 'pid file is locked", "return False pid = self.read_pid() try: os.kill(pid, 0) return True", "a symlink: %s' % p) return True except OSError as", "realpath(dirname(f)) if basename(p) != 'bin': raise Exception(\"Expected file '%s' directory", "process.read_pid()) def handle_command(command, options): process = Process(options.pid_file) if command ==", "<= 0: raise Exception(\"Pid file '%s' contains an invalid pid:", "load_lines(f): k, v = line.split('=', 1) properties[k.strip()] = v.strip() return", "value return properties def print_options(options): if options.verbose: for i in", "options.properties or {}) for k, v in node_properties.items(): if k", "'bin' not '%s\" % (f, basename(p))) return dirname(p) def makedirs(p):", "realpath from os.path import join as pathjoin from signal import", "running as %s' % process.read_pid()) return create_app_symlinks(options) args, 
env =", "SystemExit: raise except Exception as e: if o.verbose: traceback.print_exc() else:", "line.split('=', 1) properties[k.strip()] = v.strip() return properties def load_lines(f): \"\"\"Load", "pid = int(line) except ValueError: raise Exception(\"Pid file '%s' contains", "not line.startswith('#'): lines.append(line) return lines def try_lock(f): \"\"\"Try to open", "server log using -D option (use --server-log-file)') if key ==", "running') return pid = process.read_pid() while True: try: os.kill(pid, signal)", "__init__(self, path): makedirs(dirname(path)) self.path = path self.pid_file = open_read_write(path, 0o600)", "or pathjoin(o.data_dir, 'var/log/launcher.log')) o.server_log = realpath(options.server_log_file or pathjoin(o.data_dir, 'var/log/server.log')) o.properties", "k not in o.properties: o.properties[k] = v o.arguments = options.arguments", "options) elif command == 'kill': kill(process) elif command == 'status':", "1)] if key == 'config': parser.error('cannot specify config using -D", "-D option (use --config)') if key == 'log.output-file': parser.error('cannot specify", "try: main_class = launcher_properties['main-class'] except KeyError: raise Exception(\"Launcher config is", "if exists(source): os.symlink(source, target) def create_app_symlinks(options): \"\"\" Symlink the 'etc'", "% options.launcher_config) if options.log_levels_set and not exists(options.log_levels): raise Exception('Log levels", "as e: if e.errno != errno.EEXIST: raise def load_properties(f): \"\"\"Load", "'restart', 'kill', 'status'] LSB_NOT_RUNNING = 3 LSB_STATUS_UNKNOWN = 4 def", "os.set_inheritable(process.pid_file.fileno(), True) os.setsid() redirect_stdin_to_devnull() redirect_output(log) os.close(log) os.execvpe(args[0], args, env) def", "parser.error('cannot specify log levels using -D option (use --log-levels-file)') properties[key]", "as %s' % process.read_pid()) return create_app_symlinks(options) args, env = build_java_execution(options,", "is needed to support 
programs that reference 'etc/xyz' from within", "symlink exists and raise if another type of file exists\"\"\"", "--server-log-file)') if key == 'log.levels-file': parser.error('cannot specify log levels using", "Exception('Config file is missing: %s' % options.config_path) if not exists(options.jvm_config):", "file '%s' to be 'launcher.py' not '%s'\" % (f, basename(f)))", "'plugin')) def build_java_execution(options, daemon): if not exists(options.config_path): raise Exception('Config file", "command == 'restart': stop(process) start(process, options) elif command == 'kill':", "assert self.locked, 'pid file not locked by us' self.pid_file.seek(0) self.pid_file.truncate()", "line in open(f, 'r').readlines(): line = line.strip() if len(line) >", "self.path) try: pid = int(line) except ValueError: raise Exception(\"Pid file", "redirect_stdin_to_devnull() os.execvpe(args[0], args, env) def start(process, options): if process.alive(): print('Already", "raise Exception('Path exists and is not a symlink: %s' %", "'main-class' property\") properties['config'] = options.config_path system_properties = ['-D%s=%s' % i", "'status'] LSB_NOT_RUNNING = 3 LSB_STATUS_UNKNOWN = 4 def find_install_path(f): \"\"\"Find", "parser def parse_properties(parser, args): properties = {} for arg in", "== 'restart': stop(process) start(process, options) elif command == 'kill': kill(process)", "raise Exception('Config file is missing: %s' % options.config_path) if not", "not in arg: parser.error('property is malformed: %s' % arg) key,", "= pathjoin(options.install_path, 'lib', '*') command = ['java', '-cp', classpath] command", "= Process(options.pid_file) if command == 'run': run(process, options) elif command", "LOCK_NB from optparse import OptionParser from os import O_RDWR, O_CREAT,", "def create_app_symlinks(options): \"\"\" Symlink the 'etc' and 'plugin' directory into", "to ETC_DIR/config.properties') parser.add_option('--log-levels-file', metavar='FILE', help='Defaults to 
ETC_DIR/log.properties') parser.add_option('--data-dir', metavar='DIR', help='Defaults", "pass def main(): parser = create_parser() (options, args) = parser.parse_args()", "% process.read_pid()) return create_app_symlinks(options) args, env = build_java_execution(options, False) makedirs(options.data_dir)", "'run': run(process, options) elif command == 'start': start(process, options) elif", "getattr(options, i))) print(\"\") class Options: pass def main(): parser =", "of bin/launcher.py\"\"\" if basename(f) != 'launcher.py': raise Exception(\"Expected file '%s'", "from os.path import join as pathjoin from signal import SIGTERM,", "specified') else: parser.error('too many arguments') command = args[0] if command", "return command, env def run(process, options): if process.alive(): print('Already running", "= ['java', '-cp', classpath] command += jvm_properties + system_properties command", "mode (without truncating it)\"\"\" return os.fdopen(os.open(f, O_RDWR | O_CREAT, mode),", "'kill', 'status'] LSB_NOT_RUNNING = 3 LSB_STATUS_UNKNOWN = 4 def find_install_path(f):", "specify config using -D option (use --config)') if key ==", "% arg) key, value = [i.strip() for i in arg.split('=',", "not exists(options.log_levels): raise Exception('Log levels file is missing: %s' %", "realpath(options.node_config or pathjoin(o.etc_dir, 'node.properties')) o.jvm_config = realpath(options.jvm_config or pathjoin(o.etc_dir, 'jvm.config'))", "options.log_levels if daemon: properties['log.output-file'] = options.server_log properties['log.enable-console'] = 'false' jvm_properties", "metavar='FILE', help='Defaults to DATA_DIR/var/run/launcher.pid') parser.add_option('--arg', action='append', metavar='ARG', dest='arguments', help='Add a", "node_properties = {} if exists(o.node_config): node_properties = load_properties(o.node_config) data_dir =", "pathjoin(options.install_path, 'plugin'), pathjoin(options.data_dir, 'plugin')) def build_java_execution(options, daemon): if not 
exists(options.config_path):", "= options.verbose o.install_path = install_path o.launcher_config = realpath(options.launcher_config or pathjoin(o.install_path,", "help='Defaults to DATA_DIR/var/log/launcher.log (only in daemon mode)') parser.add_option('--server-log-file', metavar='FILE', help='Defaults", "makedirs(dirname(options.launcher_log)) log = open_append(options.launcher_log) makedirs(options.data_dir) os.chdir(options.data_dir) pid = os.fork() if", "command, env def run(process, options): if process.alive(): print('Already running as", "basename(f) != 'launcher.py': raise Exception(\"Expected file '%s' to be 'launcher.py'", "not process.alive(): print('Not running') sys.exit(LSB_NOT_RUNNING) print('Running as %s' % process.read_pid())", "to be 'launcher.py' not '%s'\" % (f, basename(f))) p =", "= os.lstat(p) if not S_ISLNK(st.st_mode): raise Exception('Path exists and is", "stop(process): terminate(process, SIGTERM, 'Stopped') def kill(process): terminate(process, SIGKILL, 'Killed') def", "and 'plugin' directory into the data directory. This is needed", "and not exists(o.node_config): parser.error('Node config file is missing: %s' %", "pathjoin(o.data_dir, 'var/log/launcher.log')) o.server_log = realpath(options.server_log_file or pathjoin(o.data_dir, 'var/log/server.log')) o.properties =", "except Exception as e: print('ERROR: %s' % e) sys.exit(LSB_STATUS_UNKNOWN) o", "OSError): # IOError in Python 2, OSError in Python 3.", "len(line) > 0 and not line.startswith('#'): lines.append(line) return lines def", "classpath = pathjoin(options.install_path, 'lib', '*') command = ['java', '-cp', classpath]", "SIGKILL from stat import S_ISLNK from time import sleep COMMANDS", "file is missing: %s' % options.jvm_config) if not exists(options.launcher_config): raise", "def print_options(options): if options.verbose: for i in sorted(vars(options)): print(\"%-15s =", "import sys import traceback from fcntl import flock, LOCK_EX, LOCK_NB", "data directory. 
This is needed to support programs that reference", "raise return False def create_symlink(source, target): \"\"\"Create a symlink, removing", "dirname, exists, realpath from os.path import join as pathjoin from", "= 'false' jvm_properties = load_lines(options.jvm_config) launcher_properties = load_properties(options.launcher_config) try: main_class", "Options: pass def main(): parser = create_parser() (options, args) =", "except OSError as e: if e.errno != errno.EEXIST: raise def", "missing: %s' % o.node_config) node_properties = {} if exists(o.node_config): node_properties", "and is not a symlink: %s' % p) return True", "raise Exception('Log levels file is missing: %s' % options.log_levels) properties", "pairs from a file\"\"\" properties = {} for line in", "if hasattr(os, \"set_inheritable\"): # See https://docs.python.org/3/library/os.html#inheritance-of-file-descriptors # Since Python 3.4", "not '%s'\" % (f, basename(f))) p = realpath(dirname(f)) if basename(p)", "'var/log/launcher.log')) o.server_log = realpath(options.server_log_file or pathjoin(o.data_dir, 'var/log/server.log')) o.properties = parse_properties(parser,", "%s\" % (self.path, line)) if pid <= 0: raise Exception(\"Pid", "'plugin' directory into the data directory. 
This is needed to", "Exception('Launcher config file is missing: %s' % options.launcher_config) if options.log_levels_set", "o.install_path = install_path o.launcher_config = realpath(options.launcher_config or pathjoin(o.install_path, 'bin/launcher.properties')) o.etc_dir", "raise def load_properties(f): \"\"\"Load key/value pairs from a file\"\"\" properties", "if exists(options.log_levels): properties['log.levels-file'] = options.log_levels if daemon: properties['log.output-file'] = options.server_log", "basename, dirname, exists, realpath from os.path import join as pathjoin", "o.launcher_config = realpath(options.launcher_config or pathjoin(o.install_path, 'bin/launcher.properties')) o.etc_dir = realpath(options.etc_dir or", "assert not self.locked, 'pid file is locked by us' self.pid_file.seek(0)", "Python 3. return False def open_read_write(f, mode): \"\"\"Open file in", "launcher_properties['main-class'] except KeyError: raise Exception(\"Launcher config is missing 'main-class' property\")", "metavar='DIR', help='Defaults to INSTALL_PATH/etc') parser.add_option('--launcher-config', metavar='FILE', help='Defaults to INSTALL_PATH/bin/launcher.properties') parser.add_option('--node-config',", "specify server log using -D option (use --server-log-file)') if key", "option (use --log-levels-file)') properties[key] = value return properties def print_options(options):", "handle_command(command, o) except SystemExit: raise except Exception as e: if", "Exception('Log levels file is missing: %s' % options.log_levels) properties =", "a raw file descriptor in append mode\"\"\" # noinspection PyTypeChecker", "'.join(COMMANDS) parser = OptionParser(prog='launcher', usage='usage: %prog [options] command', description=commands) parser.add_option('-v',", "% (f, basename(p))) return dirname(p) def makedirs(p): \"\"\"Create directory and", "return os.open(f, O_WRONLY | O_APPEND | O_CREAT, 0o644) def redirect_output(fd):", "OSError in Python 3. 
return False def open_read_write(f, mode): \"\"\"Open", "import SIGTERM, SIGKILL from stat import S_ISLNK from time import", "as e: if o.verbose: traceback.print_exc() else: print('ERROR: %s' % e)", "command', description=commands) parser.add_option('-v', '--verbose', action='store_true', default=False, help='Run verbosely') parser.add_option('--etc-dir', metavar='DIR',", "command) try: install_path = find_install_path(sys.argv[0]) except Exception as e: print('ERROR:", "= path self.pid_file = open_read_write(path, 0o600) self.refresh() def refresh(self): self.locked", "\"\"\"Open file in read/write mode (without truncating it)\"\"\" return os.fdopen(os.open(f,", "properties['config'] = options.config_path system_properties = ['-D%s=%s' % i for i", "def parse_properties(parser, args): properties = {} for arg in args:", "(IOError, OSError): # IOError in Python 2, OSError in Python", "not process.alive(): print('Not running') return pid = process.read_pid() while True:", "[i.strip() for i in arg.split('=', 1)] if key == 'config':", "errno.ENOENT: raise return False def create_symlink(source, target): \"\"\"Create a symlink,", "= process.read_pid() while True: try: os.kill(pid, signal) except OSError as", "usage='usage: %prog [options] command', description=commands) parser.add_option('-v', '--verbose', action='store_true', default=False, help='Run", "raise Exception(\"Expected file '%s' directory to be 'bin' not '%s\"", "(pid, e)) def read_pid(self): assert not self.locked, 'pid file is", "realpath(options.etc_dir or pathjoin(o.install_path, 'etc')) o.node_config = realpath(options.node_config or pathjoin(o.etc_dir, 'node.properties'))", "help='Defaults to INSTALL_PATH') parser.add_option('--pid-file', metavar='FILE', help='Defaults to DATA_DIR/var/run/launcher.pid') parser.add_option('--arg', action='append',", "parent of bin/launcher.py\"\"\" if basename(f) != 'launcher.py': raise Exception(\"Expected file", "or pathjoin(o.etc_dir, 'node.properties')) o.jvm_config 
= realpath(options.jvm_config or pathjoin(o.etc_dir, 'jvm.config')) o.config_path", "realpath(options.jvm_config or pathjoin(o.etc_dir, 'jvm.config')) o.config_path = realpath(options.config or pathjoin(o.etc_dir, 'config.properties'))", "system property') return parser def parse_properties(parser, args): properties = {}", "line.startswith('#'): lines.append(line) return lines def try_lock(f): \"\"\"Try to open an", "True except OSError as e: if e.errno != errno.ENOENT: raise", "https://docs.python.org/3/library/os.html#inheritance-of-file-descriptors # Since Python 3.4 os.set_inheritable(process.pid_file.fileno(), True) os.setsid() redirect_stdin_to_devnull() redirect_output(log)", "handle_command(command, options): process = Process(options.pid_file) if command == 'run': run(process,", "' + command) def create_parser(): commands = 'Commands: ' +", "except ValueError: raise Exception(\"Pid file '%s' contains garbage: %s\" %", "OSError as e: if e.errno != errno.EEXIST: raise def load_properties(f):", "else: raise AssertionError('Unhandled command: ' + command) def create_parser(): commands", "properties[key] = value return properties def print_options(options): if options.verbose: for", "process.alive(): print('Not running') return pid = process.read_pid() while True: try:", "parser.error('too many arguments') command = args[0] if command not in", "env def run(process, options): if process.alive(): print('Already running as %s'", "exists and raise if another type of file exists\"\"\" try:", "sys.exit(LSB_STATUS_UNKNOWN) o = Options() o.verbose = options.verbose o.install_path = install_path", "= self.read_pid() try: os.kill(pid, 0) return True except OSError as", "+ platform.machine() shim = pathjoin(options.install_path, 'bin', 'procname', system, 'libprocname.so') if", "if pid > 0: process.write_pid(pid) print('Started as %s' % pid)", "create_app_symlinks(options) args, env = build_java_execution(options, True) makedirs(dirname(options.launcher_log)) log = 
open_append(options.launcher_log)", "True) os.setsid() redirect_stdin_to_devnull() redirect_output(log) os.close(log) os.execvpe(args[0], args, env) def terminate(process,", "'log.output-file': parser.error('cannot specify server log using -D option (use --server-log-file)')", "False def create_symlink(source, target): \"\"\"Create a symlink, removing the target", "== 'start': start(process, options) elif command == 'stop': stop(process) elif", "help='Set a Java system property') return parser def parse_properties(parser, args):", "= 'Commands: ' + ', '.join(COMMANDS) parser = OptionParser(prog='launcher', usage='usage:", "a Java system property') return parser def parse_properties(parser, args): properties", "= load_properties(o.node_config) data_dir = node_properties.get('node.data-dir') o.data_dir = realpath(options.data_dir or data_dir", "command += jvm_properties + system_properties command += [main_class] command +=", "or [] if o.verbose: print_options(o) try: handle_command(command, o) except SystemExit:", "'etc')) if options.install_path != options.data_dir: create_symlink( pathjoin(options.install_path, 'plugin'), pathjoin(options.data_dir, 'plugin'))", "'etc' and 'plugin' directory into the data directory. 
This is", "!= errno.ENOENT: raise return False def create_symlink(source, target): \"\"\"Create a", "in load_lines(f): k, v = line.split('=', 1) properties[k.strip()] = v.strip()", "open(f, 'r').readlines(): line = line.strip() if len(line) > 0 and", "as e: if e.errno != errno.ENOENT: raise return False def", "e: if e.errno != errno.ESRCH: raise Exception('Signaling pid %s failed:", "from fcntl import flock, LOCK_EX, LOCK_NB from optparse import OptionParser", "to INSTALL_PATH/etc') parser.add_option('--launcher-config', metavar='FILE', help='Defaults to INSTALL_PATH/bin/launcher.properties') parser.add_option('--node-config', metavar='FILE', help='Defaults", "from time import sleep COMMANDS = ['run', 'start', 'stop', 'restart',", "Exception(\"Pid file '%s' contains an invalid pid: %s\" % (self.path,", "directory and all intermediate ones\"\"\" try: os.makedirs(p) except OSError as", "create_app_symlinks(options): \"\"\" Symlink the 'etc' and 'plugin' directory into the", "to DATA_DIR/var/log/server.log (only in daemon mode)') parser.add_option('-D', action='append', metavar='NAME=VALUE', dest='properties',", "' + ', '.join(COMMANDS) parser = OptionParser(prog='launcher', usage='usage: %prog [options]", "% (message, pid)) def stop(process): terminate(process, SIGTERM, 'Stopped') def kill(process):", "needed to support programs that reference 'etc/xyz' from within their", "'%s\" % (f, basename(p))) return dirname(p) def makedirs(p): \"\"\"Create directory", "flock, LOCK_EX, LOCK_NB from optparse import OptionParser from os import", "mode), 'r+') class Process: def __init__(self, path): makedirs(dirname(path)) self.path =", "Exception(\"Pid file '%s' is empty\" % self.path) try: pid =", "options): process = Process(options.pid_file) if command == 'run': run(process, options)", "= build_java_execution(options, True) makedirs(dirname(options.launcher_log)) log = open_append(options.launcher_log) makedirs(options.data_dir) os.chdir(options.data_dir) pid", "errno.EEXIST: 
raise def load_properties(f): \"\"\"Load key/value pairs from a file\"\"\"", "path self.pid_file = open_read_write(path, 0o600) self.refresh() def refresh(self): self.locked =", "config is missing 'main-class' property\") properties['config'] = options.config_path system_properties =", "(without truncating it)\"\"\" return os.fdopen(os.open(f, O_RDWR | O_CREAT, mode), 'r+')", "if process.alive(): print('Already running as %s' % process.read_pid()) return create_app_symlinks(options)", "is missing: %s' % options.log_levels) properties = options.properties.copy() if exists(options.log_levels):", "return pid def redirect_stdin_to_devnull(): \"\"\"Redirect stdin to /dev/null\"\"\" fd =", "os.makedirs(p) except OSError as e: if e.errno != errno.EEXIST: raise", "if key == 'config': parser.error('cannot specify config using -D option", "a file\"\"\" try: flock(f, LOCK_EX | LOCK_NB) return True except", "elif command == 'kill': kill(process) elif command == 'status': status(process)", "parser.add_option('--pid-file', metavar='FILE', help='Defaults to DATA_DIR/var/run/launcher.pid') parser.add_option('--arg', action='append', metavar='ARG', dest='arguments', help='Add", "args) = parser.parse_args() if len(args) != 1: if len(args) ==", "def stop(process): terminate(process, SIGTERM, 'Stopped') def kill(process): terminate(process, SIGKILL, 'Killed')", "is a symlink\"\"\" if symlink_exists(target): os.remove(target) if exists(source): os.symlink(source, target)", "line = line.strip() if len(line) > 0 and not line.startswith('#'):", "import flock, LOCK_EX, LOCK_NB from optparse import OptionParser from os", "for line in load_lines(f): k, v = line.split('=', 1) properties[k.strip()]", "O_APPEND from os.path import basename, dirname, exists, realpath from os.path", "target): \"\"\"Create a symlink, removing the target first if it", "if not process.alive(): process.clear_pid() break sleep(0.1) print('%s %s' % (message,", "options.jvm_config) if not 
exists(options.launcher_config): raise Exception('Launcher config file is missing:", "'plugin'), pathjoin(options.data_dir, 'plugin')) def build_java_execution(options, daemon): if not exists(options.config_path): raise", "return False def create_symlink(source, target): \"\"\"Create a symlink, removing the", "def build_java_execution(options, daemon): if not exists(options.config_path): raise Exception('Config file is", "= options.arguments or [] if o.verbose: print_options(o) try: handle_command(command, o)", "jvm_properties + system_properties command += [main_class] command += options.arguments if", "= v o.arguments = options.arguments or [] if o.verbose: print_options(o)", "traceback.print_exc() else: print('ERROR: %s' % e) sys.exit(LSB_STATUS_UNKNOWN) if __name__ ==", "file\"\"\" try: flock(f, LOCK_EX | LOCK_NB) return True except (IOError,", "properties def load_lines(f): \"\"\"Load lines from a file, ignoring blank", "= (env.get('LD_PRELOAD', '') + ':' + shim).strip() env['PROCNAME'] = process_name", "pid = process.read_pid() while True: try: os.kill(pid, signal) except OSError", "+ '\\n') self.pid_file.flush() def alive(self): self.refresh() if self.locked: return False", "self.path = path self.pid_file = open_read_write(path, 0o600) self.refresh() def refresh(self):", "(only in daemon mode)') parser.add_option('-D', action='append', metavar='NAME=VALUE', dest='properties', help='Set a", "key == 'log.output-file': parser.error('cannot specify server log using -D option", "Exception('JVM config file is missing: %s' % options.jvm_config) if not", "o.launcher_log = realpath(options.launcher_log_file or pathjoin(o.data_dir, 'var/log/launcher.log')) o.server_log = realpath(options.server_log_file or", "+= jvm_properties + system_properties command += [main_class] command += options.arguments", "process.read_pid()) return create_app_symlinks(options) args, env = build_java_execution(options, True) makedirs(dirname(options.launcher_log)) log", 
"build_java_execution(options, True) makedirs(dirname(options.launcher_log)) log = open_append(options.launcher_log) makedirs(options.data_dir) os.chdir(options.data_dir) pid =", "to ETC_DIR/log.properties') parser.add_option('--data-dir', metavar='DIR', help='Defaults to INSTALL_PATH') parser.add_option('--pid-file', metavar='FILE', help='Defaults", "pathjoin(options.data_dir, 'plugin')) def build_java_execution(options, daemon): if not exists(options.config_path): raise Exception('Config" ]
[ "__init__(self): ShowBase.__init__(self) # Load environment model self.scene = self.loader.loadModel(\"models/environment\") #", "execute self.taskMgr.add(self.spinCameraTask, \"SpinCameraTask\") # Load and transform panda actor self.pandaActor", "model to render self.scene.reparentTo(self.render) # Scale and position model self.scene.setScale(0.25,", "0.005) self.pandaActor.reparentTo(self.render) # Loop animation self.pandaActor.loop(\"walk\") def spinCameraTask(self, task): angleDegs", "self.pandaActor.setScale(0.005, 0.005, 0.005) self.pandaActor.reparentTo(self.render) # Loop animation self.pandaActor.loop(\"walk\") def spinCameraTask(self,", "# Load and transform panda actor self.pandaActor = Actor(\"models/panda-model\", {\"walk\":", "/ 180.0) self.camera.setPos(20*np.sin(angleRads), -20.0 * np.cos(angleRads), 3) self.camera.setHpr(angleDegs, 0, 0)", "self.camera.setPos(20*np.sin(angleRads), -20.0 * np.cos(angleRads), 3) self.camera.setHpr(angleDegs, 0, 0) return Task.cont", "position model self.scene.setScale(0.25, 0.25, 0.25) self.scene.setPos(-8, 42, 0) # Add", "Add spinCameraTask to task manager to execute self.taskMgr.add(self.spinCameraTask, \"SpinCameraTask\") #", "# Loop animation self.pandaActor.loop(\"walk\") def spinCameraTask(self, task): angleDegs = task.time", "import ShowBase from direct.task import Task from direct.actor.Actor import Actor", "to execute self.taskMgr.add(self.spinCameraTask, \"SpinCameraTask\") # Load and transform panda actor", "model self.scene.setScale(0.25, 0.25, 0.25) self.scene.setPos(-8, 42, 0) # Add spinCameraTask", "and transform panda actor self.pandaActor = Actor(\"models/panda-model\", {\"walk\": \"models/panda-walk4\"}) self.pandaActor.setScale(0.005,", "self.pandaActor.loop(\"walk\") def spinCameraTask(self, task): angleDegs = task.time * 6.0 angleRads", "direct.task import Task from direct.actor.Actor import Actor import numpy as", "from direct.actor.Actor import Actor import numpy as np class 
MyApp(ShowBase):", "0) # Add spinCameraTask to task manager to execute self.taskMgr.add(self.spinCameraTask,", "Task from direct.actor.Actor import Actor import numpy as np class", "angleRads = angleDegs * (np.pi / 180.0) self.camera.setPos(20*np.sin(angleRads), -20.0 *", "numpy as np class MyApp(ShowBase): def __init__(self): ShowBase.__init__(self) # Load", "= Actor(\"models/panda-model\", {\"walk\": \"models/panda-walk4\"}) self.pandaActor.setScale(0.005, 0.005, 0.005) self.pandaActor.reparentTo(self.render) # Loop", "= task.time * 6.0 angleRads = angleDegs * (np.pi /", "self.taskMgr.add(self.spinCameraTask, \"SpinCameraTask\") # Load and transform panda actor self.pandaActor =", "{\"walk\": \"models/panda-walk4\"}) self.pandaActor.setScale(0.005, 0.005, 0.005) self.pandaActor.reparentTo(self.render) # Loop animation self.pandaActor.loop(\"walk\")", "Loop animation self.pandaActor.loop(\"walk\") def spinCameraTask(self, task): angleDegs = task.time *", "3) self.camera.setHpr(angleDegs, 0, 0) return Task.cont app = MyApp() app.run()", "environment model self.scene = self.loader.loadModel(\"models/environment\") # Reparent model to render", "render self.scene.reparentTo(self.render) # Scale and position model self.scene.setScale(0.25, 0.25, 0.25)", "ShowBase.__init__(self) # Load environment model self.scene = self.loader.loadModel(\"models/environment\") # Reparent", "0.25, 0.25) self.scene.setPos(-8, 42, 0) # Add spinCameraTask to task", "42, 0) # Add spinCameraTask to task manager to execute", "import Task from direct.actor.Actor import Actor import numpy as np", "(np.pi / 180.0) self.camera.setPos(20*np.sin(angleRads), -20.0 * np.cos(angleRads), 3) self.camera.setHpr(angleDegs, 0,", "import Actor import numpy as np class MyApp(ShowBase): def __init__(self):", "# Reparent model to render self.scene.reparentTo(self.render) # Scale and position", "180.0) self.camera.setPos(20*np.sin(angleRads), -20.0 * np.cos(angleRads), 3) self.camera.setHpr(angleDegs, 0, 0) 
return", "Load environment model self.scene = self.loader.loadModel(\"models/environment\") # Reparent model to", "spinCameraTask(self, task): angleDegs = task.time * 6.0 angleRads = angleDegs", "MyApp(ShowBase): def __init__(self): ShowBase.__init__(self) # Load environment model self.scene =", "from direct.showbase.ShowBase import ShowBase from direct.task import Task from direct.actor.Actor", "class MyApp(ShowBase): def __init__(self): ShowBase.__init__(self) # Load environment model self.scene", "model self.scene = self.loader.loadModel(\"models/environment\") # Reparent model to render self.scene.reparentTo(self.render)", "* np.cos(angleRads), 3) self.camera.setHpr(angleDegs, 0, 0) return Task.cont app =", "self.scene.setPos(-8, 42, 0) # Add spinCameraTask to task manager to", "Scale and position model self.scene.setScale(0.25, 0.25, 0.25) self.scene.setPos(-8, 42, 0)", "self.scene = self.loader.loadModel(\"models/environment\") # Reparent model to render self.scene.reparentTo(self.render) #", "# Load environment model self.scene = self.loader.loadModel(\"models/environment\") # Reparent model", "0.25) self.scene.setPos(-8, 42, 0) # Add spinCameraTask to task manager", "= angleDegs * (np.pi / 180.0) self.camera.setPos(20*np.sin(angleRads), -20.0 * np.cos(angleRads),", "self.scene.reparentTo(self.render) # Scale and position model self.scene.setScale(0.25, 0.25, 0.25) self.scene.setPos(-8,", "and position model self.scene.setScale(0.25, 0.25, 0.25) self.scene.setPos(-8, 42, 0) #", "Actor(\"models/panda-model\", {\"walk\": \"models/panda-walk4\"}) self.pandaActor.setScale(0.005, 0.005, 0.005) self.pandaActor.reparentTo(self.render) # Loop animation", "Actor import numpy as np class MyApp(ShowBase): def __init__(self): ShowBase.__init__(self)", "to task manager to execute self.taskMgr.add(self.spinCameraTask, \"SpinCameraTask\") # Load and", "np class MyApp(ShowBase): def __init__(self): ShowBase.__init__(self) # Load environment model", "* (np.pi / 180.0) 
self.camera.setPos(20*np.sin(angleRads), -20.0 * np.cos(angleRads), 3) self.camera.setHpr(angleDegs,", "\"SpinCameraTask\") # Load and transform panda actor self.pandaActor = Actor(\"models/panda-model\",", "6.0 angleRads = angleDegs * (np.pi / 180.0) self.camera.setPos(20*np.sin(angleRads), -20.0", "angleDegs * (np.pi / 180.0) self.camera.setPos(20*np.sin(angleRads), -20.0 * np.cos(angleRads), 3)", "direct.actor.Actor import Actor import numpy as np class MyApp(ShowBase): def", "def spinCameraTask(self, task): angleDegs = task.time * 6.0 angleRads =", "angleDegs = task.time * 6.0 angleRads = angleDegs * (np.pi", "as np class MyApp(ShowBase): def __init__(self): ShowBase.__init__(self) # Load environment", "0.005, 0.005) self.pandaActor.reparentTo(self.render) # Loop animation self.pandaActor.loop(\"walk\") def spinCameraTask(self, task):", "actor self.pandaActor = Actor(\"models/panda-model\", {\"walk\": \"models/panda-walk4\"}) self.pandaActor.setScale(0.005, 0.005, 0.005) self.pandaActor.reparentTo(self.render)", "animation self.pandaActor.loop(\"walk\") def spinCameraTask(self, task): angleDegs = task.time * 6.0", "to render self.scene.reparentTo(self.render) # Scale and position model self.scene.setScale(0.25, 0.25,", "task manager to execute self.taskMgr.add(self.spinCameraTask, \"SpinCameraTask\") # Load and transform", "ShowBase from direct.task import Task from direct.actor.Actor import Actor import", "import numpy as np class MyApp(ShowBase): def __init__(self): ShowBase.__init__(self) #", "direct.showbase.ShowBase import ShowBase from direct.task import Task from direct.actor.Actor import", "panda actor self.pandaActor = Actor(\"models/panda-model\", {\"walk\": \"models/panda-walk4\"}) self.pandaActor.setScale(0.005, 0.005, 0.005)", "-20.0 * np.cos(angleRads), 3) self.camera.setHpr(angleDegs, 0, 0) return Task.cont app", "manager to execute self.taskMgr.add(self.spinCameraTask, \"SpinCameraTask\") # Load and transform panda", "def __init__(self): 
ShowBase.__init__(self) # Load environment model self.scene = self.loader.loadModel(\"models/environment\")", "self.pandaActor = Actor(\"models/panda-model\", {\"walk\": \"models/panda-walk4\"}) self.pandaActor.setScale(0.005, 0.005, 0.005) self.pandaActor.reparentTo(self.render) #", "* 6.0 angleRads = angleDegs * (np.pi / 180.0) self.camera.setPos(20*np.sin(angleRads),", "Load and transform panda actor self.pandaActor = Actor(\"models/panda-model\", {\"walk\": \"models/panda-walk4\"})", "# Add spinCameraTask to task manager to execute self.taskMgr.add(self.spinCameraTask, \"SpinCameraTask\")", "\"models/panda-walk4\"}) self.pandaActor.setScale(0.005, 0.005, 0.005) self.pandaActor.reparentTo(self.render) # Loop animation self.pandaActor.loop(\"walk\") def", "<filename>code/sim/test.py from direct.showbase.ShowBase import ShowBase from direct.task import Task from", "from direct.task import Task from direct.actor.Actor import Actor import numpy", "Reparent model to render self.scene.reparentTo(self.render) # Scale and position model", "np.cos(angleRads), 3) self.camera.setHpr(angleDegs, 0, 0) return Task.cont app = MyApp()", "# Scale and position model self.scene.setScale(0.25, 0.25, 0.25) self.scene.setPos(-8, 42,", "spinCameraTask to task manager to execute self.taskMgr.add(self.spinCameraTask, \"SpinCameraTask\") # Load", "self.loader.loadModel(\"models/environment\") # Reparent model to render self.scene.reparentTo(self.render) # Scale and", "transform panda actor self.pandaActor = Actor(\"models/panda-model\", {\"walk\": \"models/panda-walk4\"}) self.pandaActor.setScale(0.005, 0.005,", "self.pandaActor.reparentTo(self.render) # Loop animation self.pandaActor.loop(\"walk\") def spinCameraTask(self, task): angleDegs =", "task): angleDegs = task.time * 6.0 angleRads = angleDegs *", "task.time * 6.0 angleRads = angleDegs * (np.pi / 180.0)", "self.scene.setScale(0.25, 0.25, 0.25) self.scene.setPos(-8, 42, 0) # Add spinCameraTask to", "= 
self.loader.loadModel(\"models/environment\") # Reparent model to render self.scene.reparentTo(self.render) # Scale" ]
[]
[ "value in correlations.items() if value is not None } #", "custom sample Returns: This function returns a dictionary containing: -", "description \"table\": table_stats, # Per variable descriptions \"variables\": series_description, #", "if config.correlations[correlation_name].calculate ] number_of_tasks = 8 + len(df.columns) + len(correlation_names)", "duplicates\") metrics, duplicates = get_duplicates(config, df, supported_columns) table_stats.update(metrics) pbar.update() #", "[ correlation_name for correlation_name in [ \"pearson\", \"spearman\", \"kendall\", \"phi_k\",", "package details. \"\"\" if df is None: raise ValueError(\"Can not", "not in sample: sample[\"name\"] = None if \"caption\" not in", "calculation of statistics for each series in this DataFrame.\"\"\" import", "import pandas as pd from tqdm.auto import tqdm from visions", "column, type_name in variables.items() if type_name != \"Unsupported\" ] interval_columns", "pbar.set_postfix_str(\"Completed\") date_end = datetime.utcnow() analysis = { \"title\": config.title, \"date_start\":", "not None correlations = { key: value for key, value", "if df is None: raise ValueError(\"Can not describe a `lazy`", "# Messages pbar.set_postfix_str(\"Get messages/warnings\") messages = get_messages(config, table_stats, series_description, correlations)", "# missing diagrams pbar.set_postfix_str(\"Get missing diagrams\") missing = get_missing_diagrams(config, df,", "warnings from datetime import datetime from typing import Optional import", "pbar.update() # Sample pbar.set_postfix_str(\"Take sample\") if sample is None: samples", "in your data. - package: package details. 
\"\"\" if df", "total=number_of_tasks, desc=\"Summarize dataset\", disable=disable_progress_bar ) as pbar: series_description = get_series_descriptions(", "not isinstance(df, pd.DataFrame): warnings.warn(\"df is not of type pandas.DataFrame\") disable_progress_bar", "Per variable descriptions \"variables\": series_description, # Bivariate relations \"scatter\": scatter_matrix,", "!= \"Unsupported\" ] interval_columns = [ column for column, type_name", "containing: - table: overall statistics. - variables: descriptions per series.", "to these patterns in your data. - package: package details.", "\"title\": config.title, \"date_start\": date_start, \"date_end\": date_end, \"duration\": date_end - date_start,", "pbar.update() pbar.set_postfix_str(\"Completed\") date_end = datetime.utcnow() analysis = { \"title\": config.title,", "visions import VisionsTypeset from pandas_profiling.config import Settings from pandas_profiling.model.correlations import", "= get_messages(config, table_stats, series_description, correlations) pbar.update() pbar.set_postfix_str(\"Get reproduction details\") package", "Warnings \"messages\": messages, # Package \"package\": package, # Sample \"sample\":", "pandas_profiling.model.duplicates import get_duplicates from pandas_profiling.model.sample import Sample, get_sample from pandas_profiling.model.summarizer", "pd.DataFrame, summarizer: BaseSummarizer, typeset: VisionsTypeset, sample: Optional[dict] = None, )", "- missing: missing value diagrams. 
- messages: direct special attention", "ValueError(\"Can not describe a `lazy` ProfileReport without a DataFrame.\") if", "config.progress_bar date_start = datetime.utcnow() correlation_names = [ correlation_name for correlation_name", "return { # Analysis metadata \"analysis\": analysis, # Overall dataset", "correlations = {} for correlation_name in correlation_names: pbar.set_postfix_str(f\"Calculate {correlation_name} correlation\")", "sample\") if sample is None: samples = get_sample(config, df) else:", "Scatter matrix pbar.set_postfix_str(\"Get scatter matrix\") scatter_matrix = get_scatter_matrix(config, df, interval_columns)", "caption=sample[\"caption\"], ) ] pbar.update() # Duplicates pbar.set_postfix_str(\"Locating duplicates\") metrics, duplicates", "( get_messages, get_missing_diagrams, get_scatter_matrix, get_series_descriptions, get_table_stats, ) from pandas_profiling.version import", "BaseSummarizer from pandas_profiling.model.summary import ( get_messages, get_missing_diagrams, get_scatter_matrix, get_series_descriptions, get_table_stats,", "datetime import datetime from typing import Optional import pandas as", "\"package\": package, # Sample \"sample\": samples, # Duplicates \"duplicates\": duplicates,", "= get_missing_diagrams(config, df, table_stats) pbar.update() # Sample pbar.set_postfix_str(\"Take sample\") if", "patterns in your data. - package: package details. \"\"\" if", "attention to these patterns in your data. - package: package", "analysis = { \"title\": config.title, \"date_start\": date_start, \"date_end\": date_end, \"duration\":", "\"name\" not in sample: sample[\"name\"] = None if \"caption\" not", "statistics. - variables: descriptions per series. 
- correlations: correlation matrices.", "None } # Scatter matrix pbar.set_postfix_str(\"Get scatter matrix\") scatter_matrix =", "import ( get_messages, get_missing_diagrams, get_scatter_matrix, get_series_descriptions, get_table_stats, ) from pandas_profiling.version", "pbar.set_postfix_str(\"Get table statistics\") table_stats = get_table_stats(config, df, series_description) pbar.update() #", "with custom sample Returns: This function returns a dictionary containing:", "else: if \"name\" not in sample: sample[\"name\"] = None if", "correlations, # Missing values \"missing\": missing, # Warnings \"messages\": messages,", "series. - correlations: correlation matrices. - missing: missing value diagrams.", "correlation_name in [ \"pearson\", \"spearman\", \"kendall\", \"phi_k\", \"cramers\", ] if", "table_stats.update(metrics) pbar.update() # Messages pbar.set_postfix_str(\"Get messages/warnings\") messages = get_messages(config, table_stats,", "\"kendall\", \"phi_k\", \"cramers\", ] if config.correlations[correlation_name].calculate ] number_of_tasks = 8", "pbar ) pbar.set_postfix_str(\"Get variable types\") variables = { column: description[\"type\"]", "pbar.update() # Table statistics pbar.set_postfix_str(\"Get table statistics\") table_stats = get_table_stats(config,", "correlations: correlation matrices. - missing: missing value diagrams. - messages:", "variables = { column: description[\"type\"] for column, description in series_description.items()", "None correlations = { key: value for key, value in", "df) else: if \"name\" not in sample: sample[\"name\"] = None", "warnings.warn(\"df is not of type pandas.DataFrame\") disable_progress_bar = not config.progress_bar", "samples = [ Sample( id=\"custom\", data=sample[\"data\"], name=sample[\"name\"], caption=sample[\"caption\"], ) ]", "metadata \"analysis\": analysis, # Overall dataset description \"table\": table_stats, #", "missing value diagrams. 
- messages: direct special attention to these", "description[\"type\"] for column, description in series_description.items() } supported_columns = [", "pbar.update() # Duplicates pbar.set_postfix_str(\"Locating duplicates\") metrics, duplicates = get_duplicates(config, df,", "import get_duplicates from pandas_profiling.model.sample import Sample, get_sample from pandas_profiling.model.summarizer import", "pbar.set_postfix_str(f\"Calculate {correlation_name} correlation\") correlations[correlation_name] = calculate_correlation( config, df, correlation_name, series_description", "df, supported_columns) table_stats.update(metrics) pbar.update() # Messages pbar.set_postfix_str(\"Get messages/warnings\") messages =", "pandas_profiling.config import Settings from pandas_profiling.model.correlations import calculate_correlation from pandas_profiling.model.duplicates import", "pandas_profiling.version import __version__ def describe( config: Settings, df: pd.DataFrame, summarizer:", "\"analysis\": analysis, # Overall dataset description \"table\": table_stats, # Per", "for each series in this DataFrame.\"\"\" import warnings from datetime", "typeset: VisionsTypeset, sample: Optional[dict] = None, ) -> dict: \"\"\"Calculate", "datetime from typing import Optional import pandas as pd from", "[ column for column, type_name in variables.items() if type_name !=", "Optional[dict] = None, ) -> dict: \"\"\"Calculate the statistics for", "} # Scatter matrix pbar.set_postfix_str(\"Get scatter matrix\") scatter_matrix = get_scatter_matrix(config,", "get_sample(config, df) else: if \"name\" not in sample: sample[\"name\"] =", "correlations is not None correlations = { key: value for", "\"table\": table_stats, # Per variable descriptions \"variables\": series_description, # Bivariate", "- package: package details. \"\"\" if df is None: raise", "DataFrame. Args: config: report Settings object df: DataFrame. 
sample: optional,", "= datetime.utcnow() correlation_names = [ correlation_name for correlation_name in [", "df: DataFrame. sample: optional, dict with custom sample Returns: This", "Get correlations correlations = {} for correlation_name in correlation_names: pbar.set_postfix_str(f\"Calculate", "pbar.update() pbar.set_postfix_str(\"Get reproduction details\") package = { \"pandas_profiling_version\": __version__, \"pandas_profiling_config\":", "if type_name == \"Numeric\" ] pbar.update() # Get correlations correlations", "import warnings from datetime import datetime from typing import Optional", "# Correlation matrices \"correlations\": correlations, # Missing values \"missing\": missing,", "correlations.items() if value is not None } # Scatter matrix", "your data. - package: package details. \"\"\" if df is", "descriptions \"variables\": series_description, # Bivariate relations \"scatter\": scatter_matrix, # Correlation", "not of type pandas.DataFrame\") disable_progress_bar = not config.progress_bar date_start =", "number_of_tasks = 8 + len(df.columns) + len(correlation_names) with tqdm( total=number_of_tasks,", "series_description, # Bivariate relations \"scatter\": scatter_matrix, # Correlation matrices \"correlations\":", "get_scatter_matrix, get_series_descriptions, get_table_stats, ) from pandas_profiling.version import __version__ def describe(", "df, series_description) pbar.update() # missing diagrams pbar.set_postfix_str(\"Get missing diagrams\") missing", "correlations = { key: value for key, value in correlations.items()", "pd from tqdm.auto import tqdm from visions import VisionsTypeset from", "as pbar: series_description = get_series_descriptions( config, df, summarizer, typeset, pbar", "- date_start, } return { # Analysis metadata \"analysis\": analysis,", "get_table_stats, ) from pandas_profiling.version import __version__ def describe( config: Settings,", ") as pbar: series_description = get_series_descriptions( config, df, summarizer, 
typeset,", "get_series_descriptions( config, df, summarizer, typeset, pbar ) pbar.set_postfix_str(\"Get variable types\")", "correlations correlations = {} for correlation_name in correlation_names: pbar.set_postfix_str(f\"Calculate {correlation_name}", "metrics, duplicates = get_duplicates(config, df, supported_columns) table_stats.update(metrics) pbar.update() # Messages", "Settings from pandas_profiling.model.correlations import calculate_correlation from pandas_profiling.model.duplicates import get_duplicates from", "interval_columns) pbar.update() # Table statistics pbar.set_postfix_str(\"Get table statistics\") table_stats =", "Analysis metadata \"analysis\": analysis, # Overall dataset description \"table\": table_stats,", ") ] pbar.update() # Duplicates pbar.set_postfix_str(\"Locating duplicates\") metrics, duplicates =", "df, interval_columns) pbar.update() # Table statistics pbar.set_postfix_str(\"Get table statistics\") table_stats", "package = { \"pandas_profiling_version\": __version__, \"pandas_profiling_config\": config.json(), } pbar.update() pbar.set_postfix_str(\"Completed\")", "- variables: descriptions per series. - correlations: correlation matrices. -", "series_description = get_series_descriptions( config, df, summarizer, typeset, pbar ) pbar.set_postfix_str(\"Get", "series in this DataFrame. 
Args: config: report Settings object df:", "a DataFrame.\") if not isinstance(df, pd.DataFrame): warnings.warn(\"df is not of", "for column, description in series_description.items() } supported_columns = [ column", "pbar.set_postfix_str(\"Get messages/warnings\") messages = get_messages(config, table_stats, series_description, correlations) pbar.update() pbar.set_postfix_str(\"Get", "Package \"package\": package, # Sample \"sample\": samples, # Duplicates \"duplicates\":", "\"Numeric\" ] pbar.update() # Get correlations correlations = {} for", "not None } # Scatter matrix pbar.set_postfix_str(\"Get scatter matrix\") scatter_matrix", "table_stats, # Per variable descriptions \"variables\": series_description, # Bivariate relations", "returns a dictionary containing: - table: overall statistics. - variables:", "key, value in correlations.items() if value is not None }", "__version__, \"pandas_profiling_config\": config.json(), } pbar.update() pbar.set_postfix_str(\"Completed\") date_end = datetime.utcnow() analysis", "diagrams\") missing = get_missing_diagrams(config, df, table_stats) pbar.update() # Sample pbar.set_postfix_str(\"Take", "report Settings object df: DataFrame. sample: optional, dict with custom", "special attention to these patterns in your data. - package:", "direct special attention to these patterns in your data. -", "= { \"title\": config.title, \"date_start\": date_start, \"date_end\": date_end, \"duration\": date_end", "import tqdm from visions import VisionsTypeset from pandas_profiling.config import Settings", "\"phi_k\", \"cramers\", ] if config.correlations[correlation_name].calculate ] number_of_tasks = 8 +", "Missing values \"missing\": missing, # Warnings \"messages\": messages, # Package", "{ column: description[\"type\"] for column, description in series_description.items() } supported_columns", "analysis, # Overall dataset description \"table\": table_stats, # Per variable", "overall statistics. - variables: descriptions per series. 
- correlations: correlation", "= { column: description[\"type\"] for column, description in series_description.items() }", "\"\"\"Organize the calculation of statistics for each series in this", "sample Returns: This function returns a dictionary containing: - table:", "correlation_names: pbar.set_postfix_str(f\"Calculate {correlation_name} correlation\") correlations[correlation_name] = calculate_correlation( config, df, correlation_name,", "import calculate_correlation from pandas_profiling.model.duplicates import get_duplicates from pandas_profiling.model.sample import Sample,", "sample is None: samples = get_sample(config, df) else: if \"name\"", "config: report Settings object df: DataFrame. sample: optional, dict with", "table: overall statistics. - variables: descriptions per series. - correlations:", "Sample( id=\"custom\", data=sample[\"data\"], name=sample[\"name\"], caption=sample[\"caption\"], ) ] pbar.update() # Duplicates", "a dictionary containing: - table: overall statistics. - variables: descriptions", "if \"caption\" not in sample: sample[\"caption\"] = None samples =", "get_table_stats(config, df, series_description) pbar.update() # missing diagrams pbar.set_postfix_str(\"Get missing diagrams\")", "the statistics for each series in this DataFrame. 
Args: config:", "missing diagrams\") missing = get_missing_diagrams(config, df, table_stats) pbar.update() # Sample", "if type_name != \"Unsupported\" ] interval_columns = [ column for", "correlations) pbar.update() pbar.set_postfix_str(\"Get reproduction details\") package = { \"pandas_profiling_version\": __version__,", "sample: optional, dict with custom sample Returns: This function returns", "correlation_names = [ correlation_name for correlation_name in [ \"pearson\", \"spearman\",", "date_end, \"duration\": date_end - date_start, } return { # Analysis", "\"correlations\": correlations, # Missing values \"missing\": missing, # Warnings \"messages\":", "sure correlations is not None correlations = { key: value", "== \"Numeric\" ] pbar.update() # Get correlations correlations = {}", "None samples = [ Sample( id=\"custom\", data=sample[\"data\"], name=sample[\"name\"], caption=sample[\"caption\"], )", "statistics pbar.set_postfix_str(\"Get table statistics\") table_stats = get_table_stats(config, df, series_description) pbar.update()", "] number_of_tasks = 8 + len(df.columns) + len(correlation_names) with tqdm(", "package: package details. \"\"\" if df is None: raise ValueError(\"Can", "= get_duplicates(config, df, supported_columns) table_stats.update(metrics) pbar.update() # Messages pbar.set_postfix_str(\"Get messages/warnings\")", "date_start, \"date_end\": date_end, \"duration\": date_end - date_start, } return {", "\"pandas_profiling_version\": __version__, \"pandas_profiling_config\": config.json(), } pbar.update() pbar.set_postfix_str(\"Completed\") date_end = datetime.utcnow()", "type_name in variables.items() if type_name == \"Numeric\" ] pbar.update() #", "if sample is None: samples = get_sample(config, df) else: if", "pd.DataFrame): warnings.warn(\"df is not of type pandas.DataFrame\") disable_progress_bar = not", "matrices. - missing: missing value diagrams. 
- messages: direct special", "= get_series_descriptions( config, df, summarizer, typeset, pbar ) pbar.set_postfix_str(\"Get variable", "scatter_matrix = get_scatter_matrix(config, df, interval_columns) pbar.update() # Table statistics pbar.set_postfix_str(\"Get", "BaseSummarizer, typeset: VisionsTypeset, sample: Optional[dict] = None, ) -> dict:", "+ len(df.columns) + len(correlation_names) with tqdm( total=number_of_tasks, desc=\"Summarize dataset\", disable=disable_progress_bar", ") pbar.set_postfix_str(\"Get variable types\") variables = { column: description[\"type\"] for", "get_duplicates(config, df, supported_columns) table_stats.update(metrics) pbar.update() # Messages pbar.set_postfix_str(\"Get messages/warnings\") messages", "for key, value in correlations.items() if value is not None", "in this DataFrame. Args: config: report Settings object df: DataFrame.", "\"date_end\": date_end, \"duration\": date_end - date_start, } return { #", "variable descriptions \"variables\": series_description, # Bivariate relations \"scatter\": scatter_matrix, #", "from tqdm.auto import tqdm from visions import VisionsTypeset from pandas_profiling.config", "from visions import VisionsTypeset from pandas_profiling.config import Settings from pandas_profiling.model.correlations", "from pandas_profiling.model.correlations import calculate_correlation from pandas_profiling.model.duplicates import get_duplicates from pandas_profiling.model.sample", "= 8 + len(df.columns) + len(correlation_names) with tqdm( total=number_of_tasks, desc=\"Summarize", "is not None correlations = { key: value for key,", "{ # Analysis metadata \"analysis\": analysis, # Overall dataset description", "pandas as pd from tqdm.auto import tqdm from visions import", "{ key: value for key, value in correlations.items() if value", "# make sure correlations is not None correlations = {", "samples = get_sample(config, df) else: if \"name\" not in sample:", "get_missing_diagrams, get_scatter_matrix, 
get_series_descriptions, get_table_stats, ) from pandas_profiling.version import __version__ def", "table_stats, series_description, correlations) pbar.update() pbar.set_postfix_str(\"Get reproduction details\") package = {", "DataFrame.\") if not isinstance(df, pd.DataFrame): warnings.warn(\"df is not of type", ") -> dict: \"\"\"Calculate the statistics for each series in", "# Warnings \"messages\": messages, # Package \"package\": package, # Sample", "= get_table_stats(config, df, series_description) pbar.update() # missing diagrams pbar.set_postfix_str(\"Get missing", "for column, type_name in variables.items() if type_name != \"Unsupported\" ]", "matrix\") scatter_matrix = get_scatter_matrix(config, df, interval_columns) pbar.update() # Table statistics", "\"scatter\": scatter_matrix, # Correlation matrices \"correlations\": correlations, # Missing values", "import __version__ def describe( config: Settings, df: pd.DataFrame, summarizer: BaseSummarizer,", "sample[\"caption\"] = None samples = [ Sample( id=\"custom\", data=sample[\"data\"], name=sample[\"name\"],", "id=\"custom\", data=sample[\"data\"], name=sample[\"name\"], caption=sample[\"caption\"], ) ] pbar.update() # Duplicates pbar.set_postfix_str(\"Locating", "correlations[correlation_name] = calculate_correlation( config, df, correlation_name, series_description ) pbar.update() #", "for each series in this DataFrame. 
Args: config: report Settings", "} pbar.update() pbar.set_postfix_str(\"Completed\") date_end = datetime.utcnow() analysis = { \"title\":", "config.correlations[correlation_name].calculate ] number_of_tasks = 8 + len(df.columns) + len(correlation_names) with", "series_description ) pbar.update() # make sure correlations is not None", "missing diagrams pbar.set_postfix_str(\"Get missing diagrams\") missing = get_missing_diagrams(config, df, table_stats)", "table_stats = get_table_stats(config, df, series_description) pbar.update() # missing diagrams pbar.set_postfix_str(\"Get", "len(df.columns) + len(correlation_names) with tqdm( total=number_of_tasks, desc=\"Summarize dataset\", disable=disable_progress_bar )", "sample[\"name\"] = None if \"caption\" not in sample: sample[\"caption\"] =", "pandas_profiling.model.correlations import calculate_correlation from pandas_profiling.model.duplicates import get_duplicates from pandas_profiling.model.sample import", "series_description) pbar.update() # missing diagrams pbar.set_postfix_str(\"Get missing diagrams\") missing =", "- table: overall statistics. - variables: descriptions per series. -", "DataFrame. sample: optional, dict with custom sample Returns: This function", "type_name in variables.items() if type_name != \"Unsupported\" ] interval_columns =", "is None: samples = get_sample(config, df) else: if \"name\" not", "] if config.correlations[correlation_name].calculate ] number_of_tasks = 8 + len(df.columns) +", "each series in this DataFrame. 
Args: config: report Settings object", "isinstance(df, pd.DataFrame): warnings.warn(\"df is not of type pandas.DataFrame\") disable_progress_bar =", "tqdm.auto import tqdm from visions import VisionsTypeset from pandas_profiling.config import", "VisionsTypeset from pandas_profiling.config import Settings from pandas_profiling.model.correlations import calculate_correlation from", "[ \"pearson\", \"spearman\", \"kendall\", \"phi_k\", \"cramers\", ] if config.correlations[correlation_name].calculate ]", "series_description.items() } supported_columns = [ column for column, type_name in", "table statistics\") table_stats = get_table_stats(config, df, series_description) pbar.update() # missing", "Bivariate relations \"scatter\": scatter_matrix, # Correlation matrices \"correlations\": correlations, #", "disable_progress_bar = not config.progress_bar date_start = datetime.utcnow() correlation_names = [", "pbar.set_postfix_str(\"Get missing diagrams\") missing = get_missing_diagrams(config, df, table_stats) pbar.update() #", "DataFrame.\"\"\" import warnings from datetime import datetime from typing import", "\"\"\" if df is None: raise ValueError(\"Can not describe a", "{correlation_name} correlation\") correlations[correlation_name] = calculate_correlation( config, df, correlation_name, series_description )", "details\") package = { \"pandas_profiling_version\": __version__, \"pandas_profiling_config\": config.json(), } pbar.update()", "import VisionsTypeset from pandas_profiling.config import Settings from pandas_profiling.model.correlations import calculate_correlation", "get_messages, get_missing_diagrams, get_scatter_matrix, get_series_descriptions, get_table_stats, ) from pandas_profiling.version import __version__", "in series_description.items() } supported_columns = [ column for column, type_name", "column for column, type_name in variables.items() if type_name != \"Unsupported\"", "variables.items() if type_name == \"Numeric\" ] pbar.update() # Get 
correlations", "\"Unsupported\" ] interval_columns = [ column for column, type_name in", "+ len(correlation_names) with tqdm( total=number_of_tasks, desc=\"Summarize dataset\", disable=disable_progress_bar ) as", "= get_scatter_matrix(config, df, interval_columns) pbar.update() # Table statistics pbar.set_postfix_str(\"Get table", "matrices \"correlations\": correlations, # Missing values \"missing\": missing, # Warnings", "8 + len(df.columns) + len(correlation_names) with tqdm( total=number_of_tasks, desc=\"Summarize dataset\",", "variables: descriptions per series. - correlations: correlation matrices. - missing:", "] pbar.update() # Duplicates pbar.set_postfix_str(\"Locating duplicates\") metrics, duplicates = get_duplicates(config,", ") pbar.update() # make sure correlations is not None correlations", "optional, dict with custom sample Returns: This function returns a", "missing, # Warnings \"messages\": messages, # Package \"package\": package, #", "date_end - date_start, } return { # Analysis metadata \"analysis\":", "matrix pbar.set_postfix_str(\"Get scatter matrix\") scatter_matrix = get_scatter_matrix(config, df, interval_columns) pbar.update()", "diagrams pbar.set_postfix_str(\"Get missing diagrams\") missing = get_missing_diagrams(config, df, table_stats) pbar.update()", "statistics for each series in this DataFrame. Args: config: report", "calculate_correlation from pandas_profiling.model.duplicates import get_duplicates from pandas_profiling.model.sample import Sample, get_sample", "= [ correlation_name for correlation_name in [ \"pearson\", \"spearman\", \"kendall\",", "# Get correlations correlations = {} for correlation_name in correlation_names:", "missing: missing value diagrams. - messages: direct special attention to", "get_sample from pandas_profiling.model.summarizer import BaseSummarizer from pandas_profiling.model.summary import ( get_messages,", "per series. - correlations: correlation matrices. 
- missing: missing value", "# Table statistics pbar.set_postfix_str(\"Get table statistics\") table_stats = get_table_stats(config, df,", "for column, type_name in variables.items() if type_name == \"Numeric\" ]", "as pd from tqdm.auto import tqdm from visions import VisionsTypeset", "is not None } # Scatter matrix pbar.set_postfix_str(\"Get scatter matrix\")", "reproduction details\") package = { \"pandas_profiling_version\": __version__, \"pandas_profiling_config\": config.json(), }", "statistics for each series in this DataFrame.\"\"\" import warnings from", "\"date_start\": date_start, \"date_end\": date_end, \"duration\": date_end - date_start, } return", "Settings, df: pd.DataFrame, summarizer: BaseSummarizer, typeset: VisionsTypeset, sample: Optional[dict] =", "in correlation_names: pbar.set_postfix_str(f\"Calculate {correlation_name} correlation\") correlations[correlation_name] = calculate_correlation( config, df,", "} supported_columns = [ column for column, type_name in variables.items()", "df is None: raise ValueError(\"Can not describe a `lazy` ProfileReport", "pbar.set_postfix_str(\"Get variable types\") variables = { column: description[\"type\"] for column,", "description in series_description.items() } supported_columns = [ column for column,", "value is not None } # Scatter matrix pbar.set_postfix_str(\"Get scatter", "pbar.update() # make sure correlations is not None correlations =", "] pbar.update() # Get correlations correlations = {} for correlation_name", "column, description in series_description.items() } supported_columns = [ column for", "config, df, correlation_name, series_description ) pbar.update() # make sure correlations", "= get_sample(config, df) else: if \"name\" not in sample: sample[\"name\"]", "messages = get_messages(config, table_stats, series_description, correlations) pbar.update() pbar.set_postfix_str(\"Get reproduction details\")", "from typing import Optional import pandas as pd from tqdm.auto", "df, correlation_name, 
series_description ) pbar.update() # make sure correlations is", "import Optional import pandas as pd from tqdm.auto import tqdm", "not in sample: sample[\"caption\"] = None samples = [ Sample(", "= calculate_correlation( config, df, correlation_name, series_description ) pbar.update() # make", "supported_columns) table_stats.update(metrics) pbar.update() # Messages pbar.set_postfix_str(\"Get messages/warnings\") messages = get_messages(config,", "} return { # Analysis metadata \"analysis\": analysis, # Overall", "This function returns a dictionary containing: - table: overall statistics.", "summarizer, typeset, pbar ) pbar.set_postfix_str(\"Get variable types\") variables = {", "tqdm( total=number_of_tasks, desc=\"Summarize dataset\", disable=disable_progress_bar ) as pbar: series_description =", "describe a `lazy` ProfileReport without a DataFrame.\") if not isinstance(df,", "statistics\") table_stats = get_table_stats(config, df, series_description) pbar.update() # missing diagrams", "datetime.utcnow() analysis = { \"title\": config.title, \"date_start\": date_start, \"date_end\": date_end,", "None if \"caption\" not in sample: sample[\"caption\"] = None samples", "typeset, pbar ) pbar.set_postfix_str(\"Get variable types\") variables = { column:", "\"messages\": messages, # Package \"package\": package, # Sample \"sample\": samples,", "# Scatter matrix pbar.set_postfix_str(\"Get scatter matrix\") scatter_matrix = get_scatter_matrix(config, df,", "if value is not None } # Scatter matrix pbar.set_postfix_str(\"Get", "get_duplicates from pandas_profiling.model.sample import Sample, get_sample from pandas_profiling.model.summarizer import BaseSummarizer", "pandas.DataFrame\") disable_progress_bar = not config.progress_bar date_start = datetime.utcnow() correlation_names =", "# Analysis metadata \"analysis\": analysis, # Overall dataset description \"table\":", "in [ \"pearson\", \"spearman\", \"kendall\", \"phi_k\", \"cramers\", ] if 
config.correlations[correlation_name].calculate", "the calculation of statistics for each series in this DataFrame.\"\"\"", "from pandas_profiling.model.sample import Sample, get_sample from pandas_profiling.model.summarizer import BaseSummarizer from", "import BaseSummarizer from pandas_profiling.model.summary import ( get_messages, get_missing_diagrams, get_scatter_matrix, get_series_descriptions,", "\"variables\": series_description, # Bivariate relations \"scatter\": scatter_matrix, # Correlation matrices", "sample: Optional[dict] = None, ) -> dict: \"\"\"Calculate the statistics", "variables.items() if type_name != \"Unsupported\" ] interval_columns = [ column", "dataset\", disable=disable_progress_bar ) as pbar: series_description = get_series_descriptions( config, df,", "data. - package: package details. \"\"\" if df is None:", "for correlation_name in [ \"pearson\", \"spearman\", \"kendall\", \"phi_k\", \"cramers\", ]", "disable=disable_progress_bar ) as pbar: series_description = get_series_descriptions( config, df, summarizer,", "= [ column for column, type_name in variables.items() if type_name", "] interval_columns = [ column for column, type_name in variables.items()", "summarizer: BaseSummarizer, typeset: VisionsTypeset, sample: Optional[dict] = None, ) ->", "sample: sample[\"caption\"] = None samples = [ Sample( id=\"custom\", data=sample[\"data\"],", "key: value for key, value in correlations.items() if value is", "None: samples = get_sample(config, df) else: if \"name\" not in", "details. 
\"\"\" if df is None: raise ValueError(\"Can not describe", "# Sample pbar.set_postfix_str(\"Take sample\") if sample is None: samples =", "dict with custom sample Returns: This function returns a dictionary", "correlation_name in correlation_names: pbar.set_postfix_str(f\"Calculate {correlation_name} correlation\") correlations[correlation_name] = calculate_correlation( config,", "date_start, } return { # Analysis metadata \"analysis\": analysis, #", "- correlations: correlation matrices. - missing: missing value diagrams. -", "correlation_name for correlation_name in [ \"pearson\", \"spearman\", \"kendall\", \"phi_k\", \"cramers\",", "def describe( config: Settings, df: pd.DataFrame, summarizer: BaseSummarizer, typeset: VisionsTypeset,", "these patterns in your data. - package: package details. \"\"\"", "pbar.update() # missing diagrams pbar.set_postfix_str(\"Get missing diagrams\") missing = get_missing_diagrams(config,", "\"missing\": missing, # Warnings \"messages\": messages, # Package \"package\": package,", "messages, # Package \"package\": package, # Sample \"sample\": samples, #", "scatter_matrix, # Correlation matrices \"correlations\": correlations, # Missing values \"missing\":", "from pandas_profiling.config import Settings from pandas_profiling.model.correlations import calculate_correlation from pandas_profiling.model.duplicates", "get_missing_diagrams(config, df, table_stats) pbar.update() # Sample pbar.set_postfix_str(\"Take sample\") if sample", "dataset description \"table\": table_stats, # Per variable descriptions \"variables\": series_description,", "if \"name\" not in sample: sample[\"name\"] = None if \"caption\"", "Args: config: report Settings object df: DataFrame. 
sample: optional, dict", "series in this DataFrame.\"\"\" import warnings from datetime import datetime", "not config.progress_bar date_start = datetime.utcnow() correlation_names = [ correlation_name for", "df: pd.DataFrame, summarizer: BaseSummarizer, typeset: VisionsTypeset, sample: Optional[dict] = None,", "# Overall dataset description \"table\": table_stats, # Per variable descriptions", "datetime.utcnow() correlation_names = [ correlation_name for correlation_name in [ \"pearson\",", "pbar.update() # Get correlations correlations = {} for correlation_name in", "None: raise ValueError(\"Can not describe a `lazy` ProfileReport without a", "config.json(), } pbar.update() pbar.set_postfix_str(\"Completed\") date_end = datetime.utcnow() analysis = {", "dictionary containing: - table: overall statistics. - variables: descriptions per", "with tqdm( total=number_of_tasks, desc=\"Summarize dataset\", disable=disable_progress_bar ) as pbar: series_description", "in sample: sample[\"name\"] = None if \"caption\" not in sample:", "tqdm from visions import VisionsTypeset from pandas_profiling.config import Settings from", "# Duplicates pbar.set_postfix_str(\"Locating duplicates\") metrics, duplicates = get_duplicates(config, df, supported_columns)", "make sure correlations is not None correlations = { key:", "{} for correlation_name in correlation_names: pbar.set_postfix_str(f\"Calculate {correlation_name} correlation\") correlations[correlation_name] =", ") from pandas_profiling.version import __version__ def describe( config: Settings, df:", "Table statistics pbar.set_postfix_str(\"Get table statistics\") table_stats = get_table_stats(config, df, series_description)", "type_name != \"Unsupported\" ] interval_columns = [ column for column,", "None, ) -> dict: \"\"\"Calculate the statistics for each series", "= { \"pandas_profiling_version\": __version__, \"pandas_profiling_config\": config.json(), } pbar.update() pbar.set_postfix_str(\"Completed\") date_end", "from 
pandas_profiling.model.summary import ( get_messages, get_missing_diagrams, get_scatter_matrix, get_series_descriptions, get_table_stats, )", "import datetime from typing import Optional import pandas as pd", "calculate_correlation( config, df, correlation_name, series_description ) pbar.update() # make sure", "date_start = datetime.utcnow() correlation_names = [ correlation_name for correlation_name in", "[ Sample( id=\"custom\", data=sample[\"data\"], name=sample[\"name\"], caption=sample[\"caption\"], ) ] pbar.update() #", "missing = get_missing_diagrams(config, df, table_stats) pbar.update() # Sample pbar.set_postfix_str(\"Take sample\")", "pbar.set_postfix_str(\"Locating duplicates\") metrics, duplicates = get_duplicates(config, df, supported_columns) table_stats.update(metrics) pbar.update()", "column for column, type_name in variables.items() if type_name == \"Numeric\"", "variable types\") variables = { column: description[\"type\"] for column, description", "not describe a `lazy` ProfileReport without a DataFrame.\") if not", "Overall dataset description \"table\": table_stats, # Per variable descriptions \"variables\":", "duplicates = get_duplicates(config, df, supported_columns) table_stats.update(metrics) pbar.update() # Messages pbar.set_postfix_str(\"Get", "df, table_stats) pbar.update() # Sample pbar.set_postfix_str(\"Take sample\") if sample is", "value for key, value in correlations.items() if value is not", "object df: DataFrame. sample: optional, dict with custom sample Returns:", "__version__ def describe( config: Settings, df: pd.DataFrame, summarizer: BaseSummarizer, typeset:", "typing import Optional import pandas as pd from tqdm.auto import", "describe( config: Settings, df: pd.DataFrame, summarizer: BaseSummarizer, typeset: VisionsTypeset, sample:", "\"duration\": date_end - date_start, } return { # Analysis metadata", "supported_columns = [ column for column, type_name in variables.items() if", "diagrams. 
- messages: direct special attention to these patterns in", "# Package \"package\": package, # Sample \"sample\": samples, # Duplicates", "df, summarizer, typeset, pbar ) pbar.set_postfix_str(\"Get variable types\") variables =", "= not config.progress_bar date_start = datetime.utcnow() correlation_names = [ correlation_name", "pbar.set_postfix_str(\"Get reproduction details\") package = { \"pandas_profiling_version\": __version__, \"pandas_profiling_config\": config.json(),", "sample: sample[\"name\"] = None if \"caption\" not in sample: sample[\"caption\"]", "date_end = datetime.utcnow() analysis = { \"title\": config.title, \"date_start\": date_start,", "Returns: This function returns a dictionary containing: - table: overall", "scatter matrix\") scatter_matrix = get_scatter_matrix(config, df, interval_columns) pbar.update() # Table", "from pandas_profiling.model.summarizer import BaseSummarizer from pandas_profiling.model.summary import ( get_messages, get_missing_diagrams,", "of statistics for each series in this DataFrame.\"\"\" import warnings", "relations \"scatter\": scatter_matrix, # Correlation matrices \"correlations\": correlations, # Missing", "dict: \"\"\"Calculate the statistics for each series in this DataFrame.", "function returns a dictionary containing: - table: overall statistics. 
-", "correlation_name, series_description ) pbar.update() # make sure correlations is not", "config, df, summarizer, typeset, pbar ) pbar.set_postfix_str(\"Get variable types\") variables", "type pandas.DataFrame\") disable_progress_bar = not config.progress_bar date_start = datetime.utcnow() correlation_names", "messages: direct special attention to these patterns in your data.", "# Missing values \"missing\": missing, # Warnings \"messages\": messages, #", "= { key: value for key, value in correlations.items() if", "= [ Sample( id=\"custom\", data=sample[\"data\"], name=sample[\"name\"], caption=sample[\"caption\"], ) ] pbar.update()", "each series in this DataFrame.\"\"\" import warnings from datetime import", "without a DataFrame.\") if not isinstance(df, pd.DataFrame): warnings.warn(\"df is not", "= {} for correlation_name in correlation_names: pbar.set_postfix_str(f\"Calculate {correlation_name} correlation\") correlations[correlation_name]", "this DataFrame.\"\"\" import warnings from datetime import datetime from typing", "Correlation matrices \"correlations\": correlations, # Missing values \"missing\": missing, #", "series_description, correlations) pbar.update() pbar.set_postfix_str(\"Get reproduction details\") package = { \"pandas_profiling_version\":", "correlation matrices. - missing: missing value diagrams. - messages: direct", "\"\"\"Calculate the statistics for each series in this DataFrame. Args:", "Settings object df: DataFrame. sample: optional, dict with custom sample", "in this DataFrame.\"\"\" import warnings from datetime import datetime from", "interval_columns = [ column for column, type_name in variables.items() if", "# Per variable descriptions \"variables\": series_description, # Bivariate relations \"scatter\":", "this DataFrame. Args: config: report Settings object df: DataFrame. 
sample:", "name=sample[\"name\"], caption=sample[\"caption\"], ) ] pbar.update() # Duplicates pbar.set_postfix_str(\"Locating duplicates\") metrics,", "\"cramers\", ] if config.correlations[correlation_name].calculate ] number_of_tasks = 8 + len(df.columns)", "= None if \"caption\" not in sample: sample[\"caption\"] = None", "get_scatter_matrix(config, df, interval_columns) pbar.update() # Table statistics pbar.set_postfix_str(\"Get table statistics\")", "raise ValueError(\"Can not describe a `lazy` ProfileReport without a DataFrame.\")", "types\") variables = { column: description[\"type\"] for column, description in", "get_series_descriptions, get_table_stats, ) from pandas_profiling.version import __version__ def describe( config:", "Sample, get_sample from pandas_profiling.model.summarizer import BaseSummarizer from pandas_profiling.model.summary import (", "from datetime import datetime from typing import Optional import pandas", "pandas_profiling.model.summary import ( get_messages, get_missing_diagrams, get_scatter_matrix, get_series_descriptions, get_table_stats, ) from", "VisionsTypeset, sample: Optional[dict] = None, ) -> dict: \"\"\"Calculate the", "[ column for column, type_name in variables.items() if type_name ==", "Messages pbar.set_postfix_str(\"Get messages/warnings\") messages = get_messages(config, table_stats, series_description, correlations) pbar.update()", "from pandas_profiling.version import __version__ def describe( config: Settings, df: pd.DataFrame,", "is not of type pandas.DataFrame\") disable_progress_bar = not config.progress_bar date_start", "values \"missing\": missing, # Warnings \"messages\": messages, # Package \"package\":", "Sample pbar.set_postfix_str(\"Take sample\") if sample is None: samples = get_sample(config,", "table_stats) pbar.update() # Sample pbar.set_postfix_str(\"Take sample\") if sample is None:", "# Bivariate relations \"scatter\": scatter_matrix, # Correlation matrices \"correlations\": correlations,", "column, 
type_name in variables.items() if type_name == \"Numeric\" ] pbar.update()", "config: Settings, df: pd.DataFrame, summarizer: BaseSummarizer, typeset: VisionsTypeset, sample: Optional[dict]", "= None samples = [ Sample( id=\"custom\", data=sample[\"data\"], name=sample[\"name\"], caption=sample[\"caption\"],", "len(correlation_names) with tqdm( total=number_of_tasks, desc=\"Summarize dataset\", disable=disable_progress_bar ) as pbar:", "= datetime.utcnow() analysis = { \"title\": config.title, \"date_start\": date_start, \"date_end\":", "pbar.set_postfix_str(\"Get scatter matrix\") scatter_matrix = get_scatter_matrix(config, df, interval_columns) pbar.update() #", "in correlations.items() if value is not None } # Scatter", "config.title, \"date_start\": date_start, \"date_end\": date_end, \"duration\": date_end - date_start, }", "in variables.items() if type_name != \"Unsupported\" ] interval_columns = [", "value diagrams. - messages: direct special attention to these patterns", "type_name == \"Numeric\" ] pbar.update() # Get correlations correlations =", "desc=\"Summarize dataset\", disable=disable_progress_bar ) as pbar: series_description = get_series_descriptions( config,", "messages/warnings\") messages = get_messages(config, table_stats, series_description, correlations) pbar.update() pbar.set_postfix_str(\"Get reproduction", "\"pearson\", \"spearman\", \"kendall\", \"phi_k\", \"cramers\", ] if config.correlations[correlation_name].calculate ] number_of_tasks", "ProfileReport without a DataFrame.\") if not isinstance(df, pd.DataFrame): warnings.warn(\"df is", "if not isinstance(df, pd.DataFrame): warnings.warn(\"df is not of type pandas.DataFrame\")", "-> dict: \"\"\"Calculate the statistics for each series in this", "in sample: sample[\"caption\"] = None samples = [ Sample( id=\"custom\",", "data=sample[\"data\"], name=sample[\"name\"], caption=sample[\"caption\"], ) ] pbar.update() # Duplicates pbar.set_postfix_str(\"Locating duplicates\")", "in 
variables.items() if type_name == \"Numeric\" ] pbar.update() # Get", "{ \"title\": config.title, \"date_start\": date_start, \"date_end\": date_end, \"duration\": date_end -", "package, # Sample \"sample\": samples, # Duplicates \"duplicates\": duplicates, }", "from pandas_profiling.model.duplicates import get_duplicates from pandas_profiling.model.sample import Sample, get_sample from", "is None: raise ValueError(\"Can not describe a `lazy` ProfileReport without", "\"pandas_profiling_config\": config.json(), } pbar.update() pbar.set_postfix_str(\"Completed\") date_end = datetime.utcnow() analysis =", "= None, ) -> dict: \"\"\"Calculate the statistics for each", "get_messages(config, table_stats, series_description, correlations) pbar.update() pbar.set_postfix_str(\"Get reproduction details\") package =", "import Sample, get_sample from pandas_profiling.model.summarizer import BaseSummarizer from pandas_profiling.model.summary import", "import Settings from pandas_profiling.model.correlations import calculate_correlation from pandas_profiling.model.duplicates import get_duplicates", "pbar.update() # Messages pbar.set_postfix_str(\"Get messages/warnings\") messages = get_messages(config, table_stats, series_description,", "descriptions per series. - correlations: correlation matrices. 
- missing: missing", "for correlation_name in correlation_names: pbar.set_postfix_str(f\"Calculate {correlation_name} correlation\") correlations[correlation_name] = calculate_correlation(", "correlation\") correlations[correlation_name] = calculate_correlation( config, df, correlation_name, series_description ) pbar.update()", "\"spearman\", \"kendall\", \"phi_k\", \"cramers\", ] if config.correlations[correlation_name].calculate ] number_of_tasks =", "`lazy` ProfileReport without a DataFrame.\") if not isinstance(df, pd.DataFrame): warnings.warn(\"df", "- messages: direct special attention to these patterns in your", "Optional import pandas as pd from tqdm.auto import tqdm from", "pandas_profiling.model.sample import Sample, get_sample from pandas_profiling.model.summarizer import BaseSummarizer from pandas_profiling.model.summary", "{ \"pandas_profiling_version\": __version__, \"pandas_profiling_config\": config.json(), } pbar.update() pbar.set_postfix_str(\"Completed\") date_end =", "Duplicates pbar.set_postfix_str(\"Locating duplicates\") metrics, duplicates = get_duplicates(config, df, supported_columns) table_stats.update(metrics)", "pbar.set_postfix_str(\"Take sample\") if sample is None: samples = get_sample(config, df)", "pandas_profiling.model.summarizer import BaseSummarizer from pandas_profiling.model.summary import ( get_messages, get_missing_diagrams, get_scatter_matrix,", "pbar: series_description = get_series_descriptions( config, df, summarizer, typeset, pbar )", "of type pandas.DataFrame\") disable_progress_bar = not config.progress_bar date_start = datetime.utcnow()", "\"caption\" not in sample: sample[\"caption\"] = None samples = [", "column: description[\"type\"] for column, description in series_description.items() } supported_columns =", "a `lazy` ProfileReport without a DataFrame.\") if not isinstance(df, pd.DataFrame):" ]
[ "campers = ['PKUxk','THUsz_ai','THUsz_cs','THUsz_data','USTC_cs'] for camper in campers: filename = camper", "if __name__ == \"__main__\": campers = ['PKUxk','THUsz_ai','THUsz_cs','THUsz_data','USTC_cs'] for camper in", "global index if name not in stdsDict: newStd = Std()", "index if name not in stdsDict: newStd = Std() newStd.name", "stds.append(newStd) stdsDict[name] = index index += 1 if camper not", "newStd.name = name stds.append(newStd) stdsDict[name] = index index += 1", "stdsDict[name] = index index += 1 if camper not in", "1 if camper not in stds[stdsDict[name]].offers: stds[stdsDict[name]].offers.append(camper) stds[stdsDict[name]].offerNum += 1", "+= 1 if __name__ == \"__main__\": campers = ['PKUxk','THUsz_ai','THUsz_cs','THUsz_data','USTC_cs'] for", "operator class Std(object): def __init__(self): self.name = '' self.offerNum =", "import operator class Std(object): def __init__(self): self.name = '' self.offerNum", "camper + '.txt' with open('data/%s'%(filename), \"r\") as f: data =", "not in stdsDict: newStd = Std() newStd.name = name stds.append(newStd)", "== \"__main__\": campers = ['PKUxk','THUsz_ai','THUsz_cs','THUsz_data','USTC_cs'] for camper in campers: filename", "stds[stdsDict[name]].offerNum += 1 if __name__ == \"__main__\": campers = ['PKUxk','THUsz_ai','THUsz_cs','THUsz_data','USTC_cs']", "+= 1 if camper not in stds[stdsDict[name]].offers: stds[stdsDict[name]].offers.append(camper) stds[stdsDict[name]].offerNum +=", "camper in campers: filename = camper + '.txt' with open('data/%s'%(filename),", "['PKUxk','THUsz_ai','THUsz_cs','THUsz_data','USTC_cs'] for camper in campers: filename = camper + '.txt'", "def readStd(name,camper): global stds global stdsDict global index if name", "'\\n': std.name = std.name[:-1] print(f'{std.name} 拿了 {std.offerNum} 个 offer: {std.offers}')", "def __init__(self): self.name = '' self.offerNum = 0 self.offers =", "for std in stds: if std.name[-1] == '\\n': std.name =", "in stds[stdsDict[name]].offers: 
stds[stdsDict[name]].offers.append(camper) stds[stdsDict[name]].offerNum += 1 if __name__ == \"__main__\":", "name not in stdsDict: newStd = Std() newStd.name = name", "= camper + '.txt' with open('data/%s'%(filename), \"r\") as f: data", "open('data/%s'%(filename), \"r\") as f: data = f.readlines() for std in", "filename = camper + '.txt' with open('data/%s'%(filename), \"r\") as f:", "in stds: if std.name[-1] == '\\n': std.name = std.name[:-1] print(f'{std.name}", "index index += 1 if camper not in stds[stdsDict[name]].offers: stds[stdsDict[name]].offers.append(camper)", "global stdsDict global index if name not in stdsDict: newStd", "<gh_stars>0 import operator class Std(object): def __init__(self): self.name = ''", "stds[stdsDict[name]].offers: stds[stdsDict[name]].offers.append(camper) stds[stdsDict[name]].offerNum += 1 if __name__ == \"__main__\": campers", "'.txt' with open('data/%s'%(filename), \"r\") as f: data = f.readlines() for", "True) for std in stds: if std.name[-1] == '\\n': std.name", "'' self.offerNum = 0 self.offers = [] stds = []", "__init__(self): self.name = '' self.offerNum = 0 self.offers = []", "= name stds.append(newStd) stdsDict[name] = index index += 1 if", "f: data = f.readlines() for std in data: readStd(std,camper) cmpfun", "Std() newStd.name = name stds.append(newStd) stdsDict[name] = index index +=", "in campers: filename = camper + '.txt' with open('data/%s'%(filename), \"r\")", "[] stdsDict = {} index = 0 def readStd(name,camper): global", "= 0 self.offers = [] stds = [] stdsDict =", "stdsDict: newStd = Std() newStd.name = name stds.append(newStd) stdsDict[name] =", "with open('data/%s'%(filename), \"r\") as f: data = f.readlines() for std", "f.readlines() for std in data: readStd(std,camper) cmpfun = operator.attrgetter('offerNum','name') stds.sort(key", "= ['PKUxk','THUsz_ai','THUsz_cs','THUsz_data','USTC_cs'] for camper in campers: filename = camper +", "= 0 def readStd(name,camper): global stds global stdsDict global index", 
"__name__ == \"__main__\": campers = ['PKUxk','THUsz_ai','THUsz_cs','THUsz_data','USTC_cs'] for camper in campers:", "cmpfun,reverse = True) for std in stds: if std.name[-1] ==", "in data: readStd(std,camper) cmpfun = operator.attrgetter('offerNum','name') stds.sort(key = cmpfun,reverse =", "\"__main__\": campers = ['PKUxk','THUsz_ai','THUsz_cs','THUsz_data','USTC_cs'] for camper in campers: filename =", "+ '.txt' with open('data/%s'%(filename), \"r\") as f: data = f.readlines()", "cmpfun = operator.attrgetter('offerNum','name') stds.sort(key = cmpfun,reverse = True) for std", "stds: if std.name[-1] == '\\n': std.name = std.name[:-1] print(f'{std.name} 拿了", "data = f.readlines() for std in data: readStd(std,camper) cmpfun =", "for camper in campers: filename = camper + '.txt' with", "class Std(object): def __init__(self): self.name = '' self.offerNum = 0", "= cmpfun,reverse = True) for std in stds: if std.name[-1]", "= f.readlines() for std in data: readStd(std,camper) cmpfun = operator.attrgetter('offerNum','name')", "for std in data: readStd(std,camper) cmpfun = operator.attrgetter('offerNum','name') stds.sort(key =", "{} index = 0 def readStd(name,camper): global stds global stdsDict", "readStd(name,camper): global stds global stdsDict global index if name not", "if camper not in stds[stdsDict[name]].offers: stds[stdsDict[name]].offers.append(camper) stds[stdsDict[name]].offerNum += 1 if", "newStd = Std() newStd.name = name stds.append(newStd) stdsDict[name] = index", "= [] stds = [] stdsDict = {} index =", "0 self.offers = [] stds = [] stdsDict = {}", "stds = [] stdsDict = {} index = 0 def", "not in stds[stdsDict[name]].offers: stds[stdsDict[name]].offers.append(camper) stds[stdsDict[name]].offerNum += 1 if __name__ ==", "std.name[-1] == '\\n': std.name = std.name[:-1] print(f'{std.name} 拿了 {std.offerNum} 个", "if name not in stdsDict: newStd = Std() newStd.name =", "= '' self.offerNum = 0 self.offers = [] stds =", "= [] stdsDict = {} index = 0 def 
readStd(name,camper):", "operator.attrgetter('offerNum','name') stds.sort(key = cmpfun,reverse = True) for std in stds:", "= operator.attrgetter('offerNum','name') stds.sort(key = cmpfun,reverse = True) for std in", "= index index += 1 if camper not in stds[stdsDict[name]].offers:", "Std(object): def __init__(self): self.name = '' self.offerNum = 0 self.offers", "index += 1 if camper not in stds[stdsDict[name]].offers: stds[stdsDict[name]].offers.append(camper) stds[stdsDict[name]].offerNum", "= True) for std in stds: if std.name[-1] == '\\n':", "self.offerNum = 0 self.offers = [] stds = [] stdsDict", "std in stds: if std.name[-1] == '\\n': std.name = std.name[:-1]", "stds global stdsDict global index if name not in stdsDict:", "stdsDict = {} index = 0 def readStd(name,camper): global stds", "readStd(std,camper) cmpfun = operator.attrgetter('offerNum','name') stds.sort(key = cmpfun,reverse = True) for", "self.offers = [] stds = [] stdsDict = {} index", "stdsDict global index if name not in stdsDict: newStd =", "\"r\") as f: data = f.readlines() for std in data:", "data: readStd(std,camper) cmpfun = operator.attrgetter('offerNum','name') stds.sort(key = cmpfun,reverse = True)", "as f: data = f.readlines() for std in data: readStd(std,camper)", "campers: filename = camper + '.txt' with open('data/%s'%(filename), \"r\") as", "std in data: readStd(std,camper) cmpfun = operator.attrgetter('offerNum','name') stds.sort(key = cmpfun,reverse", "[] stds = [] stdsDict = {} index = 0", "0 def readStd(name,camper): global stds global stdsDict global index if", "in stdsDict: newStd = Std() newStd.name = name stds.append(newStd) stdsDict[name]", "= Std() newStd.name = name stds.append(newStd) stdsDict[name] = index index", "camper not in stds[stdsDict[name]].offers: stds[stdsDict[name]].offers.append(camper) stds[stdsDict[name]].offerNum += 1 if __name__", "name stds.append(newStd) stdsDict[name] = index index += 1 if camper", "stds.sort(key = cmpfun,reverse = True) for std in 
stds: if", "1 if __name__ == \"__main__\": campers = ['PKUxk','THUsz_ai','THUsz_cs','THUsz_data','USTC_cs'] for camper", "self.name = '' self.offerNum = 0 self.offers = [] stds", "== '\\n': std.name = std.name[:-1] print(f'{std.name} 拿了 {std.offerNum} 个 offer:", "global stds global stdsDict global index if name not in", "= {} index = 0 def readStd(name,camper): global stds global", "index = 0 def readStd(name,camper): global stds global stdsDict global", "stds[stdsDict[name]].offers.append(camper) stds[stdsDict[name]].offerNum += 1 if __name__ == \"__main__\": campers =", "if std.name[-1] == '\\n': std.name = std.name[:-1] print(f'{std.name} 拿了 {std.offerNum}" ]
[ "'copyreg', 'html', 'http', 'queue', 'reprlib', 'socketserver', 'tkinter', 'winreg', 'xmlrpc' ])", "Do any of the above folders exist in build/lib? files", "2\" LONG_DESC = src.future.__doc__ AUTHOR = \"<NAME>\" AUTHOR_EMAIL = \"<EMAIL>\"", "If the user happens to run: # python2 setup.py build", "\"future.types\", \"future.standard_library\", \"future.backports\", \"future.backports.email\", \"future.backports.email.mime\", \"future.backports.html\", \"future.backports.http\", \"future.backports.test\", \"future.backports.urllib\", \"future.backports.xmlrpc\",", "\"http\", \"queue\", \"reprlib\", \"socketserver\", \"tkinter\", \"winreg\", \"xmlrpc\", \"_dummy_thread\", \"_markupbase\", \"_thread\",", ":: Python :: 3.4\", \"License :: OSI Approved\", \"License ::", "\"future.backports.urllib\", \"future.backports.xmlrpc\", \"future.moves\", \"future.moves.dbm\", \"future.moves.html\", \"future.moves.http\", \"future.moves.test\", \"future.moves.tkinter\", \"future.moves.urllib\", \"future.moves.xmlrpc\",", "= \"Clean single-source support for Python 3 and 2\" LONG_DESC", "Language :: Python :: 2.7\", \"Programming Language :: Python ::", "install, depending on the folder order in # sys.path. 
(Running", "OSI Approved :: MIT License\", \"Development Status :: 4 -", "} REQUIRES = [] TEST_REQUIRES = [] if sys.version_info[:2] ==", "user happens to run: # python2 setup.py build # python3", "\"future.moves.http\", \"future.moves.test\", \"future.moves.tkinter\", \"future.moves.urllib\", \"future.moves.xmlrpc\", \"future.tests\", # for future.tests.base #", "['*.py'], } REQUIRES = [] TEST_REQUIRES = [] if sys.version_info[:2]", "\"builtins\", \"configparser\", \"copyreg\", \"html\", \"http\", \"queue\", \"reprlib\", \"socketserver\", \"tkinter\", \"winreg\",", "LICENSE = \"MIT\" KEYWORDS = \"future past python3 migration futurize", "'html', 'http', 'queue', 'reprlib', 'socketserver', 'tkinter', 'winreg', 'xmlrpc' ]) if", "if they run \"python2 setup.py # build\" and then \"python3", "\"future.tests.test_email\", \"future.utils\", \"past\", \"past.builtins\", \"past.types\", \"past.utils\", # \"past.tests\", \"past.translation\", \"libfuturize\",", "of the intended system stdlib modules.) 
SYSTEM_MODULES = set([ '_dummy_thread',", "import src.future VERSION = src.future.__version__ DESCRIPTION = \"Clean single-source support", "may pick up our Py2 # substitute packages, instead of", "from distutils.core import setup if sys.argv[-1] == 'publish': os.system('python setup.py", "pass setup(name=NAME, version=VERSION, author=AUTHOR, author_email=AUTHOR_EMAIL, url=URL, description=DESCRIPTION, long_description=LONG_DESC, license=LICENSE, keywords=KEYWORDS,", "package_dir={'': 'src'}, packages=PACKAGES, package_data=PACKAGE_DATA, include_package_data=True, install_requires=REQUIRES, classifiers=CLASSIFIERS, test_suite = \"discover_tests\",", "pasteurize 3to2\" CLASSIFIERS = [ \"Programming Language :: Python\", \"Programming", "install # then folders like \"configparser\" will be in build/lib.", "'_markupbase', '_thread', 'builtins', 'configparser', 'copyreg', 'html', 'http', 'queue', 'reprlib', 'socketserver',", "\"Programming Language :: Python :: 3\", \"Programming Language :: Python", "3.3\", \"Programming Language :: Python :: 3.4\", \"License :: OSI", "2.7\", \"Programming Language :: Python :: 3\", \"Programming Language ::", "+= ['unittest2'] import src.future VERSION = src.future.__version__ DESCRIPTION = \"Clean", "'builtins', 'configparser', 'copyreg', 'html', 'http', 'queue', 'reprlib', 'socketserver', 'tkinter', 'winreg',", "if sys.version_info[:2] == (2, 6): REQUIRES += ['importlib', 'argparse'] TEST_REQUIRES", "\"libfuturize\", \"libfuturize.fixes\", \"libpasteurize\", \"libpasteurize.fixes\", ] # PEP 3108 stdlib moves:", "= src.future.__version__ DESCRIPTION = \"Clean single-source support for Python 3", "try: # If the user happens to run: # python2", "\"future\" PACKAGES = [\"future\", \"future.builtins\", \"future.types\", \"future.standard_library\", \"future.backports\", \"future.backports.email\", \"future.backports.email.mime\",", "[ 'futurize = libfuturize.main:main', 'pasteurize = libpasteurize.main:main' ] }, package_dir={'':", "run 
' 'setup.py again.', file=sys.stderr) sys.exit(1) except OSError: pass setup(name=NAME,", "sys.exit() NAME = \"future\" PACKAGES = [\"future\", \"future.builtins\", \"future.types\", \"future.standard_library\",", "Python :: 3\", \"Programming Language :: Python :: 3.3\", \"Programming", "\"future.standard_library\", \"future.backports\", \"future.backports.email\", \"future.backports.email.mime\", \"future.backports.html\", \"future.backports.http\", \"future.backports.test\", \"future.backports.urllib\", \"future.backports.xmlrpc\", \"future.moves\",", "in # sys.path. (Running \"import configparser\" etc. may pick up", "Audience :: Developers\", ] setup_kwds = {} # * Important", "\"Development Status :: 4 - Beta\", \"Intended Audience :: Developers\",", "\"configparser\", \"copyreg\", \"html\", \"http\", \"queue\", \"reprlib\", \"socketserver\", \"tkinter\", \"winreg\", \"xmlrpc\",", "CLASSIFIERS = [ \"Programming Language :: Python\", \"Programming Language ::", "PEP 3108 stdlib moves: if sys.version_info[:2] < (3, 0): PACKAGES", "'socketserver', 'tkinter', 'winreg', 'xmlrpc' ]) if sys.version_info[0] >= 3: #", "like \"configparser\" will be in build/lib. # If so, we", "'TESTING.txt', ], 'tests': ['*.py'], } REQUIRES = [] TEST_REQUIRES =", "[ \"builtins\", \"configparser\", \"copyreg\", \"html\", \"http\", \"queue\", \"reprlib\", \"socketserver\", \"tkinter\",", "libpasteurize.main:main' ] }, package_dir={'': 'src'}, packages=PACKAGES, package_data=PACKAGE_DATA, include_package_data=True, install_requires=REQUIRES, classifiers=CLASSIFIERS,", "depending on the folder order in # sys.path. 
(Running \"import", ":: Python :: 3.3\", \"Programming Language :: Python :: 3.4\",", "import os.path import sys try: from setuptools import setup except", "up our Py2 # substitute packages, instead of the intended", "MIT License\", \"Development Status :: 4 - Beta\", \"Intended Audience", "build folder to avoid breaking the # user's Py3 installation", "Language :: Python :: 3.3\", \"Programming Language :: Python ::", "'lib')) if len(set(files) & set(SYSTEM_MODULES)) > 0: print('ERROR: Your build", "file=sys.stderr) sys.exit(1) except OSError: pass setup(name=NAME, version=VERSION, author=AUTHOR, author_email=AUTHOR_EMAIL, url=URL,", "avoid breaking the # user's Py3 installation if they run", "url=URL, description=DESCRIPTION, long_description=LONG_DESC, license=LICENSE, keywords=KEYWORDS, entry_points={ 'console_scripts': [ 'futurize =", "build # python3 setup.py install # then folders like \"configparser\"", "'tests': ['*.py'], } REQUIRES = [] TEST_REQUIRES = [] if", "setup.py # build\" and then \"python3 setup.py install\". try: #", "> 0: print('ERROR: Your build folder is in an inconsistent", "\"configparser\" will be in build/lib. 
# If so, we CANNOT", "except OSError: pass setup(name=NAME, version=VERSION, author=AUTHOR, author_email=AUTHOR_EMAIL, url=URL, description=DESCRIPTION, long_description=LONG_DESC,", "except ImportError: from distutils.core import setup if sys.argv[-1] == 'publish':", "= src.future.__doc__ AUTHOR = \"<NAME>\" AUTHOR_EMAIL = \"<EMAIL>\" URL=\"https://python-future.org\" LICENSE", ":: 2.6\", \"Programming Language :: Python :: 2.7\", \"Programming Language", "& set(SYSTEM_MODULES)) > 0: print('ERROR: Your build folder is in", "'LICENSE.txt', 'futurize.py', 'pasteurize.py', 'discover_tests.py', 'check_rst.sh', 'TESTING.txt', ], 'tests': ['*.py'], }", "install this, because # this may break his/her Python 3", "the user install this, because # this may break his/her", "\"queue\", \"reprlib\", \"socketserver\", \"tkinter\", \"winreg\", \"xmlrpc\", \"_dummy_thread\", \"_markupbase\", \"_thread\", ]", "\"future.backports.xmlrpc\", \"future.moves\", \"future.moves.dbm\", \"future.moves.html\", \"future.moves.http\", \"future.moves.test\", \"future.moves.tkinter\", \"future.moves.urllib\", \"future.moves.xmlrpc\", \"future.tests\",", "sys.argv[-1] == 'publish': os.system('python setup.py sdist upload') sys.exit() NAME =", "set(SYSTEM_MODULES)) > 0: print('ERROR: Your build folder is in an", "folder order in # sys.path. (Running \"import configparser\" etc. 
may", "\"future.utils\", \"past\", \"past.builtins\", \"past.types\", \"past.utils\", # \"past.tests\", \"past.translation\", \"libfuturize\", \"libfuturize.fixes\",", "CANNOT let the user install this, because # this may", "\"tkinter\", \"winreg\", \"xmlrpc\", \"_dummy_thread\", \"_markupbase\", \"_thread\", ] PACKAGE_DATA = {'':", "installation if they run \"python2 setup.py # build\" and then", "\"socketserver\", \"tkinter\", \"winreg\", \"xmlrpc\", \"_dummy_thread\", \"_markupbase\", \"_thread\", ] PACKAGE_DATA =", "set([ '_dummy_thread', '_markupbase', '_thread', 'builtins', 'configparser', 'copyreg', 'html', 'http', 'queue',", "above folders exist in build/lib? files = os.listdir(os.path.join('build', 'lib')) if", "]) if sys.version_info[0] >= 3: # Do any of the", "'publish': os.system('python setup.py sdist upload') sys.exit() NAME = \"future\" PACKAGES", "they run \"python2 setup.py # build\" and then \"python3 setup.py", "'src'}, packages=PACKAGES, package_data=PACKAGE_DATA, include_package_data=True, install_requires=REQUIRES, classifiers=CLASSIFIERS, test_suite = \"discover_tests\", tests_require=TEST_REQUIRES,", "and 2\" LONG_DESC = src.future.__doc__ AUTHOR = \"<NAME>\" AUTHOR_EMAIL =", "\"future.moves.xmlrpc\", \"future.tests\", # for future.tests.base # \"future.tests.test_email\", \"future.utils\", \"past\", \"past.builtins\",", "migration futurize backport six 2to3 modernize pasteurize 3to2\" CLASSIFIERS =", "setup_kwds = {} # * Important * # We forcibly", "setup.py install\". try: # If the user happens to run:", "any of the above folders exist in build/lib? files =", "= \"MIT\" KEYWORDS = \"future past python3 migration futurize backport", "\"python3 setup.py install\". 
try: # If the user happens to", "manually and run ' 'setup.py again.', file=sys.stderr) sys.exit(1) except OSError:", "setup except ImportError: from distutils.core import setup if sys.argv[-1] ==", "Developers\", ] setup_kwds = {} # * Important * #", "Python :: 2.6\", \"Programming Language :: Python :: 2.7\", \"Programming", "'_thread', 'builtins', 'configparser', 'copyreg', 'html', 'http', 'queue', 'reprlib', 'socketserver', 'tkinter',", "and then \"python3 setup.py install\". try: # If the user", "= \"future past python3 migration futurize backport six 2to3 modernize", "'http', 'queue', 'reprlib', 'socketserver', 'tkinter', 'winreg', 'xmlrpc' ]) if sys.version_info[0]", "\"future.backports.test\", \"future.backports.urllib\", \"future.backports.xmlrpc\", \"future.moves\", \"future.moves.dbm\", \"future.moves.html\", \"future.moves.http\", \"future.moves.test\", \"future.moves.tkinter\", \"future.moves.urllib\",", "version=VERSION, author=AUTHOR, author_email=AUTHOR_EMAIL, url=URL, description=DESCRIPTION, long_description=LONG_DESC, license=LICENSE, keywords=KEYWORDS, entry_points={ 'console_scripts':", "will be in build/lib. # If so, we CANNOT let", "packages, instead of the intended system stdlib modules.) SYSTEM_MODULES =", "\"future.moves\", \"future.moves.dbm\", \"future.moves.html\", \"future.moves.http\", \"future.moves.test\", \"future.moves.tkinter\", \"future.moves.urllib\", \"future.moves.xmlrpc\", \"future.tests\", #", "from __future__ import absolute_import, print_function import os import os.path import", "\"python2 setup.py # build\" and then \"python3 setup.py install\". 
try:", "* # We forcibly remove the build folder to avoid", "REQUIRES = [] TEST_REQUIRES = [] if sys.version_info[:2] == (2,", "# for future.tests.base # \"future.tests.test_email\", \"future.utils\", \"past\", \"past.builtins\", \"past.types\", \"past.utils\",", "distutils.core import setup if sys.argv[-1] == 'publish': os.system('python setup.py sdist", "'README.rst', 'LICENSE.txt', 'futurize.py', 'pasteurize.py', 'discover_tests.py', 'check_rst.sh', 'TESTING.txt', ], 'tests': ['*.py'],", "3.x install. Please remove it manually and run ' 'setup.py", "python2 setup.py build # python3 setup.py install # then folders", "be in build/lib. # If so, we CANNOT let the", "(Running \"import configparser\" etc. may pick up our Py2 #", "\"future.backports.http\", \"future.backports.test\", \"future.backports.urllib\", \"future.backports.xmlrpc\", \"future.moves\", \"future.moves.dbm\", \"future.moves.html\", \"future.moves.http\", \"future.moves.test\", \"future.moves.tkinter\",", "moves: if sys.version_info[:2] < (3, 0): PACKAGES += [ \"builtins\",", "python3 migration futurize backport six 2to3 modernize pasteurize 3to2\" CLASSIFIERS", "\"html\", \"http\", \"queue\", \"reprlib\", \"socketserver\", \"tkinter\", \"winreg\", \"xmlrpc\", \"_dummy_thread\", \"_markupbase\",", "'console_scripts': [ 'futurize = libfuturize.main:main', 'pasteurize = libpasteurize.main:main' ] },", "\"MIT\" KEYWORDS = \"future past python3 migration futurize backport six", "six 2to3 modernize pasteurize 3to2\" CLASSIFIERS = [ \"Programming Language", "3.4\", \"License :: OSI Approved\", \"License :: OSI Approved ::", "\"past\", \"past.builtins\", \"past.types\", \"past.utils\", # \"past.tests\", \"past.translation\", \"libfuturize\", \"libfuturize.fixes\", \"libpasteurize\",", "'pasteurize = libpasteurize.main:main' ] }, package_dir={'': 'src'}, packages=PACKAGES, package_data=PACKAGE_DATA, include_package_data=True,", "+= ['importlib', 'argparse'] TEST_REQUIRES += ['unittest2'] import src.future 
VERSION =", "run: # python2 setup.py build # python3 setup.py install #", "long_description=LONG_DESC, license=LICENSE, keywords=KEYWORDS, entry_points={ 'console_scripts': [ 'futurize = libfuturize.main:main', 'pasteurize", "pick up our Py2 # substitute packages, instead of the", "# * Important * # We forcibly remove the build", "setuptools import setup except ImportError: from distutils.core import setup if", "is in an inconsistent state for ' 'a Python 3.x", "\"future.tests\", # for future.tests.base # \"future.tests.test_email\", \"future.utils\", \"past\", \"past.builtins\", \"past.types\",", "6): REQUIRES += ['importlib', 'argparse'] TEST_REQUIRES += ['unittest2'] import src.future", "KEYWORDS = \"future past python3 migration futurize backport six 2to3", "3108 stdlib moves: if sys.version_info[:2] < (3, 0): PACKAGES +=", "single-source support for Python 3 and 2\" LONG_DESC = src.future.__doc__", "# build\" and then \"python3 setup.py install\". try: # If", "futurize backport six 2to3 modernize pasteurize 3to2\" CLASSIFIERS = [", "os import os.path import sys try: from setuptools import setup", "setup.py install # then folders like \"configparser\" will be in", "ImportError: from distutils.core import setup if sys.argv[-1] == 'publish': os.system('python", "Python :: 3.3\", \"Programming Language :: Python :: 3.4\", \"License", "import sys try: from setuptools import setup except ImportError: from", "# If the user happens to run: # python2 setup.py", "modernize pasteurize 3to2\" CLASSIFIERS = [ \"Programming Language :: Python\",", "the folder order in # sys.path. (Running \"import configparser\" etc.", "build\" and then \"python3 setup.py install\". 
try: # If the", "our Py2 # substitute packages, instead of the intended system", "\"future past python3 migration futurize backport six 2to3 modernize pasteurize", "2.6\", \"Programming Language :: Python :: 2.7\", \"Programming Language ::", "\"future.moves.test\", \"future.moves.tkinter\", \"future.moves.urllib\", \"future.moves.xmlrpc\", \"future.tests\", # for future.tests.base # \"future.tests.test_email\",", "DESCRIPTION = \"Clean single-source support for Python 3 and 2\"", "stdlib moves: if sys.version_info[:2] < (3, 0): PACKAGES += [", "= [\"future\", \"future.builtins\", \"future.types\", \"future.standard_library\", \"future.backports\", \"future.backports.email\", \"future.backports.email.mime\", \"future.backports.html\", \"future.backports.http\",", ":: Developers\", ] setup_kwds = {} # * Important *", "# We forcibly remove the build folder to avoid breaking", "\"copyreg\", \"html\", \"http\", \"queue\", \"reprlib\", \"socketserver\", \"tkinter\", \"winreg\", \"xmlrpc\", \"_dummy_thread\",", "= [ \"Programming Language :: Python\", \"Programming Language :: Python", "etc. may pick up our Py2 # substitute packages, instead", "= os.listdir(os.path.join('build', 'lib')) if len(set(files) & set(SYSTEM_MODULES)) > 0: print('ERROR:", "Language :: Python :: 3.4\", \"License :: OSI Approved\", \"License", "Python 3.x install. Please remove it manually and run '", "sys try: from setuptools import setup except ImportError: from distutils.core", "package_data=PACKAGE_DATA, include_package_data=True, install_requires=REQUIRES, classifiers=CLASSIFIERS, test_suite = \"discover_tests\", tests_require=TEST_REQUIRES, **setup_kwds )", "'queue', 'reprlib', 'socketserver', 'tkinter', 'winreg', 'xmlrpc' ]) if sys.version_info[0] >=", "'winreg', 'xmlrpc' ]) if sys.version_info[0] >= 3: # Do any", "an inconsistent state for ' 'a Python 3.x install. 
Please", "# Do any of the above folders exist in build/lib?", "(3, 0): PACKAGES += [ \"builtins\", \"configparser\", \"copyreg\", \"html\", \"http\",", "\"future.moves.dbm\", \"future.moves.html\", \"future.moves.http\", \"future.moves.test\", \"future.moves.tkinter\", \"future.moves.urllib\", \"future.moves.xmlrpc\", \"future.tests\", # for", "{'': [ 'README.rst', 'LICENSE.txt', 'futurize.py', 'pasteurize.py', 'discover_tests.py', 'check_rst.sh', 'TESTING.txt', ],", "URL=\"https://python-future.org\" LICENSE = \"MIT\" KEYWORDS = \"future past python3 migration", "then \"python3 setup.py install\". try: # If the user happens", "remove the build folder to avoid breaking the # user's", "build folder is in an inconsistent state for ' 'a", "state for ' 'a Python 3.x install. Please remove it", "SYSTEM_MODULES = set([ '_dummy_thread', '_markupbase', '_thread', 'builtins', 'configparser', 'copyreg', 'html',", "Beta\", \"Intended Audience :: Developers\", ] setup_kwds = {} #", "OSError: pass setup(name=NAME, version=VERSION, author=AUTHOR, author_email=AUTHOR_EMAIL, url=URL, description=DESCRIPTION, long_description=LONG_DESC, license=LICENSE,", "os.system('python setup.py sdist upload') sys.exit() NAME = \"future\" PACKAGES =", "remove it manually and run ' 'setup.py again.', file=sys.stderr) sys.exit(1)", "3to2\" CLASSIFIERS = [ \"Programming Language :: Python\", \"Programming Language", "= \"<EMAIL>\" URL=\"https://python-future.org\" LICENSE = \"MIT\" KEYWORDS = \"future past", "if len(set(files) & set(SYSTEM_MODULES)) > 0: print('ERROR: Your build folder", "Python :: 2.7\", \"Programming Language :: Python :: 3\", \"Programming", "OSI Approved\", \"License :: OSI Approved :: MIT License\", \"Development", "'setup.py again.', file=sys.stderr) sys.exit(1) except OSError: pass setup(name=NAME, version=VERSION, author=AUTHOR,", ":: OSI Approved\", \"License :: OSI Approved :: MIT License\",", "may break his/her Python 3 install, depending on the folder", "PACKAGES = 
[\"future\", \"future.builtins\", \"future.types\", \"future.standard_library\", \"future.backports\", \"future.backports.email\", \"future.backports.email.mime\", \"future.backports.html\",", "in an inconsistent state for ' 'a Python 3.x install.", "] # PEP 3108 stdlib moves: if sys.version_info[:2] < (3,", "= libpasteurize.main:main' ] }, package_dir={'': 'src'}, packages=PACKAGES, package_data=PACKAGE_DATA, include_package_data=True, install_requires=REQUIRES,", "author_email=AUTHOR_EMAIL, url=URL, description=DESCRIPTION, long_description=LONG_DESC, license=LICENSE, keywords=KEYWORDS, entry_points={ 'console_scripts': [ 'futurize", "\"future.backports.html\", \"future.backports.http\", \"future.backports.test\", \"future.backports.urllib\", \"future.backports.xmlrpc\", \"future.moves\", \"future.moves.dbm\", \"future.moves.html\", \"future.moves.http\", \"future.moves.test\",", "License\", \"Development Status :: 4 - Beta\", \"Intended Audience ::", "for future.tests.base # \"future.tests.test_email\", \"future.utils\", \"past\", \"past.builtins\", \"past.types\", \"past.utils\", #", "\"future.builtins\", \"future.types\", \"future.standard_library\", \"future.backports\", \"future.backports.email\", \"future.backports.email.mime\", \"future.backports.html\", \"future.backports.http\", \"future.backports.test\", \"future.backports.urllib\",", "user install this, because # this may break his/her Python", "configparser\" etc. may pick up our Py2 # substitute packages,", "'a Python 3.x install. Please remove it manually and run", "the intended system stdlib modules.) SYSTEM_MODULES = set([ '_dummy_thread', '_markupbase',", "inconsistent state for ' 'a Python 3.x install. 
Please remove", "\"libpasteurize\", \"libpasteurize.fixes\", ] # PEP 3108 stdlib moves: if sys.version_info[:2]", "from setuptools import setup except ImportError: from distutils.core import setup", "\"<NAME>\" AUTHOR_EMAIL = \"<EMAIL>\" URL=\"https://python-future.org\" LICENSE = \"MIT\" KEYWORDS =", "libfuturize.main:main', 'pasteurize = libpasteurize.main:main' ] }, package_dir={'': 'src'}, packages=PACKAGES, package_data=PACKAGE_DATA,", "\"Programming Language :: Python :: 3.4\", \"License :: OSI Approved\",", "setup.py sdist upload') sys.exit() NAME = \"future\" PACKAGES = [\"future\",", "run \"python2 setup.py # build\" and then \"python3 setup.py install\".", "to run: # python2 setup.py build # python3 setup.py install", "PACKAGES += [ \"builtins\", \"configparser\", \"copyreg\", \"html\", \"http\", \"queue\", \"reprlib\",", "AUTHOR = \"<NAME>\" AUTHOR_EMAIL = \"<EMAIL>\" URL=\"https://python-future.org\" LICENSE = \"MIT\"", "Important * # We forcibly remove the build folder to", "in build/lib? files = os.listdir(os.path.join('build', 'lib')) if len(set(files) & set(SYSTEM_MODULES))", "\"_dummy_thread\", \"_markupbase\", \"_thread\", ] PACKAGE_DATA = {'': [ 'README.rst', 'LICENSE.txt',", "print('ERROR: Your build folder is in an inconsistent state for", "= set([ '_dummy_thread', '_markupbase', '_thread', 'builtins', 'configparser', 'copyreg', 'html', 'http',", "for Python 3 and 2\" LONG_DESC = src.future.__doc__ AUTHOR =", "Language :: Python :: 3\", \"Programming Language :: Python ::", "= [] TEST_REQUIRES = [] if sys.version_info[:2] == (2, 6):", "\"winreg\", \"xmlrpc\", \"_dummy_thread\", \"_markupbase\", \"_thread\", ] PACKAGE_DATA = {'': [", "We forcibly remove the build folder to avoid breaking the", "\"Intended Audience :: Developers\", ] setup_kwds = {} # *", "intended system stdlib modules.) 
SYSTEM_MODULES = set([ '_dummy_thread', '_markupbase', '_thread',", "entry_points={ 'console_scripts': [ 'futurize = libfuturize.main:main', 'pasteurize = libpasteurize.main:main' ]", "import setup except ImportError: from distutils.core import setup if sys.argv[-1]", "' 'a Python 3.x install. Please remove it manually and", "'pasteurize.py', 'discover_tests.py', 'check_rst.sh', 'TESTING.txt', ], 'tests': ['*.py'], } REQUIRES =", "Python 3 and 2\" LONG_DESC = src.future.__doc__ AUTHOR = \"<NAME>\"", "+= [ \"builtins\", \"configparser\", \"copyreg\", \"html\", \"http\", \"queue\", \"reprlib\", \"socketserver\",", "2to3 modernize pasteurize 3to2\" CLASSIFIERS = [ \"Programming Language ::", "python3 setup.py install # then folders like \"configparser\" will be", "If so, we CANNOT let the user install this, because", "it manually and run ' 'setup.py again.', file=sys.stderr) sys.exit(1) except", "'check_rst.sh', 'TESTING.txt', ], 'tests': ['*.py'], } REQUIRES = [] TEST_REQUIRES", "3 and 2\" LONG_DESC = src.future.__doc__ AUTHOR = \"<NAME>\" AUTHOR_EMAIL", "past python3 migration futurize backport six 2to3 modernize pasteurize 3to2\"", "PACKAGE_DATA = {'': [ 'README.rst', 'LICENSE.txt', 'futurize.py', 'pasteurize.py', 'discover_tests.py', 'check_rst.sh',", "breaking the # user's Py3 installation if they run \"python2", "= [] if sys.version_info[:2] == (2, 6): REQUIRES += ['importlib',", "[] if sys.version_info[:2] == (2, 6): REQUIRES += ['importlib', 'argparse']", "len(set(files) & set(SYSTEM_MODULES)) > 0: print('ERROR: Your build folder is", "break his/her Python 3 install, depending on the folder order", "Python 3 install, depending on the folder order in #", "{} # * Important * # We forcibly remove the", "exist in build/lib? 
files = os.listdir(os.path.join('build', 'lib')) if len(set(files) &", "os.listdir(os.path.join('build', 'lib')) if len(set(files) & set(SYSTEM_MODULES)) > 0: print('ERROR: Your", "\"Programming Language :: Python\", \"Programming Language :: Python :: 2.6\",", "\"future.moves.html\", \"future.moves.http\", \"future.moves.test\", \"future.moves.tkinter\", \"future.moves.urllib\", \"future.moves.xmlrpc\", \"future.tests\", # for future.tests.base", "\"Programming Language :: Python :: 3.3\", \"Programming Language :: Python", "== 'publish': os.system('python setup.py sdist upload') sys.exit() NAME = \"future\"", "'reprlib', 'socketserver', 'tkinter', 'winreg', 'xmlrpc' ]) if sys.version_info[0] >= 3:", "'tkinter', 'winreg', 'xmlrpc' ]) if sys.version_info[0] >= 3: # Do", "this may break his/her Python 3 install, depending on the", "Py2 # substitute packages, instead of the intended system stdlib", "because # this may break his/her Python 3 install, depending", "4 - Beta\", \"Intended Audience :: Developers\", ] setup_kwds =", "Your build folder is in an inconsistent state for '", "__future__ import absolute_import, print_function import os import os.path import sys", "Language :: Python\", \"Programming Language :: Python :: 2.6\", \"Programming", "\"License :: OSI Approved\", \"License :: OSI Approved :: MIT", "# then folders like \"configparser\" will be in build/lib. #", "\"reprlib\", \"socketserver\", \"tkinter\", \"winreg\", \"xmlrpc\", \"_dummy_thread\", \"_markupbase\", \"_thread\", ] PACKAGE_DATA", "# substitute packages, instead of the intended system stdlib modules.)", "author=AUTHOR, author_email=AUTHOR_EMAIL, url=URL, description=DESCRIPTION, long_description=LONG_DESC, license=LICENSE, keywords=KEYWORDS, entry_points={ 'console_scripts': [", "if sys.version_info[:2] < (3, 0): PACKAGES += [ \"builtins\", \"configparser\",", "build/lib. 
# If so, we CANNOT let the user install", "forcibly remove the build folder to avoid breaking the #", "\"Programming Language :: Python :: 2.7\", \"Programming Language :: Python", "modules.) SYSTEM_MODULES = set([ '_dummy_thread', '_markupbase', '_thread', 'builtins', 'configparser', 'copyreg',", "'discover_tests.py', 'check_rst.sh', 'TESTING.txt', ], 'tests': ['*.py'], } REQUIRES = []", ":: MIT License\", \"Development Status :: 4 - Beta\", \"Intended", "< (3, 0): PACKAGES += [ \"builtins\", \"configparser\", \"copyreg\", \"html\",", "'xmlrpc' ]) if sys.version_info[0] >= 3: # Do any of", ":: 2.7\", \"Programming Language :: Python :: 3\", \"Programming Language", "'_dummy_thread', '_markupbase', '_thread', 'builtins', 'configparser', 'copyreg', 'html', 'http', 'queue', 'reprlib',", "\"_markupbase\", \"_thread\", ] PACKAGE_DATA = {'': [ 'README.rst', 'LICENSE.txt', 'futurize.py',", "support for Python 3 and 2\" LONG_DESC = src.future.__doc__ AUTHOR", "user's Py3 installation if they run \"python2 setup.py # build\"", "sys.path. (Running \"import configparser\" etc. 
may pick up our Py2", "print_function import os import os.path import sys try: from setuptools", "folder is in an inconsistent state for ' 'a Python", "Language :: Python :: 2.6\", \"Programming Language :: Python ::", "keywords=KEYWORDS, entry_points={ 'console_scripts': [ 'futurize = libfuturize.main:main', 'pasteurize = libpasteurize.main:main'", ":: Python :: 2.7\", \"Programming Language :: Python :: 3\",", "src.future VERSION = src.future.__version__ DESCRIPTION = \"Clean single-source support for", "#!/usr/bin/env python from __future__ import absolute_import, print_function import os import", "3\", \"Programming Language :: Python :: 3.3\", \"Programming Language ::", "# \"past.tests\", \"past.translation\", \"libfuturize\", \"libfuturize.fixes\", \"libpasteurize\", \"libpasteurize.fixes\", ] # PEP", "\"_thread\", ] PACKAGE_DATA = {'': [ 'README.rst', 'LICENSE.txt', 'futurize.py', 'pasteurize.py',", "import setup if sys.argv[-1] == 'publish': os.system('python setup.py sdist upload')", "future.tests.base # \"future.tests.test_email\", \"future.utils\", \"past\", \"past.builtins\", \"past.types\", \"past.utils\", # \"past.tests\",", "[] TEST_REQUIRES = [] if sys.version_info[:2] == (2, 6): REQUIRES", "Python\", \"Programming Language :: Python :: 2.6\", \"Programming Language ::", ":: OSI Approved :: MIT License\", \"Development Status :: 4", "VERSION = src.future.__version__ DESCRIPTION = \"Clean single-source support for Python", "packages=PACKAGES, package_data=PACKAGE_DATA, include_package_data=True, install_requires=REQUIRES, classifiers=CLASSIFIERS, test_suite = \"discover_tests\", tests_require=TEST_REQUIRES, **setup_kwds", "on the folder order in # sys.path. 
(Running \"import configparser\"", "0): PACKAGES += [ \"builtins\", \"configparser\", \"copyreg\", \"html\", \"http\", \"queue\",", "\"libpasteurize.fixes\", ] # PEP 3108 stdlib moves: if sys.version_info[:2] <", "happens to run: # python2 setup.py build # python3 setup.py", "os.path import sys try: from setuptools import setup except ImportError:", ">= 3: # Do any of the above folders exist", "files = os.listdir(os.path.join('build', 'lib')) if len(set(files) & set(SYSTEM_MODULES)) > 0:", "= \"future\" PACKAGES = [\"future\", \"future.builtins\", \"future.types\", \"future.standard_library\", \"future.backports\", \"future.backports.email\",", "sys.version_info[0] >= 3: # Do any of the above folders", "and run ' 'setup.py again.', file=sys.stderr) sys.exit(1) except OSError: pass", "LONG_DESC = src.future.__doc__ AUTHOR = \"<NAME>\" AUTHOR_EMAIL = \"<EMAIL>\" URL=\"https://python-future.org\"", "if sys.version_info[0] >= 3: # Do any of the above", "setup(name=NAME, version=VERSION, author=AUTHOR, author_email=AUTHOR_EMAIL, url=URL, description=DESCRIPTION, long_description=LONG_DESC, license=LICENSE, keywords=KEYWORDS, entry_points={", "sys.version_info[:2] == (2, 6): REQUIRES += ['importlib', 'argparse'] TEST_REQUIRES +=", "sdist upload') sys.exit() NAME = \"future\" PACKAGES = [\"future\", \"future.builtins\",", ":: 3\", \"Programming Language :: Python :: 3.3\", \"Programming Language", "\"future.backports\", \"future.backports.email\", \"future.backports.email.mime\", \"future.backports.html\", \"future.backports.http\", \"future.backports.test\", \"future.backports.urllib\", \"future.backports.xmlrpc\", \"future.moves\", \"future.moves.dbm\",", "in build/lib. 
# If so, we CANNOT let the user", "TEST_REQUIRES += ['unittest2'] import src.future VERSION = src.future.__version__ DESCRIPTION =", ":: Python :: 3\", \"Programming Language :: Python :: 3.3\",", ":: 3.4\", \"License :: OSI Approved\", \"License :: OSI Approved", "== (2, 6): REQUIRES += ['importlib', 'argparse'] TEST_REQUIRES += ['unittest2']", "folders like \"configparser\" will be in build/lib. # If so,", "this, because # this may break his/her Python 3 install,", "license=LICENSE, keywords=KEYWORDS, entry_points={ 'console_scripts': [ 'futurize = libfuturize.main:main', 'pasteurize =", "src.future.__doc__ AUTHOR = \"<NAME>\" AUTHOR_EMAIL = \"<EMAIL>\" URL=\"https://python-future.org\" LICENSE =", "let the user install this, because # this may break", "his/her Python 3 install, depending on the folder order in", "again.', file=sys.stderr) sys.exit(1) except OSError: pass setup(name=NAME, version=VERSION, author=AUTHOR, author_email=AUTHOR_EMAIL,", "stdlib modules.) SYSTEM_MODULES = set([ '_dummy_thread', '_markupbase', '_thread', 'builtins', 'configparser',", "so, we CANNOT let the user install this, because #", "import os import os.path import sys try: from setuptools import", "Status :: 4 - Beta\", \"Intended Audience :: Developers\", ]", "[\"future\", \"future.builtins\", \"future.types\", \"future.standard_library\", \"future.backports\", \"future.backports.email\", \"future.backports.email.mime\", \"future.backports.html\", \"future.backports.http\", \"future.backports.test\",", "Approved\", \"License :: OSI Approved :: MIT License\", \"Development Status", "3: # Do any of the above folders exist in", "upload') sys.exit() NAME = \"future\" PACKAGES = [\"future\", \"future.builtins\", \"future.types\",", "# python2 setup.py build # python3 setup.py install # then", "description=DESCRIPTION, long_description=LONG_DESC, license=LICENSE, keywords=KEYWORDS, entry_points={ 'console_scripts': [ 'futurize = libfuturize.main:main',", "\"past.utils\", # 
\"past.tests\", \"past.translation\", \"libfuturize\", \"libfuturize.fixes\", \"libpasteurize\", \"libpasteurize.fixes\", ] #", ":: 3.3\", \"Programming Language :: Python :: 3.4\", \"License ::", "system stdlib modules.) SYSTEM_MODULES = set([ '_dummy_thread', '_markupbase', '_thread', 'builtins',", "folders exist in build/lib? files = os.listdir(os.path.join('build', 'lib')) if len(set(files)", "try: from setuptools import setup except ImportError: from distutils.core import", "'futurize.py', 'pasteurize.py', 'discover_tests.py', 'check_rst.sh', 'TESTING.txt', ], 'tests': ['*.py'], } REQUIRES", "setup if sys.argv[-1] == 'publish': os.system('python setup.py sdist upload') sys.exit()", "}, package_dir={'': 'src'}, packages=PACKAGES, package_data=PACKAGE_DATA, include_package_data=True, install_requires=REQUIRES, classifiers=CLASSIFIERS, test_suite =", "# sys.path. (Running \"import configparser\" etc. may pick up our", "[ \"Programming Language :: Python\", \"Programming Language :: Python ::", "'argparse'] TEST_REQUIRES += ['unittest2'] import src.future VERSION = src.future.__version__ DESCRIPTION", "] }, package_dir={'': 'src'}, packages=PACKAGES, package_data=PACKAGE_DATA, include_package_data=True, install_requires=REQUIRES, classifiers=CLASSIFIERS, test_suite", "absolute_import, print_function import os import os.path import sys try: from", "we CANNOT let the user install this, because # this", "import absolute_import, print_function import os import os.path import sys try:", "Please remove it manually and run ' 'setup.py again.', file=sys.stderr)", "= libfuturize.main:main', 'pasteurize = libpasteurize.main:main' ] }, package_dir={'': 'src'}, packages=PACKAGES,", "\"Clean single-source support for Python 3 and 2\" LONG_DESC =", "'futurize = libfuturize.main:main', 'pasteurize = libpasteurize.main:main' ] }, package_dir={'': 'src'},", "# If so, we CANNOT let the user install this,", "Py3 installation if they run \"python2 setup.py # build\" and", "# this may 
break his/her Python 3 install, depending on", "\"past.types\", \"past.utils\", # \"past.tests\", \"past.translation\", \"libfuturize\", \"libfuturize.fixes\", \"libpasteurize\", \"libpasteurize.fixes\", ]", "\"License :: OSI Approved :: MIT License\", \"Development Status ::", "instead of the intended system stdlib modules.) SYSTEM_MODULES = set([", "sys.exit(1) except OSError: pass setup(name=NAME, version=VERSION, author=AUTHOR, author_email=AUTHOR_EMAIL, url=URL, description=DESCRIPTION,", "python from __future__ import absolute_import, print_function import os import os.path", "setup.py build # python3 setup.py install # then folders like", "' 'setup.py again.', file=sys.stderr) sys.exit(1) except OSError: pass setup(name=NAME, version=VERSION,", "\"Programming Language :: Python :: 2.6\", \"Programming Language :: Python", "to avoid breaking the # user's Py3 installation if they", "\"future.backports.email.mime\", \"future.backports.html\", \"future.backports.http\", \"future.backports.test\", \"future.backports.urllib\", \"future.backports.xmlrpc\", \"future.moves\", \"future.moves.dbm\", \"future.moves.html\", \"future.moves.http\",", "\"past.translation\", \"libfuturize\", \"libfuturize.fixes\", \"libpasteurize\", \"libpasteurize.fixes\", ] # PEP 3108 stdlib", "the build folder to avoid breaking the # user's Py3", "the user happens to run: # python2 setup.py build #", "src.future.__version__ DESCRIPTION = \"Clean single-source support for Python 3 and", "\"future.moves.tkinter\", \"future.moves.urllib\", \"future.moves.xmlrpc\", \"future.tests\", # for future.tests.base # \"future.tests.test_email\", \"future.utils\",", "] setup_kwds = {} # * Important * # We", "= {} # * Important * # We forcibly remove", "the # user's Py3 installation if they run \"python2 setup.py", "of the above folders exist in build/lib? files = os.listdir(os.path.join('build',", "install. 
Please remove it manually and run ' 'setup.py again.',", "\"libfuturize.fixes\", \"libpasteurize\", \"libpasteurize.fixes\", ] # PEP 3108 stdlib moves: if", "TEST_REQUIRES = [] if sys.version_info[:2] == (2, 6): REQUIRES +=", "substitute packages, instead of the intended system stdlib modules.) SYSTEM_MODULES", "\"past.builtins\", \"past.types\", \"past.utils\", # \"past.tests\", \"past.translation\", \"libfuturize\", \"libfuturize.fixes\", \"libpasteurize\", \"libpasteurize.fixes\",", "= {'': [ 'README.rst', 'LICENSE.txt', 'futurize.py', 'pasteurize.py', 'discover_tests.py', 'check_rst.sh', 'TESTING.txt',", "\"import configparser\" etc. may pick up our Py2 # substitute", "0: print('ERROR: Your build folder is in an inconsistent state", "build/lib? files = os.listdir(os.path.join('build', 'lib')) if len(set(files) & set(SYSTEM_MODULES)) >", "backport six 2to3 modernize pasteurize 3to2\" CLASSIFIERS = [ \"Programming", "[ 'README.rst', 'LICENSE.txt', 'futurize.py', 'pasteurize.py', 'discover_tests.py', 'check_rst.sh', 'TESTING.txt', ], 'tests':", ":: Python\", \"Programming Language :: Python :: 2.6\", \"Programming Language", "], 'tests': ['*.py'], } REQUIRES = [] TEST_REQUIRES = []", "- Beta\", \"Intended Audience :: Developers\", ] setup_kwds = {}", "# PEP 3108 stdlib moves: if sys.version_info[:2] < (3, 0):", "] PACKAGE_DATA = {'': [ 'README.rst', 'LICENSE.txt', 'futurize.py', 'pasteurize.py', 'discover_tests.py',", "\"past.tests\", \"past.translation\", \"libfuturize\", \"libfuturize.fixes\", \"libpasteurize\", \"libpasteurize.fixes\", ] # PEP 3108", "AUTHOR_EMAIL = \"<EMAIL>\" URL=\"https://python-future.org\" LICENSE = \"MIT\" KEYWORDS = \"future", "REQUIRES += ['importlib', 'argparse'] TEST_REQUIRES += ['unittest2'] import src.future VERSION", "Approved :: MIT License\", \"Development Status :: 4 - Beta\",", "# \"future.tests.test_email\", \"future.utils\", \"past\", \"past.builtins\", \"past.types\", \"past.utils\", # \"past.tests\", \"past.translation\",", 
"(2, 6): REQUIRES += ['importlib', 'argparse'] TEST_REQUIRES += ['unittest2'] import", "Python :: 3.4\", \"License :: OSI Approved\", \"License :: OSI", "['unittest2'] import src.future VERSION = src.future.__version__ DESCRIPTION = \"Clean single-source", "then folders like \"configparser\" will be in build/lib. # If", ":: 4 - Beta\", \"Intended Audience :: Developers\", ] setup_kwds", "if sys.argv[-1] == 'publish': os.system('python setup.py sdist upload') sys.exit() NAME", "NAME = \"future\" PACKAGES = [\"future\", \"future.builtins\", \"future.types\", \"future.standard_library\", \"future.backports\",", "\"future.moves.urllib\", \"future.moves.xmlrpc\", \"future.tests\", # for future.tests.base # \"future.tests.test_email\", \"future.utils\", \"past\",", "* Important * # We forcibly remove the build folder", "the above folders exist in build/lib? files = os.listdir(os.path.join('build', 'lib'))", "\"<EMAIL>\" URL=\"https://python-future.org\" LICENSE = \"MIT\" KEYWORDS = \"future past python3", "order in # sys.path. (Running \"import configparser\" etc. may pick", "folder to avoid breaking the # user's Py3 installation if", "for ' 'a Python 3.x install. Please remove it manually", "\"future.backports.email\", \"future.backports.email.mime\", \"future.backports.html\", \"future.backports.http\", \"future.backports.test\", \"future.backports.urllib\", \"future.backports.xmlrpc\", \"future.moves\", \"future.moves.dbm\", \"future.moves.html\",", "'configparser', 'copyreg', 'html', 'http', 'queue', 'reprlib', 'socketserver', 'tkinter', 'winreg', 'xmlrpc'", "= \"<NAME>\" AUTHOR_EMAIL = \"<EMAIL>\" URL=\"https://python-future.org\" LICENSE = \"MIT\" KEYWORDS", "3 install, depending on the folder order in # sys.path.", "install\". 
try: # If the user happens to run: #", ":: Python :: 2.6\", \"Programming Language :: Python :: 2.7\",", "['importlib', 'argparse'] TEST_REQUIRES += ['unittest2'] import src.future VERSION = src.future.__version__", "# python3 setup.py install # then folders like \"configparser\" will", "# user's Py3 installation if they run \"python2 setup.py #", "\"xmlrpc\", \"_dummy_thread\", \"_markupbase\", \"_thread\", ] PACKAGE_DATA = {'': [ 'README.rst',", "sys.version_info[:2] < (3, 0): PACKAGES += [ \"builtins\", \"configparser\", \"copyreg\"," ]
[ "connect to Database and register Blueprint of routes\"\"\" app =", "import Flask from .extensions import db from .routes import short", "app = Flask(__name__) app.config['SQLALCHEMY_DATABASE_URI'] = config.DATABASE_CONNECTION_URI app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False app.app_context().push()", "config.DATABASE_CONNECTION_URI app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False app.app_context().push() db.init_app(app) db.create_all() app.register_blueprint(short) return app", "flask import Flask from .extensions import db from .routes import", "register Blueprint of routes\"\"\" app = Flask(__name__) app.config['SQLALCHEMY_DATABASE_URI'] = config.DATABASE_CONNECTION_URI", "Flask from .extensions import db from .routes import short from", ".routes import short from . import config def create_app(): \"\"\"", "from . import config def create_app(): \"\"\" Creates Flask App,", "from .routes import short from . import config def create_app():", "routes\"\"\" app = Flask(__name__) app.config['SQLALCHEMY_DATABASE_URI'] = config.DATABASE_CONNECTION_URI app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False", ".extensions import db from .routes import short from . import", "Blueprint of routes\"\"\" app = Flask(__name__) app.config['SQLALCHEMY_DATABASE_URI'] = config.DATABASE_CONNECTION_URI app.config['SQLALCHEMY_TRACK_MODIFICATIONS']", "db from .routes import short from . import config def", "and register Blueprint of routes\"\"\" app = Flask(__name__) app.config['SQLALCHEMY_DATABASE_URI'] =", "create_app(): \"\"\" Creates Flask App, connect to Database and register", "Flask(__name__) app.config['SQLALCHEMY_DATABASE_URI'] = config.DATABASE_CONNECTION_URI app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False app.app_context().push() db.init_app(app) db.create_all()", "from flask import Flask from .extensions import db from .routes", "config def create_app(): \"\"\" Creates Flask App, connect to Database", ". 
import config def create_app(): \"\"\" Creates Flask App, connect", "of routes\"\"\" app = Flask(__name__) app.config['SQLALCHEMY_DATABASE_URI'] = config.DATABASE_CONNECTION_URI app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] =", "<gh_stars>0 from flask import Flask from .extensions import db from", "to Database and register Blueprint of routes\"\"\" app = Flask(__name__)", "from .extensions import db from .routes import short from .", "import config def create_app(): \"\"\" Creates Flask App, connect to", "Database and register Blueprint of routes\"\"\" app = Flask(__name__) app.config['SQLALCHEMY_DATABASE_URI']", "import db from .routes import short from . import config", "import short from . import config def create_app(): \"\"\" Creates", "short from . import config def create_app(): \"\"\" Creates Flask", "app.config['SQLALCHEMY_DATABASE_URI'] = config.DATABASE_CONNECTION_URI app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False app.app_context().push() db.init_app(app) db.create_all() app.register_blueprint(short)", "Flask App, connect to Database and register Blueprint of routes\"\"\"", "def create_app(): \"\"\" Creates Flask App, connect to Database and", "Creates Flask App, connect to Database and register Blueprint of", "= config.DATABASE_CONNECTION_URI app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False app.app_context().push() db.init_app(app) db.create_all() app.register_blueprint(short) return", "= Flask(__name__) app.config['SQLALCHEMY_DATABASE_URI'] = config.DATABASE_CONNECTION_URI app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False app.app_context().push() db.init_app(app)", "\"\"\" Creates Flask App, connect to Database and register Blueprint", "App, connect to Database and register Blueprint of routes\"\"\" app" ]
[ "updatesql = \"UPDATE `t_stackoverflow_question` \" \\ \"SET `tags`='%s', `views`='%s', `answers_num`='%s',", "it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11', 'Opera/9.25 (Windows NT 5.1; U; en)',", "tag in tags: tags_str += tag.get_text() + \",\" answer_contetnts =", "`question_content`='%s', `answers_contetnt`='%s' \" \\ \"WHERE (`question_id`='%s') \" \\ % (item[4],", "Ubuntu/11.04 Chromium/16.0.912.77 Chrome/16.0.912.77 Safari/535.7\", \"Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:10.0)", "> 1: asked_time = times[i].get(\"datetime\").replace(\"T\", \" \") item.append(title[ 0].get_text()) #", "Gecko/20100101 Firefox/10.0 \"] user_agent = random.choice(user_agents) headers = { 'User-Agent':", "SSL-MM/1.4.1 GNUTLS/1.2.9', \"Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.7 (KHTML, like Gecko)", "BeautifulSoup(html_doc, \"html.parser\") times = soup.select(\"time\") views = soup.select(\"p.label-key > b\")", "= \"[split]\".join(item[7]) updatesql = \"UPDATE `t_stackoverflow_question` \" \\ \"SET `tags`='%s',", "= requests.get(url=url, headers=headers) html_doc = req.text soup = BeautifulSoup(html_doc, \"html.parser\")", "libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/1.2.9', \"Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.7 (KHTML, like", "\"div.post-taglist.grid.gs4.gsy.fd-column > div >a\") title = soup.select(\"h1 >a\") tags_str =", "CLR 2.0.50727)', 'Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)',", "import time def html(url): user_agents = [ 'Mozilla/5.0 (Windows; U;", "time def html(url): user_agents = [ 'Mozilla/5.0 (Windows; U; Windows", "2.0.50727)', 'Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)', 'Mozilla/5.0", "asked_time tag_str active_time quest_content_ text answer_content_list item.append(views[1].get_text()) item.append(answers[0].get_text()) item.append(asked_time) item.append(tags_str)", "print(item) # updatetosql(item) def updatetosql(item): ansers_text = \"[split]\".join(item[7]) 
updatesql =", "Chrome/16.0.912.77 Safari/535.7\", \"Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:10.0) Gecko/20100101 Firefox/10.0", "`answers_contetnt`='%s' \" \\ \"WHERE (`question_id`='%s') \" \\ % (item[4], item[1],", "answer_contetnts.append(question_content[i]) for i in range(len(times)): if len(times[i].get_text()) > 1: asked_time", "b\") active_str = str(views[2]) active = active_str[active_str.find(\"title=\\\"\") + 7:active_str.find(\"Z\")] answers", "Gecko) (Kubuntu)', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.12) Gecko/20070731", "> h2 >span\") question_content = soup.select(\"div.post-text\") tags = soup.select(\"#question >", "answersnum asked_time tag_str active_time quest_content_ text answer_content_list item.append(views[1].get_text()) item.append(answers[0].get_text()) item.append(asked_time)", "len(times[i].get_text()) > 1: asked_time = times[i].get(\"datetime\").replace(\"T\", \" \") item.append(title[ 0].get_text())", "+ 7:active_str.find(\"Z\")] answers = soup.select(\"#answers-header > div > h2 >span\")", "os import random import time def html(url): user_agents = [", "Linux i686; en-US; rv:1.8.0.12) Gecko/20070731 Ubuntu/dapper-security Firefox/1.5.0.12', 'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1", "= [ 'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11)", "U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11', 'Opera/9.25 (Windows", "\"[split]\".join(item[7]) updatesql = \"UPDATE `t_stackoverflow_question` \" \\ \"SET `tags`='%s', `views`='%s',", "(X11; Ubuntu; Linux i686; rv:10.0) Gecko/20100101 Firefox/10.0 \"] user_agent =", "`asked_time`='%s', `last_active_time`='%s', `question_content`='%s', `answers_contetnt`='%s' \" \\ \"WHERE (`question_id`='%s') \" \\", "# title views answersnum asked_time tag_str active_time quest_content_ text answer_content_list", "Linux i686; rv:10.0) Gecko/20100101 Firefox/10.0 \"] user_agent = random.choice(user_agents) headers", "0].get_text()) # title views answersnum asked_time 
tag_str active_time quest_content_ text", "\\ \"SET `tags`='%s', `views`='%s', `answers_num`='%s', `asked_time`='%s', `last_active_time`='%s', `question_content`='%s', `answers_contetnt`='%s' \"", "'Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)', 'Mozilla/5.0 (X11;", "active = active_str[active_str.find(\"title=\\\"\") + 7:active_str.find(\"Z\")] answers = soup.select(\"#answers-header > div", "h2 >span\") question_content = soup.select(\"div.post-text\") tags = soup.select(\"#question > div.post-layout", "question_content = soup.select(\"div.post-text\") tags = soup.select(\"#question > div.post-layout > div.postcell.post-layout--right", "tags_str += tag.get_text() + \",\" answer_contetnts = [] for i", "= soup.select(\"h1 >a\") tags_str = \"\" item = [] for", "Firefox/10.0 \"] user_agent = random.choice(user_agents) headers = { 'User-Agent': user_agent,", "i686; rv:10.0) Gecko/20100101 Firefox/10.0 \"] user_agent = random.choice(user_agents) headers =", "req = requests.get(url=url, headers=headers) html_doc = req.text soup = BeautifulSoup(html_doc,", "+ \",\" answer_contetnts = [] for i in range(1, len(question_content)):", "asked_time = times[i].get(\"datetime\").replace(\"T\", \" \") item.append(title[ 0].get_text()) # title views", "Gecko/20070731 Ubuntu/dapper-security Firefox/1.5.0.12', 'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/1.2.9', \"Mozilla/5.0 (X11; Linux", "= [] for tag in tags: tags_str += tag.get_text() +", "item.append(active) item.append(question_content[0]) item.append(answer_contetnts) print(item) # updatetosql(item) def updatetosql(item): ansers_text =", "\\ % (item[4], item[1], item[2], item[3], item[5], item[6], ansers_text, item[0],)", "(item[4], item[1], item[2], item[3], item[5], item[6], ansers_text, item[0],) pass if", "AppleWebKit/535.7 (KHTML, like Gecko) Ubuntu/11.04 Chromium/16.0.912.77 Chrome/16.0.912.77 Safari/535.7\", \"Mozilla/5.0 (X11;", "NT 5.1; it; rv:1.8.1.11) Gecko/20071127 
Firefox/2.0.0.11', 'Opera/9.25 (Windows NT 5.1;", "5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11', 'Opera/9.25 (Windows NT 5.1; U;", "[] for i in range(1, len(question_content)): answer_contetnts.append(question_content[i]) for i in", "= \"UPDATE `t_stackoverflow_question` \" \\ \"SET `tags`='%s', `views`='%s', `answers_num`='%s', `asked_time`='%s',", "\" \\ % (item[4], item[1], item[2], item[3], item[5], item[6], ansers_text,", "'gzip'} req = requests.get(url=url, headers=headers) html_doc = req.text soup =", "import random import time def html(url): user_agents = [ 'Mozilla/5.0", "\"html.parser\") times = soup.select(\"time\") views = soup.select(\"p.label-key > b\") active_str", "\" \\ \"WHERE (`question_id`='%s') \" \\ % (item[4], item[1], item[2],", "item.append(answer_contetnts) print(item) # updatetosql(item) def updatetosql(item): ansers_text = \"[split]\".join(item[7]) updatesql", "for tag in tags: tags_str += tag.get_text() + \",\" answer_contetnts", "'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11',", "\" \\ \"SET `tags`='%s', `views`='%s', `answers_num`='%s', `asked_time`='%s', `last_active_time`='%s', `question_content`='%s', `answers_contetnt`='%s'", "\"Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:10.0) Gecko/20100101 Firefox/10.0 \"] user_agent", "views answersnum asked_time tag_str active_time quest_content_ text answer_content_list item.append(views[1].get_text()) item.append(answers[0].get_text())", "'Opera/9.25 (Windows NT 5.1; U; en)', 'Mozilla/4.0 (compatible; MSIE 6.0;", "def updatetosql(item): ansers_text = \"[split]\".join(item[7]) updatesql = \"UPDATE `t_stackoverflow_question` \"", "[ 'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127", "GNUTLS/1.2.9', \"Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.7 (KHTML, like Gecko) Ubuntu/11.04", "random.choice(user_agents) headers = { 'User-Agent': user_agent, 'Accept-Encoding': 'gzip'} req =", "soup.select(\"p.label-key > b\") 
active_str = str(views[2]) active = active_str[active_str.find(\"title=\\\"\") +", "rv:10.0) Gecko/20100101 Firefox/10.0 \"] user_agent = random.choice(user_agents) headers = {", "item.append(question_content[0]) item.append(answer_contetnts) print(item) # updatetosql(item) def updatetosql(item): ansers_text = \"[split]\".join(item[7])", "from bs4 import BeautifulSoup import urllib.request import os import random", "= \"\" item = [] for tag in tags: tags_str", "Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',", ".NET CLR 2.0.50727)', 'Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko)", "\") item.append(title[ 0].get_text()) # title views answersnum asked_time tag_str active_time", "item = [] for tag in tags: tags_str += tag.get_text()", "Gecko) Ubuntu/11.04 Chromium/16.0.912.77 Chrome/16.0.912.77 Safari/535.7\", \"Mozilla/5.0 (X11; Ubuntu; Linux i686;", "= times[i].get(\"datetime\").replace(\"T\", \" \") item.append(title[ 0].get_text()) # title views answersnum", "requests.get(url=url, headers=headers) html_doc = req.text soup = BeautifulSoup(html_doc, \"html.parser\") times", "(X11; Linux i686) AppleWebKit/535.7 (KHTML, like Gecko) Ubuntu/11.04 Chromium/16.0.912.77 Chrome/16.0.912.77", "tag_str active_time quest_content_ text answer_content_list item.append(views[1].get_text()) item.append(answers[0].get_text()) item.append(asked_time) item.append(tags_str) item.append(active)", "= { 'User-Agent': user_agent, 'Accept-Encoding': 'gzip'} req = requests.get(url=url, headers=headers)", "NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)', 'Mozilla/5.0", "Firefox/1.5.0.12', 'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/1.2.9', \"Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.7", "\"SET `tags`='%s', `views`='%s', `answers_num`='%s', `asked_time`='%s', `last_active_time`='%s', `question_content`='%s', `answers_contetnt`='%s' \" \\", "> div.post-layout > div.postcell.post-layout--right > \" 
\"div.post-taglist.grid.gs4.gsy.fd-column > div >a\")", "en-US; rv:1.8.0.12) Gecko/20070731 Ubuntu/dapper-security Firefox/1.5.0.12', 'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/1.2.9', \"Mozilla/5.0", "for i in range(len(times)): if len(times[i].get_text()) > 1: asked_time =", "title views answersnum asked_time tag_str active_time quest_content_ text answer_content_list item.append(views[1].get_text())", "item[3], item[5], item[6], ansers_text, item[0],) pass if __name__ == '__main__':", "en)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET", "tags_str = \"\" item = [] for tag in tags:", "(compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322;", "import os import random import time def html(url): user_agents =", "'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/1.2.9', \"Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.7 (KHTML,", "import requests from bs4 import BeautifulSoup import urllib.request import os", "user_agent = random.choice(user_agents) headers = { 'User-Agent': user_agent, 'Accept-Encoding': 'gzip'}", "soup = BeautifulSoup(html_doc, \"html.parser\") times = soup.select(\"time\") views = soup.select(\"p.label-key", "6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR", "(Kubuntu)', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.12) Gecko/20070731 Ubuntu/dapper-security", "import urllib.request import os import random import time def html(url):", "answers = soup.select(\"#answers-header > div > h2 >span\") question_content =", "Safari/535.7\", \"Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:10.0) Gecko/20100101 Firefox/10.0 \"]", "active_str = str(views[2]) active = active_str[active_str.find(\"title=\\\"\") + 7:active_str.find(\"Z\")] answers =", "'User-Agent': user_agent, 'Accept-Encoding': 'gzip'} req = requests.get(url=url, headers=headers) html_doc =", "times = soup.select(\"time\") views = soup.select(\"p.label-key > b\") active_str =", "= active_str[active_str.find(\"title=\\\"\") + 7:active_str.find(\"Z\")] 
answers = soup.select(\"#answers-header > div >", "req.text soup = BeautifulSoup(html_doc, \"html.parser\") times = soup.select(\"time\") views =", "soup.select(\"#answers-header > div > h2 >span\") question_content = soup.select(\"div.post-text\") tags", "headers=headers) html_doc = req.text soup = BeautifulSoup(html_doc, \"html.parser\") times =", "html_doc = req.text soup = BeautifulSoup(html_doc, \"html.parser\") times = soup.select(\"time\")", "NT 5.1; U; en)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT", "Chromium/16.0.912.77 Chrome/16.0.912.77 Safari/535.7\", \"Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:10.0) Gecko/20100101", "= random.choice(user_agents) headers = { 'User-Agent': user_agent, 'Accept-Encoding': 'gzip'} req", "div > h2 >span\") question_content = soup.select(\"div.post-text\") tags = soup.select(\"#question", "ansers_text = \"[split]\".join(item[7]) updatesql = \"UPDATE `t_stackoverflow_question` \" \\ \"SET", "= [] for i in range(1, len(question_content)): answer_contetnts.append(question_content[i]) for i", "\" \"div.post-taglist.grid.gs4.gsy.fd-column > div >a\") title = soup.select(\"h1 >a\") tags_str", "`views`='%s', `answers_num`='%s', `asked_time`='%s', `last_active_time`='%s', `question_content`='%s', `answers_contetnt`='%s' \" \\ \"WHERE (`question_id`='%s')", "5.1; U; en)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1;", "> \" \"div.post-taglist.grid.gs4.gsy.fd-column > div >a\") title = soup.select(\"h1 >a\")", ".NET CLR 1.1.4322; .NET CLR 2.0.50727)', 'Mozilla/5.0 (compatible; Konqueror/3.5; Linux)", "soup.select(\"time\") views = soup.select(\"p.label-key > b\") active_str = str(views[2]) active", "div >a\") title = soup.select(\"h1 >a\") tags_str = \"\" item", "'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR", "> div >a\") title = soup.select(\"h1 >a\") tags_str = \"\"", "{ 'User-Agent': user_agent, 'Accept-Encoding': 'gzip'} req = requests.get(url=url, headers=headers) html_doc", "title = 
soup.select(\"h1 >a\") tags_str = \"\" item = []", ">a\") tags_str = \"\" item = [] for tag in", "\"UPDATE `t_stackoverflow_question` \" \\ \"SET `tags`='%s', `views`='%s', `answers_num`='%s', `asked_time`='%s', `last_active_time`='%s',", "soup.select(\"#question > div.post-layout > div.postcell.post-layout--right > \" \"div.post-taglist.grid.gs4.gsy.fd-column > div", "= soup.select(\"time\") views = soup.select(\"p.label-key > b\") active_str = str(views[2])", "5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)', 'Mozilla/5.0 (compatible;", "urllib.request import os import random import time def html(url): user_agents", "\"\" item = [] for tag in tags: tags_str +=", "(`question_id`='%s') \" \\ % (item[4], item[1], item[2], item[3], item[5], item[6],", "BeautifulSoup import urllib.request import os import random import time def", "div.post-layout > div.postcell.post-layout--right > \" \"div.post-taglist.grid.gs4.gsy.fd-column > div >a\") title", "views = soup.select(\"p.label-key > b\") active_str = str(views[2]) active =", "CLR 1.1.4322; .NET CLR 2.0.50727)', 'Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5", "active_time quest_content_ text answer_content_list item.append(views[1].get_text()) item.append(answers[0].get_text()) item.append(asked_time) item.append(tags_str) item.append(active) item.append(question_content[0])", "# updatetosql(item) def updatetosql(item): ansers_text = \"[split]\".join(item[7]) updatesql = \"UPDATE", "= BeautifulSoup(html_doc, \"html.parser\") times = soup.select(\"time\") views = soup.select(\"p.label-key >", "headers = { 'User-Agent': user_agent, 'Accept-Encoding': 'gzip'} req = requests.get(url=url,", "(Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11', 'Opera/9.25", "i in range(len(times)): if len(times[i].get_text()) > 1: asked_time = times[i].get(\"datetime\").replace(\"T\",", "= req.text soup = BeautifulSoup(html_doc, \"html.parser\") times = soup.select(\"time\") views", "like Gecko) 
Ubuntu/11.04 Chromium/16.0.912.77 Chrome/16.0.912.77 Safari/535.7\", \"Mozilla/5.0 (X11; Ubuntu; Linux", "soup.select(\"div.post-text\") tags = soup.select(\"#question > div.post-layout > div.postcell.post-layout--right > \"", "Ubuntu; Linux i686; rv:10.0) Gecko/20100101 Firefox/10.0 \"] user_agent = random.choice(user_agents)", "Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11', 'Opera/9.25 (Windows NT", "len(question_content)): answer_contetnts.append(question_content[i]) for i in range(len(times)): if len(times[i].get_text()) > 1:", "updatetosql(item): ansers_text = \"[split]\".join(item[7]) updatesql = \"UPDATE `t_stackoverflow_question` \" \\", "`t_stackoverflow_question` \" \\ \"SET `tags`='%s', `views`='%s', `answers_num`='%s', `asked_time`='%s', `last_active_time`='%s', `question_content`='%s',", "`tags`='%s', `views`='%s', `answers_num`='%s', `asked_time`='%s', `last_active_time`='%s', `question_content`='%s', `answers_contetnt`='%s' \" \\ \"WHERE", "div.postcell.post-layout--right > \" \"div.post-taglist.grid.gs4.gsy.fd-column > div >a\") title = soup.select(\"h1", "times[i].get(\"datetime\").replace(\"T\", \" \") item.append(title[ 0].get_text()) # title views answersnum asked_time", "Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)', 'Mozilla/5.0 (X11; U; Linux", "active_str[active_str.find(\"title=\\\"\") + 7:active_str.find(\"Z\")] answers = soup.select(\"#answers-header > div > h2", "def html(url): user_agents = [ 'Mozilla/5.0 (Windows; U; Windows NT", "i686) AppleWebKit/535.7 (KHTML, like Gecko) Ubuntu/11.04 Chromium/16.0.912.77 Chrome/16.0.912.77 Safari/535.7\", \"Mozilla/5.0", "(X11; U; Linux i686; en-US; rv:1.8.0.12) Gecko/20070731 Ubuntu/dapper-security Firefox/1.5.0.12', 'Lynx/2.8.5rel.1", "(compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)', 'Mozilla/5.0 (X11; U;", "KHTML/3.5.5 (like Gecko) (Kubuntu)', 'Mozilla/5.0 (X11; U; Linux i686; en-US;", "'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.12) 
Gecko/20070731 Ubuntu/dapper-security Firefox/1.5.0.12',", ">a\") title = soup.select(\"h1 >a\") tags_str = \"\" item =", "U; Linux i686; en-US; rv:1.8.0.12) Gecko/20070731 Ubuntu/dapper-security Firefox/1.5.0.12', 'Lynx/2.8.5rel.1 libwww-FM/2.14", "user_agent, 'Accept-Encoding': 'gzip'} req = requests.get(url=url, headers=headers) html_doc = req.text", "str(views[2]) active = active_str[active_str.find(\"title=\\\"\") + 7:active_str.find(\"Z\")] answers = soup.select(\"#answers-header >", "text answer_content_list item.append(views[1].get_text()) item.append(answers[0].get_text()) item.append(asked_time) item.append(tags_str) item.append(active) item.append(question_content[0]) item.append(answer_contetnts) print(item)", "1.1.4322; .NET CLR 2.0.50727)', 'Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like", "item[5], item[6], ansers_text, item[0],) pass if __name__ == '__main__': html(\"https://stackoverflow.com/questions/50119673/nginx-fast-cgi-cache-on-error-page-404\")", "`answers_num`='%s', `asked_time`='%s', `last_active_time`='%s', `question_content`='%s', `answers_contetnt`='%s' \" \\ \"WHERE (`question_id`='%s') \"", "Linux i686) AppleWebKit/535.7 (KHTML, like Gecko) Ubuntu/11.04 Chromium/16.0.912.77 Chrome/16.0.912.77 Safari/535.7\",", "= soup.select(\"#answers-header > div > h2 >span\") question_content = soup.select(\"div.post-text\")", "tags = soup.select(\"#question > div.post-layout > div.postcell.post-layout--right > \" \"div.post-taglist.grid.gs4.gsy.fd-column", "> div.postcell.post-layout--right > \" \"div.post-taglist.grid.gs4.gsy.fd-column > div >a\") title =", "if len(times[i].get_text()) > 1: asked_time = times[i].get(\"datetime\").replace(\"T\", \" \") item.append(title[", "(Windows NT 5.1; U; en)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows", "[] for tag in tags: tags_str += tag.get_text() + \",\"", "updatetosql(item) def updatetosql(item): ansers_text = \"[split]\".join(item[7]) updatesql = \"UPDATE 
`t_stackoverflow_question`", "rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11', 'Opera/9.25 (Windows NT 5.1; U; en)', 'Mozilla/4.0", "SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)', 'Mozilla/5.0 (compatible; Konqueror/3.5;", "= soup.select(\"#question > div.post-layout > div.postcell.post-layout--right > \" \"div.post-taglist.grid.gs4.gsy.fd-column >", "> div > h2 >span\") question_content = soup.select(\"div.post-text\") tags =", "item[1], item[2], item[3], item[5], item[6], ansers_text, item[0],) pass if __name__", "\",\" answer_contetnts = [] for i in range(1, len(question_content)): answer_contetnts.append(question_content[i])", "in range(1, len(question_content)): answer_contetnts.append(question_content[i]) for i in range(len(times)): if len(times[i].get_text())", "= soup.select(\"p.label-key > b\") active_str = str(views[2]) active = active_str[active_str.find(\"title=\\\"\")", "answer_content_list item.append(views[1].get_text()) item.append(answers[0].get_text()) item.append(asked_time) item.append(tags_str) item.append(active) item.append(question_content[0]) item.append(answer_contetnts) print(item) #", "tags: tags_str += tag.get_text() + \",\" answer_contetnts = [] for", "`last_active_time`='%s', `question_content`='%s', `answers_contetnt`='%s' \" \\ \"WHERE (`question_id`='%s') \" \\ %", "item.append(tags_str) item.append(active) item.append(question_content[0]) item.append(answer_contetnts) print(item) # updatetosql(item) def updatetosql(item): ansers_text", "in tags: tags_str += tag.get_text() + \",\" answer_contetnts = []", "range(1, len(question_content)): answer_contetnts.append(question_content[i]) for i in range(len(times)): if len(times[i].get_text()) >", "in range(len(times)): if len(times[i].get_text()) > 1: asked_time = times[i].get(\"datetime\").replace(\"T\", \"", "Gecko/20071127 Firefox/2.0.0.11', 'Opera/9.25 (Windows NT 5.1; U; en)', 'Mozilla/4.0 (compatible;", "(like Gecko) (Kubuntu)', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.12)", 
"item.append(title[ 0].get_text()) # title views answersnum asked_time tag_str active_time quest_content_", "random import time def html(url): user_agents = [ 'Mozilla/5.0 (Windows;", "rv:1.8.0.12) Gecko/20070731 Ubuntu/dapper-security Firefox/1.5.0.12', 'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/1.2.9', \"Mozilla/5.0 (X11;", "requests from bs4 import BeautifulSoup import urllib.request import os import", "item.append(views[1].get_text()) item.append(answers[0].get_text()) item.append(asked_time) item.append(tags_str) item.append(active) item.append(question_content[0]) item.append(answer_contetnts) print(item) # updatetosql(item)", "Ubuntu/dapper-security Firefox/1.5.0.12', 'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/1.2.9', \"Mozilla/5.0 (X11; Linux i686)", "= str(views[2]) active = active_str[active_str.find(\"title=\\\"\") + 7:active_str.find(\"Z\")] answers = soup.select(\"#answers-header", "\"] user_agent = random.choice(user_agents) headers = { 'User-Agent': user_agent, 'Accept-Encoding':", "Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)', 'Mozilla/5.0 (X11; U; Linux i686;", "Firefox/2.0.0.11', 'Opera/9.25 (Windows NT 5.1; U; en)', 'Mozilla/4.0 (compatible; MSIE", "'Accept-Encoding': 'gzip'} req = requests.get(url=url, headers=headers) html_doc = req.text soup", ">span\") question_content = soup.select(\"div.post-text\") tags = soup.select(\"#question > div.post-layout >", "soup.select(\"h1 >a\") tags_str = \"\" item = [] for tag", "item.append(answers[0].get_text()) item.append(asked_time) item.append(tags_str) item.append(active) item.append(question_content[0]) item.append(answer_contetnts) print(item) # updatetosql(item) def", "item[2], item[3], item[5], item[6], ansers_text, item[0],) pass if __name__ ==", "item.append(asked_time) item.append(tags_str) item.append(active) item.append(question_content[0]) item.append(answer_contetnts) print(item) # updatetosql(item) def updatetosql(item):", "% (item[4], item[1], item[2], item[3], item[5], 
item[6], ansers_text, item[0],) pass", "> b\") active_str = str(views[2]) active = active_str[active_str.find(\"title=\\\"\") + 7:active_str.find(\"Z\")]", "import BeautifulSoup import urllib.request import os import random import time", "\" \") item.append(title[ 0].get_text()) # title views answersnum asked_time tag_str", "answer_contetnts = [] for i in range(1, len(question_content)): answer_contetnts.append(question_content[i]) for", "range(len(times)): if len(times[i].get_text()) > 1: asked_time = times[i].get(\"datetime\").replace(\"T\", \" \")", "\"WHERE (`question_id`='%s') \" \\ % (item[4], item[1], item[2], item[3], item[5],", "quest_content_ text answer_content_list item.append(views[1].get_text()) item.append(answers[0].get_text()) item.append(asked_time) item.append(tags_str) item.append(active) item.append(question_content[0]) item.append(answer_contetnts)", "= soup.select(\"div.post-text\") tags = soup.select(\"#question > div.post-layout > div.postcell.post-layout--right >", "\"Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.7 (KHTML, like Gecko) Ubuntu/11.04 Chromium/16.0.912.77", "tag.get_text() + \",\" answer_contetnts = [] for i in range(1,", "for i in range(1, len(question_content)): answer_contetnts.append(question_content[i]) for i in range(len(times)):", "user_agents = [ 'Mozilla/5.0 (Windows; U; Windows NT 5.1; it;", "bs4 import BeautifulSoup import urllib.request import os import random import", "U; en)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1;", "i686; en-US; rv:1.8.0.12) Gecko/20070731 Ubuntu/dapper-security Firefox/1.5.0.12', 'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/1.2.9',", "(KHTML, like Gecko) Ubuntu/11.04 Chromium/16.0.912.77 Chrome/16.0.912.77 Safari/535.7\", \"Mozilla/5.0 (X11; Ubuntu;", "MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET", "7:active_str.find(\"Z\")] answers = soup.select(\"#answers-header > div > h2 >span\") question_content", "\\ \"WHERE (`question_id`='%s') \" \\ % (item[4], 
item[1], item[2], item[3],", "+= tag.get_text() + \",\" answer_contetnts = [] for i in", "i in range(1, len(question_content)): answer_contetnts.append(question_content[i]) for i in range(len(times)): if", "html(url): user_agents = [ 'Mozilla/5.0 (Windows; U; Windows NT 5.1;", "1: asked_time = times[i].get(\"datetime\").replace(\"T\", \" \") item.append(title[ 0].get_text()) # title" ]
[ "X_train, y_train = (preprocess(train), to_categorical(train['is_iceberg'].as_matrix().reshape(-1, 1))) X_train_cv, X_valid, y_train_cv, y_valid", ":, :, np.newaxis], angl[:, :, :, np.newaxis]], axis=-1) return images", "/ stds[1] angl = (angl - means[2]) / stds[2] images", "stds=(5.33146, 4.5463958, 4.0815391476694414)): X_band_1 = np.array([np.array(band).astype(np.float32).reshape(75, 75) for band in", ":, np.newaxis], angl[:, :, :, np.newaxis]], axis=-1) return images def", "y_train_cv, y_valid = train_test_split(X_train, y_train, random_state=0xCAFFE, train_size=0.8) X_test = preprocess(test)", "angl = df['inc_angle'].map(lambda x: np.cos(x * np.pi / 180) if", "sklearn.model_selection import KFold, train_test_split def load_data(path): train = pd.read_json(os.path.join(path, \"./train.json\"))", "= y_train[train_indices] X_val = X_train[val_indices] y_val = y_train[val_indices] kfold_data.append((X_train_cv, y_train_cv,", "= (X_band_2 - means[1]) / stds[1] angl = (angl -", "as pd from keras.utils import to_categorical from sklearn.model_selection import KFold,", "y_train[val_indices] kfold_data.append((X_train_cv, y_train_cv, X_val, y_val)) X_test = preprocess(test) return (kfold_data,", "'na' else means[3]) angl = np.array([np.full(shape=(75, 75), fill_value=angel).astype(np.float32) for angel", "in angl]) X_band_1 = (X_band_1 - means[0]) / stds[0] X_band_2", "(X_band_1 - means[0]) / stds[0] X_band_2 = (X_band_2 - means[1])", "def prepare_data_cv(path): train, test = load_data(path) X_train, y_train = (preprocess(train),", "= preprocess(test) return (kfold_data, X_test) def prepare_data(path): train, test =", "return (kfold_data, X_test) def prepare_data(path): train, test = load_data(path) X_train,", "y_train, random_state=0xCAFFE, train_size=0.8) X_test = preprocess(test) return ([(X_train_cv, y_train_cv, X_valid,", "df['inc_angle'].map(lambda x: np.cos(x * np.pi / 180) if x !=", "X_test) def prepare_data(path): train, test = load_data(path) X_train, 
y_train =", "pd from keras.utils import to_categorical from sklearn.model_selection import KFold, train_test_split", "shuffle=True, random_state=0xCAFFE) for train_indices, val_indices in kf.split(y_train): X_train_cv = X_train[train_indices]", "= np.array([np.array(band).astype(np.float32).reshape(75, 75) for band in df[\"band_2\"]]) angl = df['inc_angle'].map(lambda", "4.5463958, 4.0815391476694414)): X_band_1 = np.array([np.array(band).astype(np.float32).reshape(75, 75) for band in df[\"band_1\"]])", "band in df[\"band_1\"]]) X_band_2 = np.array([np.array(band).astype(np.float32).reshape(75, 75) for band in", "X_test = preprocess(test) return (kfold_data, X_test) def prepare_data(path): train, test", "test = load_data(path) X_train, y_train = (preprocess(train), to_categorical(train['is_iceberg'].as_matrix().reshape(-1, 1))) kfold_data", "def prepare_data(path): train, test = load_data(path) X_train, y_train = (preprocess(train),", "X_val = X_train[val_indices] y_val = y_train[val_indices] kfold_data.append((X_train_cv, y_train_cv, X_val, y_val))", "np.cos(x * np.pi / 180) if x != 'na' else", "means[0]) / stds[0] X_band_2 = (X_band_2 - means[1]) / stds[1]", "fill_value=angel).astype(np.float32) for angel in angl]) X_band_1 = (X_band_1 - means[0])", "keras.utils import to_categorical from sklearn.model_selection import KFold, train_test_split def load_data(path):", "angl]) X_band_1 = (X_band_1 - means[0]) / stds[0] X_band_2 =", "= KFold(n_splits=5, shuffle=True, random_state=0xCAFFE) for train_indices, val_indices in kf.split(y_train): X_train_cv", "1))) X_train_cv, X_valid, y_train_cv, y_valid = train_test_split(X_train, y_train, random_state=0xCAFFE, train_size=0.8)", "return (train, test) def preprocess(df, means=(-22.159262, -24.953745, 40.021883465782651), stds=(5.33146, 4.5463958,", "in df[\"band_1\"]]) X_band_2 = np.array([np.array(band).astype(np.float32).reshape(75, 75) for band in df[\"band_2\"]])", "x != 'na' else means[3]) angl = 
np.array([np.full(shape=(75, 75), fill_value=angel).astype(np.float32)", "= np.array([np.array(band).astype(np.float32).reshape(75, 75) for band in df[\"band_1\"]]) X_band_2 = np.array([np.array(band).astype(np.float32).reshape(75,", "X_train[train_indices] y_train_cv = y_train[train_indices] X_val = X_train[val_indices] y_val = y_train[val_indices]", "as np import pandas as pd from keras.utils import to_categorical", "KFold, train_test_split def load_data(path): train = pd.read_json(os.path.join(path, \"./train.json\")) test =", "import pandas as pd from keras.utils import to_categorical from sklearn.model_selection", "angel in angl]) X_band_1 = (X_band_1 - means[0]) / stds[0]", "means[3]) angl = np.array([np.full(shape=(75, 75), fill_value=angel).astype(np.float32) for angel in angl])", "os import numpy as np import pandas as pd from", "stds[0] X_band_2 = (X_band_2 - means[1]) / stds[1] angl =", "np.array([np.array(band).astype(np.float32).reshape(75, 75) for band in df[\"band_2\"]]) angl = df['inc_angle'].map(lambda x:", "images def prepare_data_cv(path): train, test = load_data(path) X_train, y_train =", "np.array([np.full(shape=(75, 75), fill_value=angel).astype(np.float32) for angel in angl]) X_band_1 = (X_band_1", "= np.array([np.full(shape=(75, 75), fill_value=angel).astype(np.float32) for angel in angl]) X_band_1 =", "means[1]) / stds[1] angl = (angl - means[2]) / stds[2]", "images = np.concatenate([X_band_1[:, :, :, np.newaxis], X_band_2[:, :, :, np.newaxis],", "means=(-22.159262, -24.953745, 40.021883465782651), stds=(5.33146, 4.5463958, 4.0815391476694414)): X_band_1 = np.array([np.array(band).astype(np.float32).reshape(75, 75)", "train, test = load_data(path) X_train, y_train = (preprocess(train), to_categorical(train['is_iceberg'].as_matrix().reshape(-1, 1)))", "pandas as pd from keras.utils import to_categorical from sklearn.model_selection import", "= load_data(path) X_train, y_train = (preprocess(train), 
to_categorical(train['is_iceberg'].as_matrix().reshape(-1, 1))) X_train_cv, X_valid,", "pd.read_json(os.path.join(path, \"./train.json\")) test = pd.read_json(os.path.join(path, \"./test.json\")) return (train, test) def", "(angl - means[2]) / stds[2] images = np.concatenate([X_band_1[:, :, :,", "KFold(n_splits=5, shuffle=True, random_state=0xCAFFE) for train_indices, val_indices in kf.split(y_train): X_train_cv =", "y_val)) X_test = preprocess(test) return (kfold_data, X_test) def prepare_data(path): train,", "(train, test) def preprocess(df, means=(-22.159262, -24.953745, 40.021883465782651), stds=(5.33146, 4.5463958, 4.0815391476694414)):", "np.array([np.array(band).astype(np.float32).reshape(75, 75) for band in df[\"band_1\"]]) X_band_2 = np.array([np.array(band).astype(np.float32).reshape(75, 75)", "X_train, y_train = (preprocess(train), to_categorical(train['is_iceberg'].as_matrix().reshape(-1, 1))) kfold_data = [] kf", "= load_data(path) X_train, y_train = (preprocess(train), to_categorical(train['is_iceberg'].as_matrix().reshape(-1, 1))) kfold_data =", "def load_data(path): train = pd.read_json(os.path.join(path, \"./train.json\")) test = pd.read_json(os.path.join(path, \"./test.json\"))", "else means[3]) angl = np.array([np.full(shape=(75, 75), fill_value=angel).astype(np.float32) for angel in", "test = load_data(path) X_train, y_train = (preprocess(train), to_categorical(train['is_iceberg'].as_matrix().reshape(-1, 1))) X_train_cv,", "= (angl - means[2]) / stds[2] images = np.concatenate([X_band_1[:, :,", "75) for band in df[\"band_1\"]]) X_band_2 = np.array([np.array(band).astype(np.float32).reshape(75, 75) for", "X_band_2 = (X_band_2 - means[1]) / stds[1] angl = (angl", "kf = KFold(n_splits=5, shuffle=True, random_state=0xCAFFE) for train_indices, val_indices in kf.split(y_train):", "y_train = (preprocess(train), to_categorical(train['is_iceberg'].as_matrix().reshape(-1, 1))) X_train_cv, X_valid, y_train_cv, y_valid =", "= (X_band_1 - means[0]) / stds[0] 
X_band_2 = (X_band_2 -", "X_valid, y_train_cv, y_valid = train_test_split(X_train, y_train, random_state=0xCAFFE, train_size=0.8) X_test =", "test) def preprocess(df, means=(-22.159262, -24.953745, 40.021883465782651), stds=(5.33146, 4.5463958, 4.0815391476694414)): X_band_1", "- means[0]) / stds[0] X_band_2 = (X_band_2 - means[1]) /", "= X_train[val_indices] y_val = y_train[val_indices] kfold_data.append((X_train_cv, y_train_cv, X_val, y_val)) X_test", "= pd.read_json(os.path.join(path, \"./train.json\")) test = pd.read_json(os.path.join(path, \"./test.json\")) return (train, test)", ":, :, np.newaxis], X_band_2[:, :, :, np.newaxis], angl[:, :, :,", "(preprocess(train), to_categorical(train['is_iceberg'].as_matrix().reshape(-1, 1))) kfold_data = [] kf = KFold(n_splits=5, shuffle=True,", "y_train[train_indices] X_val = X_train[val_indices] y_val = y_train[val_indices] kfold_data.append((X_train_cv, y_train_cv, X_val,", "for band in df[\"band_2\"]]) angl = df['inc_angle'].map(lambda x: np.cos(x *", "* np.pi / 180) if x != 'na' else means[3])", "train_indices, val_indices in kf.split(y_train): X_train_cv = X_train[train_indices] y_train_cv = y_train[train_indices]", "np.pi / 180) if x != 'na' else means[3]) angl", "angl[:, :, :, np.newaxis]], axis=-1) return images def prepare_data_cv(path): train,", "in kf.split(y_train): X_train_cv = X_train[train_indices] y_train_cv = y_train[train_indices] X_val =", "random_state=0xCAFFE, train_size=0.8) X_test = preprocess(test) return ([(X_train_cv, y_train_cv, X_valid, y_valid)],", "import numpy as np import pandas as pd from keras.utils", "kfold_data.append((X_train_cv, y_train_cv, X_val, y_val)) X_test = preprocess(test) return (kfold_data, X_test)", "if x != 'na' else means[3]) angl = np.array([np.full(shape=(75, 75),", "/ 180) if x != 'na' else means[3]) angl =", "stds[1] angl = (angl - means[2]) / stds[2] images =", "kf.split(y_train): X_train_cv = X_train[train_indices] y_train_cv = y_train[train_indices] X_val = 
X_train[val_indices]", "import os import numpy as np import pandas as pd", "prepare_data_cv(path): train, test = load_data(path) X_train, y_train = (preprocess(train), to_categorical(train['is_iceberg'].as_matrix().reshape(-1,", "= X_train[train_indices] y_train_cv = y_train[train_indices] X_val = X_train[val_indices] y_val =", "np.concatenate([X_band_1[:, :, :, np.newaxis], X_band_2[:, :, :, np.newaxis], angl[:, :,", ":, np.newaxis], X_band_2[:, :, :, np.newaxis], angl[:, :, :, np.newaxis]],", "X_train_cv = X_train[train_indices] y_train_cv = y_train[train_indices] X_val = X_train[val_indices] y_val", "test = pd.read_json(os.path.join(path, \"./test.json\")) return (train, test) def preprocess(df, means=(-22.159262,", "y_train_cv, X_val, y_val)) X_test = preprocess(test) return (kfold_data, X_test) def", "-24.953745, 40.021883465782651), stds=(5.33146, 4.5463958, 4.0815391476694414)): X_band_1 = np.array([np.array(band).astype(np.float32).reshape(75, 75) for", "axis=-1) return images def prepare_data_cv(path): train, test = load_data(path) X_train,", "import KFold, train_test_split def load_data(path): train = pd.read_json(os.path.join(path, \"./train.json\")) test", "X_band_2[:, :, :, np.newaxis], angl[:, :, :, np.newaxis]], axis=-1) return", "X_band_2 = np.array([np.array(band).astype(np.float32).reshape(75, 75) for band in df[\"band_2\"]]) angl =", "for band in df[\"band_1\"]]) X_band_2 = np.array([np.array(band).astype(np.float32).reshape(75, 75) for band", "(X_band_2 - means[1]) / stds[1] angl = (angl - means[2])", "train = pd.read_json(os.path.join(path, \"./train.json\")) test = pd.read_json(os.path.join(path, \"./test.json\")) return (train,", "X_train[val_indices] y_val = y_train[val_indices] kfold_data.append((X_train_cv, y_train_cv, X_val, y_val)) X_test =", "X_band_1 = np.array([np.array(band).astype(np.float32).reshape(75, 75) for band in df[\"band_1\"]]) X_band_2 =", "import to_categorical from sklearn.model_selection import KFold, train_test_split 
def load_data(path): train", "X_train_cv, X_valid, y_train_cv, y_valid = train_test_split(X_train, y_train, random_state=0xCAFFE, train_size=0.8) X_test", "y_val = y_train[val_indices] kfold_data.append((X_train_cv, y_train_cv, X_val, y_val)) X_test = preprocess(test)", "y_train = (preprocess(train), to_categorical(train['is_iceberg'].as_matrix().reshape(-1, 1))) kfold_data = [] kf =", "stds[2] images = np.concatenate([X_band_1[:, :, :, np.newaxis], X_band_2[:, :, :,", ":, np.newaxis]], axis=-1) return images def prepare_data_cv(path): train, test =", "75), fill_value=angel).astype(np.float32) for angel in angl]) X_band_1 = (X_band_1 -", "(kfold_data, X_test) def prepare_data(path): train, test = load_data(path) X_train, y_train", "def preprocess(df, means=(-22.159262, -24.953745, 40.021883465782651), stds=(5.33146, 4.5463958, 4.0815391476694414)): X_band_1 =", "= (preprocess(train), to_categorical(train['is_iceberg'].as_matrix().reshape(-1, 1))) X_train_cv, X_valid, y_train_cv, y_valid = train_test_split(X_train,", "40.021883465782651), stds=(5.33146, 4.5463958, 4.0815391476694414)): X_band_1 = np.array([np.array(band).astype(np.float32).reshape(75, 75) for band", "= np.concatenate([X_band_1[:, :, :, np.newaxis], X_band_2[:, :, :, np.newaxis], angl[:,", "random_state=0xCAFFE) for train_indices, val_indices in kf.split(y_train): X_train_cv = X_train[train_indices] y_train_cv", "x: np.cos(x * np.pi / 180) if x != 'na'", "band in df[\"band_2\"]]) angl = df['inc_angle'].map(lambda x: np.cos(x * np.pi", "numpy as np import pandas as pd from keras.utils import", "= (preprocess(train), to_categorical(train['is_iceberg'].as_matrix().reshape(-1, 1))) kfold_data = [] kf = KFold(n_splits=5,", "to_categorical from sklearn.model_selection import KFold, train_test_split def load_data(path): train =", "75) for band in df[\"band_2\"]]) angl = df['inc_angle'].map(lambda x: np.cos(x", "(preprocess(train), to_categorical(train['is_iceberg'].as_matrix().reshape(-1, 1))) X_train_cv, 
X_valid, y_train_cv, y_valid = train_test_split(X_train, y_train,", "1))) kfold_data = [] kf = KFold(n_splits=5, shuffle=True, random_state=0xCAFFE) for", "y_valid = train_test_split(X_train, y_train, random_state=0xCAFFE, train_size=0.8) X_test = preprocess(test) return", "X_val, y_val)) X_test = preprocess(test) return (kfold_data, X_test) def prepare_data(path):", "train_size=0.8) X_test = preprocess(test) return ([(X_train_cv, y_train_cv, X_valid, y_valid)], X_test)", "np.newaxis], X_band_2[:, :, :, np.newaxis], angl[:, :, :, np.newaxis]], axis=-1)", "= train_test_split(X_train, y_train, random_state=0xCAFFE, train_size=0.8) X_test = preprocess(test) return ([(X_train_cv,", "/ stds[2] images = np.concatenate([X_band_1[:, :, :, np.newaxis], X_band_2[:, :,", "preprocess(test) return (kfold_data, X_test) def prepare_data(path): train, test = load_data(path)", "train_test_split def load_data(path): train = pd.read_json(os.path.join(path, \"./train.json\")) test = pd.read_json(os.path.join(path,", "!= 'na' else means[3]) angl = np.array([np.full(shape=(75, 75), fill_value=angel).astype(np.float32) for", "return images def prepare_data_cv(path): train, test = load_data(path) X_train, y_train", "X_band_1 = (X_band_1 - means[0]) / stds[0] X_band_2 = (X_band_2", "load_data(path): train = pd.read_json(os.path.join(path, \"./train.json\")) test = pd.read_json(os.path.join(path, \"./test.json\")) return", "from keras.utils import to_categorical from sklearn.model_selection import KFold, train_test_split def", "= df['inc_angle'].map(lambda x: np.cos(x * np.pi / 180) if x", "df[\"band_2\"]]) angl = df['inc_angle'].map(lambda x: np.cos(x * np.pi / 180)", "4.0815391476694414)): X_band_1 = np.array([np.array(band).astype(np.float32).reshape(75, 75) for band in df[\"band_1\"]]) X_band_2", "df[\"band_1\"]]) X_band_2 = np.array([np.array(band).astype(np.float32).reshape(75, 75) for band in df[\"band_2\"]]) angl", "= y_train[val_indices] kfold_data.append((X_train_cv, y_train_cv, 
X_val, y_val)) X_test = preprocess(test) return", "train_test_split(X_train, y_train, random_state=0xCAFFE, train_size=0.8) X_test = preprocess(test) return ([(X_train_cv, y_train_cv,", "\"./train.json\")) test = pd.read_json(os.path.join(path, \"./test.json\")) return (train, test) def preprocess(df,", "in df[\"band_2\"]]) angl = df['inc_angle'].map(lambda x: np.cos(x * np.pi /", "load_data(path) X_train, y_train = (preprocess(train), to_categorical(train['is_iceberg'].as_matrix().reshape(-1, 1))) X_train_cv, X_valid, y_train_cv,", "to_categorical(train['is_iceberg'].as_matrix().reshape(-1, 1))) kfold_data = [] kf = KFold(n_splits=5, shuffle=True, random_state=0xCAFFE)", "np.newaxis]], axis=-1) return images def prepare_data_cv(path): train, test = load_data(path)", "for train_indices, val_indices in kf.split(y_train): X_train_cv = X_train[train_indices] y_train_cv =", "angl = (angl - means[2]) / stds[2] images = np.concatenate([X_band_1[:,", "- means[1]) / stds[1] angl = (angl - means[2]) /", "pd.read_json(os.path.join(path, \"./test.json\")) return (train, test) def preprocess(df, means=(-22.159262, -24.953745, 40.021883465782651),", "- means[2]) / stds[2] images = np.concatenate([X_band_1[:, :, :, np.newaxis],", "np.newaxis], angl[:, :, :, np.newaxis]], axis=-1) return images def prepare_data_cv(path):", "[] kf = KFold(n_splits=5, shuffle=True, random_state=0xCAFFE) for train_indices, val_indices in", "\"./test.json\")) return (train, test) def preprocess(df, means=(-22.159262, -24.953745, 40.021883465782651), stds=(5.33146,", "= pd.read_json(os.path.join(path, \"./test.json\")) return (train, test) def preprocess(df, means=(-22.159262, -24.953745,", "from sklearn.model_selection import KFold, train_test_split def load_data(path): train = pd.read_json(os.path.join(path,", "to_categorical(train['is_iceberg'].as_matrix().reshape(-1, 1))) X_train_cv, X_valid, y_train_cv, y_valid = train_test_split(X_train, y_train, random_state=0xCAFFE,", "preprocess(df, 
means=(-22.159262, -24.953745, 40.021883465782651), stds=(5.33146, 4.5463958, 4.0815391476694414)): X_band_1 = np.array([np.array(band).astype(np.float32).reshape(75,", "angl = np.array([np.full(shape=(75, 75), fill_value=angel).astype(np.float32) for angel in angl]) X_band_1", "180) if x != 'na' else means[3]) angl = np.array([np.full(shape=(75,", "kfold_data = [] kf = KFold(n_splits=5, shuffle=True, random_state=0xCAFFE) for train_indices,", "val_indices in kf.split(y_train): X_train_cv = X_train[train_indices] y_train_cv = y_train[train_indices] X_val", "for angel in angl]) X_band_1 = (X_band_1 - means[0]) /", ":, :, np.newaxis]], axis=-1) return images def prepare_data_cv(path): train, test", "prepare_data(path): train, test = load_data(path) X_train, y_train = (preprocess(train), to_categorical(train['is_iceberg'].as_matrix().reshape(-1,", "y_train_cv = y_train[train_indices] X_val = X_train[val_indices] y_val = y_train[val_indices] kfold_data.append((X_train_cv,", "np import pandas as pd from keras.utils import to_categorical from", "/ stds[0] X_band_2 = (X_band_2 - means[1]) / stds[1] angl", "means[2]) / stds[2] images = np.concatenate([X_band_1[:, :, :, np.newaxis], X_band_2[:,", "load_data(path) X_train, y_train = (preprocess(train), to_categorical(train['is_iceberg'].as_matrix().reshape(-1, 1))) kfold_data = []", "= [] kf = KFold(n_splits=5, shuffle=True, random_state=0xCAFFE) for train_indices, val_indices" ]
[ "polyaxon experiment -xp 19 resources --gpu ``` Examples for getting", "Printer.print_success(\"Experiment updated.\") get_experiment_details(response) @experiment.command() @click.option('--yes', '-y', is_flag=True, default=False, help=\"Automatic yes", "get_resources(experiment.resources.to_dict(), header=\"Experiment resources:\") if experiment.declarations: Printer.print_header(\"Experiment declarations:\") dict_tabulate(experiment.declarations) if experiment.last_metric:", "stop \" \"experiment `{}`\".format(_experiment)): click.echo('Existing without stopping experiment.') sys.exit(0) try:", "not yes and not click.confirm(\"Are sure you want to stop", "get_experiment_logs(): if past: try: response = PolyaxonClient().experiment.logs( user, project_name, _experiment,", "2 update --description=\"new description for my experiments\" ``` \\b ```bash", "Printer.print_error('Could not get logs for job `{}`.'.format(_job)) Printer.print_error('Error message `{}`.'.format(e))", "as e: Printer.print_error('Could not get jobs for experiment `{}`.'.format(_experiment)) Printer.print_error('Error", "Printer.print_error('Could not bookmark experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) Printer.print_success(\"Experiment", "[Caching](/references/polyaxon-cli/#caching) Examples: \\b ```bash $ polyaxon experiment --experiment=1 restart ```", "\\b ```bash $ polyaxon experiment -xp 19 resources -j 1", "\\\"yes\\\" as answer to all prompts and run non-interactively.\") @click.pass_context", "job logs: \\b ```bash $ polyaxon experiment -xp 1 -j", "unbookmark experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) Printer.print_success(\"Experiment is unbookmarked.\")", "ctx.obj.get('experiment')) if not yes and not click.confirm(\"Are sure you want", "experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) @experiment.command() 
@click.option('--page', type=int, help=\"To", "project_name, _experiment, _job, message_handler=message_handler) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:", "get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) try: if copy: response = PolyaxonClient().experiment.copy( user, project_name,", "for getting an experiment job: \\b ```bash $ polyaxon experiment", "\\b ```bash $ polyaxon experiment -xp 1 -p alain/cats-vs-dogs get", "polyaxon experiment statuses -j 3 ``` \\b ```bash $ polyaxon", "bookmark ``` \\b ```bash $ polyaxon experiment -xp 2 bookmark", "\\b ```bash $ polyaxon experiment -xp 1 --project=cats-vs-dogs get -j", "experiment.') sys.exit(0) try: PolyaxonClient().experiment.stop(user, project_name, _experiment) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException)", "of statuses.\") @click.pass_context @clean_outputs def statuses(ctx, job, page): \"\"\"Get experiment", "if description: update_dict['description'] = description tags = validate_tags(tags) if tags:", "PolyaxonShouldExitError from polyaxon_cli.logger import clean_outputs from polyaxon_cli.managers.experiment import ExperimentManager from", "show_timestamp=not hide_time, stream=False)(response.content.decode().split('\\n')) print() if not follow: return except (PolyaxonHTTPError,", "`{}`.'.format(e)) sys.exit(1) try: PolyaxonClient().experiment_job.logs( user, project_name, _experiment, _job, message_handler=get_logs_handler(handle_job_info=True, show_timestamp=not", "humanize_values=True, exclude_attrs=['uuid', 'definition', 'experiment', 'unique_name', 'resources'] )) Printer.print_header(\"Job info:\") dict_tabulate(response)", "_experiment, config=config, update_code=update_code) Printer.print_success('Experiment was copied with id {}'.format(response.id)) else:", "2 unbookmark ``` \"\"\" user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), 
ctx.obj.get('experiment'))", "as e: Printer.print_error('Could not load experiment `{}` info.'.format(_experiment)) Printer.print_error('Error message", "project_name, _experiment, message_handler=get_logs_handler(handle_job_info=True, show_timestamp=not hide_time)) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as", "copy: response = PolyaxonClient().experiment.copy( user, project_name, _experiment, config=config, update_code=update_code) Printer.print_success('Experiment", "pylint:disable=redefined-outer-name if experiment.description: Printer.print_header(\"Experiment description:\") click.echo('{}\\n'.format(experiment.description)) if experiment.resources: get_resources(experiment.resources.to_dict(), header=\"Experiment", "(PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not load experiment `{}`", "PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not get jobs for experiment", "project_name, _experiment, _job) cache.cache(config_manager=ExperimentJobManager, response=response) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as", "``` Examples for getting experiment job logs: \\b ```bash $", "for getting experiment job logs: \\b ```bash $ polyaxon experiment", "experiment. Uses [Caching](/references/polyaxon-cli/#caching) Examples: \\b ```bash $ polyaxon experiment unbookmark", "is bookmarked.\") @experiment.command() @click.pass_context @clean_outputs def unbookmark(ctx): \"\"\"Unbookmark experiment. Uses", "try: message_handler = Printer.gpu_resources if gpu else Printer.resources PolyaxonClient().experiment_job.resources(user, project_name,", "def resume(ctx, file, u): # pylint:disable=redefined-builtin \"\"\"Resume experiment. 
Uses [Caching](/references/polyaxon-cli/#caching)", "\\b ```bash $ polyaxon experiment -xp 19 resources --gpu ```", "sys.exit(1) Printer.print_success(\"Experiment updated.\") get_experiment_details(response) @experiment.command() @click.option('--yes', '-y', is_flag=True, default=False, help=\"Automatic", "Printer.print_warning('No argument was provided to update the experiment.') sys.exit(0) try:", "PolyaxonClient().experiment.restart( user, project_name, _experiment, config=config, update_code=update_code) Printer.print_success('Experiment was restarted with", "is_flag=True, default=False, help=\"Whether or not to hide timestamps from the", "sys.exit(1) meta = get_meta_response(response) if meta: Printer.print_header('Jobs for experiment `{}`.'.format(_experiment))", "_experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) if job: _job = get_experiment_job_or_local(job) get_experiment_job_statuses()", "except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not stop experiment", "from the log stream.\") @click.pass_context @clean_outputs def logs(ctx, job, past,", "e: Printer.print_error('Could not resume experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1)", "Examples for getting an experiment: \\b ```bash $ polyaxon experiment", "if response.status_code == 204: Printer.print_success(\"Experiment `{}` was delete successfully\".format(_experiment)) @experiment.command()", "mnist logs ``` Examples for getting experiment job logs: \\b", "= get_experiment_job_or_local(job) get_experiment_job() else: get_experiment() @experiment.command() @click.pass_context @clean_outputs def delete(ctx):", "project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) if job: _job = get_experiment_job_or_local(job)", "as answer to all prompts and run non-interactively.\") 
@click.pass_context @clean_outputs", "# pylint:disable=redefined-builtin \"\"\"Restart experiment. Uses [Caching](/references/polyaxon-cli/#caching) Examples: \\b ```bash $", "dict_tabulate(objects, is_list_dict=True) @experiment.command() @click.option('--job', '-j', type=int, help=\"The job id.\") @click.option('--page',", "Printer.print_header('Navigation:') dict_tabulate(meta) else: Printer.print_header('No statuses found for job `{}`.'.format(_job)) objects", "\"\"\" user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) try: PolyaxonClient().experiment.unbookmark(user, project_name,", "None) dict_tabulate(objects, is_list_dict=True) def get_experiment_job_statuses(): try: response = PolyaxonClient().experiment_job.get_statuses(user, project_name,", "for experiment `{}`.'.format(_experiment)) Printer.print_error( 'Error message `{}`.'.format(e)) sys.exit(1) try: PolyaxonClient().experiment.logs(", "u): # pylint:disable=redefined-builtin \"\"\"Restart experiment. 
Uses [Caching](/references/polyaxon-cli/#caching) Examples: \\b ```bash", "user, project_name, _experiment, message_handler=message_handler) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:", "response = PolyaxonClient().experiment_job.get_job(user, project_name, _experiment, _job) cache.cache(config_manager=ExperimentJobManager, response=response) except (PolyaxonHTTPError,", "'project', 'experiments', 'description', 'declarations', 'last_metric', 'resources', 'jobs', 'run_env' ]) Printer.print_header(\"Experiment", "message `{}`.'.format(e)) sys.exit(1) def get_experiment_job_logs(): if past: try: response =", "not get logs for job `{}`.'.format(_job)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1)", "```bash $ polyaxon experiment unbookmark ``` \\b ```bash $ polyaxon", "from polyaxon_cli.cli.getters.experiment import ( get_experiment_job_or_local, get_project_experiment_or_local ) from polyaxon_cli.cli.upload import", "update_code=update_code) Printer.print_success('Experiment was resumed with id {}'.format(response.id)) except (PolyaxonHTTPError, PolyaxonShouldExitError,", "answer to all prompts and run non-interactively.\") @click.pass_context @clean_outputs def", "_experiment, message_handler=message_handler) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not", "PolyaxonShouldExitError, PolyaxonClientException) as e: if not follow: Printer.print_error( 'Could not", "experiment before restarting.\") @click.option('--file', '-f', multiple=True, type=click.Path(exists=True), help=\"The polyaxon files", "Printer.print_success('Experiment was restarted with id {}'.format(response.id)) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException)", "```bash $ polyaxon experiment -xp 1 outputs ``` \"\"\" user,", "experiment --experiment=1 restart ``` \"\"\" config = None update_code =", "@clean_outputs def bookmark(ctx): \"\"\"Bookmark 
experiment. Uses [Caching](/references/polyaxon-cli/#caching) Examples: \\b ```bash", "meta = get_meta_response(response) if meta: Printer.print_header('Statuses for Job `{}`.'.format(_job)) Printer.print_header('Navigation:')", "logs.\") @click.option('--hide_time', is_flag=True, default=False, help=\"Whether or not to hide timestamps", "the experiment.') @click.option('--tags', type=str, help='Tags of the experiment, comma separated", "resumed with id {}'.format(response.id)) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:", "19 resources ``` For GPU resources \\b ```bash $ polyaxon", "get_logs_handler from polyaxon_cli.utils.validation import validate_tags from polyaxon_client.exceptions import PolyaxonClientException def", "`{}`.'.format(e)) sys.exit(1) meta = get_meta_response(response) if meta: Printer.print_header('Statuses for experiment", "response = PolyaxonClient().experiment.resume( user, project_name, _experiment, config=config, update_code=update_code) Printer.print_success('Experiment was", "experiment.last_metric: Printer.print_header(\"Experiment last metrics:\") dict_tabulate(experiment.last_metric) response = experiment.to_light_dict( humanize_values=True, exclude_attrs=[", "```bash $ polyaxon experiment --experiment=1 get --job=10 ``` \\b ```bash", "`{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) Printer.print_success(\"Experiment is being stopped.\") @experiment.command()", "@click.pass_context @clean_outputs def restart(ctx, copy, file, u): # pylint:disable=redefined-builtin \"\"\"Restart", "polyaxon experiment -xp 2 update --description=\"new description for my experiments\"", "= True user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) try: if", "statuses.\") @click.pass_context @clean_outputs def statuses(ctx, job, page): \"\"\"Get experiment or", "is_flag=True, help=\"Show the past logs.\") 
@click.option('--follow', '-f', is_flag=True, default=False, help=\"Stream", "\"\"\" def get_experiment_statuses(): try: response = PolyaxonClient().experiment.get_statuses( user, project_name, _experiment,", "import cache from polyaxon_cli.utils.formatting import ( Printer, dict_tabulate, get_meta_response, get_resources,", "= tags if not update_dict: Printer.print_warning('No argument was provided to", "\"\"\"Get experiment or experiment job logs. Uses [Caching](/references/polyaxon-cli/#caching) Examples for", "_experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) try: response = PolyaxonClient().experiment.resume( user, project_name,", "experiment `{}`.'.format(_experiment)) Printer.print_header('Navigation:') dict_tabulate(meta) else: Printer.print_header('No statuses found for experiment", "`{}`.'.format(e)) sys.exit(1) meta = get_meta_response(response) if meta: Printer.print_header('Statuses for Job", "\"\"\" user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) try: PolyaxonClient().experiment.download_outputs(user, project_name,", "upload if u: ctx.invoke(upload, sync=False) update_code = True user, project_name,", "3 ``` \\b ```bash $ polyaxon experiment -xp 1 statuses", "the project, could be none.') @click.option('--description', type=str, help='Description of the", "experiment, comma separated values.') @click.pass_context @clean_outputs def update(ctx, name, description,", "getting experiment resources: \\b ```bash $ polyaxon experiment -xp 19", "type=str, help='Name of the experiment, must be unique within the", "getting an experiment: \\b ```bash $ polyaxon experiment get #", "``` \"\"\" user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) try: PolyaxonClient().experiment.bookmark(user,", "PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not get 
resources for job", "polyaxon_cli.managers.experiment_job import ExperimentJobManager from polyaxon_cli.utils import cache from polyaxon_cli.utils.formatting import", "not unbookmark experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) Printer.print_success(\"Experiment is", "objects.pop('job', None) dict_tabulate(objects, is_list_dict=True) page = page or 1 user,", "job, page): \"\"\"Get experiment or experiment job statuses. Uses [Caching](/references/polyaxon-cli/#caching)", "``` \\b ```bash $ polyaxon experiment -xp 1 statuses ```", "_experiment, _job, page=page) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could", "experiment logs: \\b ```bash $ polyaxon experiment logs ``` \\b", "polyaxon experiment -xp 2 bookmark ``` \"\"\" user, project_name, _experiment", "experiment `{}`.'.format(_experiment)) objects = list_dicts_to_tabulate( [Printer.add_status_color(o.to_light_dict(humanize_values=True), status_key='status') for o in", "$ polyaxon experiment -xp 1 -p alain/cats-vs-dogs get -j 2", "_experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) try: PolyaxonClient().experiment.download_outputs(user, project_name, _experiment) except (PolyaxonHTTPError,", "job `{}`.'.format(_job)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) user, project_name, _experiment =", "1 ``` \"\"\" def get_experiment_statuses(): try: response = PolyaxonClient().experiment.get_statuses( user,", "polyaxon experiment -xp 1 -p alain/cats-vs-dogs get -j 2 ```", "PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not restart experiment `{}`.'.format(_experiment)) Printer.print_error('Error", "paginate through the list of jobs.\") @click.pass_context @clean_outputs def jobs(ctx,", "`{}`.'.format(e)) sys.exit(1) def get_experiment_job_resources(): try: message_handler = Printer.gpu_resources if 
gpu", "experiment or experiment job logs. Uses [Caching](/references/polyaxon-cli/#caching) Examples for getting", "sys.exit(1) if response.status_code == 204: Printer.print_success(\"Experiment `{}` was delete successfully\".format(_experiment))", "(PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not update experiment `{}`.'.format(_experiment))", "'config', 'project', 'experiments', 'description', 'declarations', 'last_metric', 'resources', 'jobs', 'run_env' ])", "id.\") @click.option('--gpu', '-g', is_flag=True, help=\"List experiment GPU resources.\") @click.pass_context @clean_outputs", "1 ``` For GPU resources \\b ```bash $ polyaxon experiment", "type=str, help=\"The project name, e.g. 'mnist' or 'adam/mnist'.\") @click.option('--experiment', '-xp',", "is_flag=True, default=False, help=\"Stream logs after showing past logs.\") @click.option('--hide_time', is_flag=True,", "``` \"\"\" def get_experiment_resources(): try: message_handler = Printer.gpu_resources if gpu", "e: Printer.print_error('Could not restart experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1)", "```bash $ polyaxon experiment -xp 1 --project=cats-vs-dogs get -j 2", "(PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not resume experiment `{}`.'.format(_experiment))", "caching ExperimentManager.purge() except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not", "statuses ``` \\b ```bash $ polyaxon experiment -xp 1 statuses", "config=config, update_code=update_code) Printer.print_success('Experiment was restarted with id {}'.format(response.id)) except (PolyaxonHTTPError,", "get_resources, list_dicts_to_tabulate ) from polyaxon_cli.utils.log_handler import get_logs_handler from polyaxon_cli.utils.validation import", "```bash $ polyaxon experiment delete ``` \"\"\" user, project_name, 
_experiment", "get -j 1 # if experiment is cached ``` \\b", "'-j', type=int, help=\"The job id.\") @click.option('--page', type=int, help=\"To paginate through", "ctx.obj or {} ctx.obj['project'] = project ctx.obj['experiment'] = experiment @experiment.command()", "objects = list_dicts_to_tabulate( [Printer.add_status_color(o.to_light_dict(humanize_values=True), status_key='status') for o in response['results']]) if", "experiment -xp 2 stop ``` \"\"\" user, project_name, _experiment =", "= project ctx.obj['experiment'] = experiment @experiment.command() @click.option('--job', '-j', type=int, help=\"The", "project, could be none.') @click.option('--description', type=str, help='Description of the experiment.')", "provided to update the experiment.') sys.exit(0) try: response = PolyaxonClient().experiment.update_experiment(", "response.status_code == 204: Printer.print_success(\"Experiment `{}` was delete successfully\".format(_experiment)) @experiment.command() @click.option('--name',", "PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not stop experiment `{}`.'.format(_experiment)) Printer.print_error('Error", "```bash $ polyaxon experiment -xp 1 statuses --job 1 ```", "@click.pass_context @clean_outputs def logs(ctx, job, past, follow, hide_time): \"\"\"Get experiment", "tags if not update_dict: Printer.print_warning('No argument was provided to update", "user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) if not yes and", "--experiment=1 jobs ``` \"\"\" user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment'))", "objects.pop('experiment', None) dict_tabulate(objects, is_list_dict=True) @experiment.command() @click.option('--job', '-j', type=int, help=\"The job", "resources -j 1 ``` For GPU resources \\b ```bash $", "ctx.obj.get('experiment')) try: PolyaxonClient().experiment.unbookmark(user, project_name, 
_experiment) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as", "meta: Printer.print_header('Statuses for experiment `{}`.'.format(_experiment)) Printer.print_header('Navigation:') dict_tabulate(meta) else: Printer.print_header('No statuses", "help=\"The experiment id number.\") @click.pass_context @clean_outputs def experiment(ctx, project, experiment):", "\"\"\"Unbookmark experiment. Uses [Caching](/references/polyaxon-cli/#caching) Examples: \\b ```bash $ polyaxon experiment", "--experiment=1 restart ``` \"\"\" config = None update_code = None", "'-y', is_flag=True, default=False, help=\"Automatic yes to prompts. \" \"Assume \\\"yes\\\"", "except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not load experiment", "except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not bookmark experiment", "None if file: config = rhea.read(file) # Check if we", "`{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) Printer.print_success(\"Experiment updated.\") get_experiment_details(response) @experiment.command() @click.option('--yes',", "try: response = PolyaxonClient().experiment.update_experiment( user, project_name, _experiment, update_dict) except (PolyaxonHTTPError,", "not restart experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) @experiment.command() @click.option('--file',", "click.echo('Existing without stopping experiment.') sys.exit(0) try: PolyaxonClient().experiment.stop(user, project_name, _experiment) except", "import absolute_import, division, print_function import sys import click import rhea", "e: Printer.print_error('Could not get logs for job `{}`.'.format(_job)) Printer.print_error('Error message", "to stop \" \"experiment `{}`\".format(_experiment)): click.echo('Existing without stopping experiment.') sys.exit(0)", 
"`{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) @experiment.command() @click.option('--page', type=int, help=\"To paginate", "e: Printer.print_error('Could not get logs for experiment `{}`.'.format(_experiment)) Printer.print_error('Error message", "yes and not click.confirm(\"Are sure you want to stop \"", "print_function import sys import click import rhea from polyaxon_cli.cli.getters.experiment import", "not click.confirm(\"Are sure you want to stop \" \"experiment `{}`\".format(_experiment)):", "get_experiment_job_logs(): if past: try: response = PolyaxonClient().experiment_job.logs( user, project_name, _experiment,", "get job `{}`.'.format(_job)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) if response.resources: get_resources(response.resources.to_dict(),", "config = None update_code = None if file: config =", "with id {}'.format(response.id)) else: response = PolyaxonClient().experiment.restart( user, project_name, _experiment,", "get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) if job: _job = get_experiment_job_or_local(job) get_experiment_job() else: get_experiment()", "@click.pass_context @clean_outputs def statuses(ctx, job, page): \"\"\"Get experiment or experiment", "`{}`.'.format(_job)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) if response.resources: get_resources(response.resources.to_dict(), header=\"Job resources:\")", "Printer.gpu_resources if gpu else Printer.resources PolyaxonClient().experiment_job.resources(user, project_name, _experiment, _job, message_handler=message_handler)", "for my experiments\" ``` \\b ```bash $ polyaxon experiment -xp", "update with.\") @click.option('-u', is_flag=True, default=False, help=\"To upload the repo before", "get --job=10 ``` \\b ```bash $ polyaxon experiment -xp 1", "id {}'.format(response.id)) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: 
Printer.print_error('Could not", "for experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) meta = get_meta_response(response)", "help=\"The job id.\") @click.option('--gpu', '-g', is_flag=True, help=\"List experiment GPU resources.\")", "[Caching](/references/polyaxon-cli/#caching) Example: \\b ```bash $ polyaxon experiment delete ``` \"\"\"", "polyaxon_cli.client.exceptions import PolyaxonHTTPError, PolyaxonShouldExitError from polyaxon_cli.logger import clean_outputs from polyaxon_cli.managers.experiment", "(PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not restart experiment `{}`.'.format(_experiment))", "e: Printer.print_error('Could not download outputs for experiment `{}`.'.format(_experiment)) Printer.print_error('Error message", "-xp 19 resources -j 1 ``` For GPU resources \\b", "def get_experiment_job(): try: response = PolyaxonClient().experiment_job.get_job(user, project_name, _experiment, _job) cache.cache(config_manager=ExperimentJobManager,", "$ polyaxon experiment -xp 1 outputs ``` \"\"\" user, project_name,", "'description', 'declarations', 'last_metric', 'resources', 'jobs', 'run_env' ]) Printer.print_header(\"Experiment info:\") dict_tabulate(Printer.add_status_color(response))", "experiment -xp 1 -p alain/cats-vs-dogs get -j 2 ``` \"\"\"", "@experiment.command() @click.pass_context @clean_outputs def bookmark(ctx): \"\"\"Bookmark experiment. Uses [Caching](/references/polyaxon-cli/#caching) Examples:", "experiment job logs. 
Uses [Caching](/references/polyaxon-cli/#caching) Examples for getting experiment logs:", "2 ``` \\b ```bash $ polyaxon experiment -xp 1 -p", "Uses [Caching](/references/polyaxon-cli/#caching) Examples: \\b ```bash $ polyaxon experiment --experiment=1 resume", "e: Printer.print_error('Could not load experiment `{}` info.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e))", "Examples: \\b ```bash $ polyaxon experiment -xp 1 outputs ```", "job logs. Uses [Caching](/references/polyaxon-cli/#caching) Examples for getting experiment logs: \\b", "ExperimentJobManager from polyaxon_cli.utils import cache from polyaxon_cli.utils.formatting import ( Printer,", "ctx.obj.get('experiment')) if job: _job = get_experiment_job_or_local(job) get_experiment_job() else: get_experiment() @experiment.command()", "if name: update_dict['name'] = name if description: update_dict['description'] = description", "none.') @click.option('--description', type=str, help='Description of the experiment.') @click.option('--tags', type=str, help='Tags", "update_dict['description'] = description tags = validate_tags(tags) if tags: update_dict['tags'] =", "e: Printer.print_error('Could get status for experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e))", "update_code = True user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) try:", "except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not update experiment", "job id.\") @click.pass_context @clean_outputs def get(ctx, job): \"\"\"Get experiment or", "cached ``` \\b ```bash $ polyaxon experiment --experiment=1 get --job=10", "PolyaxonClient().experiment.stop(user, project_name, _experiment) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could", "\\b ```bash $ polyaxon experiment stop ``` \\b ```bash $", "polyaxon 
experiment delete ``` \"\"\" user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'),", "experiment --experiment=1 jobs ``` \"\"\" user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'),", "absolute_import, division, print_function import sys import click import rhea from", "polyaxon_cli.managers.experiment import ExperimentManager from polyaxon_cli.managers.experiment_job import ExperimentJobManager from polyaxon_cli.utils import", "stopping experiment.') sys.exit(0) try: PolyaxonClient().experiment.stop(user, project_name, _experiment) except (PolyaxonHTTPError, PolyaxonShouldExitError,", "$ polyaxon experiment stop ``` \\b ```bash $ polyaxon experiment", "description, tags): \"\"\"Update experiment. Uses [Caching](/references/polyaxon-cli/#caching) Examples: \\b ```bash $", "= get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) if job: _job = get_experiment_job_or_local(job) get_experiment_job() else:", "from polyaxon_cli.managers.experiment_job import ExperimentJobManager from polyaxon_cli.utils import cache from polyaxon_cli.utils.formatting", "_experiment) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not download", "if experiment.declarations: Printer.print_header(\"Experiment declarations:\") dict_tabulate(experiment.declarations) if experiment.last_metric: Printer.print_header(\"Experiment last metrics:\")", "if experiment.resources: get_resources(experiment.resources.to_dict(), header=\"Experiment resources:\") if experiment.declarations: Printer.print_header(\"Experiment declarations:\") dict_tabulate(experiment.declarations)", "page=page) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could get status", "job: _job = get_experiment_job_or_local(job) get_experiment_job_statuses() else: get_experiment_statuses() @experiment.command() 
@click.option('--job', '-j',", "user, project_name, _experiment, page=page) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:", "_experiment, page=page) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not", "resume(ctx, file, u): # pylint:disable=redefined-builtin \"\"\"Resume experiment. Uses [Caching](/references/polyaxon-cli/#caching) Examples:", "Job `{}`.'.format(_job)) Printer.print_header('Navigation:') dict_tabulate(meta) else: Printer.print_header('No statuses found for job", "Examples: \\b ```bash $ polyaxon experiment bookmark ``` \\b ```bash", "``` \\b ```bash $ polyaxon experiment --experiment=1 get --job=10 ```", "polyaxon experiment get # if experiment is cached ``` \\b", "page or 1 try: response = PolyaxonClient().experiment.list_jobs( user, project_name, _experiment,", "logs for experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) def get_experiment_job_logs():", "message `{}`.'.format(e)) sys.exit(1) def get_experiment_job_resources(): try: message_handler = Printer.gpu_resources if", "_experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) try: PolyaxonClient().experiment.bookmark(user, project_name, _experiment) except (PolyaxonHTTPError,", "__future__ import absolute_import, division, print_function import sys import click import", "get_experiment_job_resources(): try: message_handler = Printer.gpu_resources if gpu else Printer.resources PolyaxonClient().experiment_job.resources(user,", "@click.option('--experiment', '-xp', type=int, help=\"The experiment id number.\") @click.pass_context @clean_outputs def", "user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) if job: _job =", "@experiment.command() @click.option('--yes', '-y', is_flag=True, default=False, help=\"Automatic yes to prompts. 
\"", "return except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: if not follow:", "\\b ```bash $ polyaxon experiment -xp 2 update --tags=\"foo, bar\"", "response=response) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not load", "sync=False) update_code = True user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment'))", "-xp 1 statuses --job 1 ``` \"\"\" def get_experiment_statuses(): try:", "Printer.print_success('Files downloaded.') @experiment.command() @click.pass_context @clean_outputs def bookmark(ctx): \"\"\"Bookmark experiment. Uses", "u: ctx.invoke(upload, sync=False) update_code = True user, project_name, _experiment =", "@click.option('-u', is_flag=True, default=False, help=\"To upload the repo before resuming.\") @click.pass_context", "dict_tabulate(response) user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) if job: _job", "could be none.') @click.option('--description', type=str, help='Description of the experiment.') @click.option('--tags',", "`{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) @experiment.command() @click.option('--file', '-f', multiple=True, type=click.Path(exists=True),", "from polyaxon_cli.client import PolyaxonClient from polyaxon_cli.client.exceptions import PolyaxonHTTPError, PolyaxonShouldExitError from", "default=False, help=\"Automatic yes to prompts. 
\" \"Assume \\\"yes\\\" as answer", "polyaxon experiment -xp 2 stop ``` \"\"\" user, project_name, _experiment", "sys.exit(1) @experiment.command() @click.option('--page', type=int, help=\"To paginate through the list of", "default=False, help=\"Stream logs after showing past logs.\") @click.option('--hide_time', is_flag=True, default=False,", "project_name, _experiment, _job, page=page) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:", "bookmark(ctx): \"\"\"Bookmark experiment. Uses [Caching](/references/polyaxon-cli/#caching) Examples: \\b ```bash $ polyaxon", "'-xp', type=int, help=\"The experiment id number.\") @click.pass_context @clean_outputs def experiment(ctx,", "experiment -xp 2 update --description=\"new description for my experiments\" ```", "get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) if job: _job = get_experiment_job_or_local(job) get_experiment_job_logs() else: get_experiment_logs()", "get_experiment_job_or_local(job) get_experiment_job_logs() else: get_experiment_logs() @experiment.command() @click.pass_context @clean_outputs def outputs(ctx): \"\"\"Download", "$ polyaxon experiment -xp 1 -p alain/cats-vs-dogs get ``` Examples", "\"\"\"Update experiment. Uses [Caching](/references/polyaxon-cli/#caching) Examples: \\b ```bash $ polyaxon experiment", "\"\"\"Bookmark experiment. Uses [Caching](/references/polyaxon-cli/#caching) Examples: \\b ```bash $ polyaxon experiment", "for experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) def get_experiment_job_resources(): try:", "`{}`.'.format(e)) sys.exit(1) if response.status_code == 204: Printer.print_success(\"Experiment `{}` was delete", "get_experiment() @experiment.command() @click.pass_context @clean_outputs def delete(ctx): \"\"\"Delete experiment. 
Uses [Caching](/references/polyaxon-cli/#caching)", "not bookmark experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) Printer.print_success(\"Experiment is", "is_list_dict=True) @experiment.command() @click.option('--job', '-j', type=int, help=\"The job id.\") @click.option('--page', type=int,", "polyaxon experiment --experiment=1 get ``` \\b ```bash $ polyaxon experiment", "except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not get jobs", "Uses [Caching](/references/polyaxon-cli/#caching) Examples for getting experiment logs: \\b ```bash $", "19 resources -j 1 ``` For GPU resources \\b ```bash", "\"\"\" def get_experiment_resources(): try: message_handler = Printer.gpu_resources if gpu else", "if meta: Printer.print_header('Statuses for Job `{}`.'.format(_job)) Printer.print_header('Navigation:') dict_tabulate(meta) else: Printer.print_header('No", "user, project_name, _experiment, config=config, update_code=update_code) Printer.print_success('Experiment was resumed with id", "if we need to upload if u: ctx.invoke(upload, sync=False) update_code", "\"\"\" user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) if not click.confirm(\"Are", "PolyaxonClientException) as e: if not follow: Printer.print_error( 'Could not get", "ctx.obj.get('experiment')) page = page or 1 try: response = PolyaxonClient().experiment.list_jobs(", "try: response = PolyaxonClient().experiment.resume( user, project_name, _experiment, config=config, update_code=update_code) Printer.print_success('Experiment", "else: response = PolyaxonClient().experiment.restart( user, project_name, _experiment, config=config, update_code=update_code) Printer.print_success('Experiment", "get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) update_dict = {} if name: update_dict['name'] = name", "\"\"\" user, 
project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) update_dict = {}", "ctx.invoke(upload, sync=False) update_code = True user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'),", "message `{}`.'.format(e)) sys.exit(1) get_experiment_details(response) def get_experiment_job(): try: response = PolyaxonClient().experiment_job.get_job(user,", "Examples: \\b ```bash $ polyaxon experiment unbookmark ``` \\b ```bash", "@click.pass_context @clean_outputs def resources(ctx, job, gpu): \"\"\"Get experiment or experiment", "past logs.\") @click.option('--hide_time', is_flag=True, default=False, help=\"Whether or not to hide", "found for experiment `{}`.'.format(_experiment)) objects = [Printer.add_status_color(o.to_light_dict(humanize_values=True)) for o in", "@experiment.command() @click.option('--page', type=int, help=\"To paginate through the list of jobs.\")", "-xp 19 resources -j 1 --gpu ``` \"\"\" def get_experiment_resources():", "@experiment.command() @click.option('--job', '-j', type=int, help=\"The job id.\") @click.pass_context @clean_outputs def", "`{}`\".format(_experiment)): click.echo('Existing without deleting experiment.') sys.exit(1) try: response = PolyaxonClient().experiment.delete_experiment(", "is cached ``` \\b ```bash $ polyaxon experiment --experiment=1 get", "`{}`.'.format(e)) sys.exit(1) get_experiment_details(response) def get_experiment_job(): try: response = PolyaxonClient().experiment_job.get_job(user, project_name,", "experiment.to_light_dict( humanize_values=True, exclude_attrs=[ 'uuid', 'config', 'project', 'experiments', 'description', 'declarations', 'last_metric',", "cache from polyaxon_cli.utils.formatting import ( Printer, dict_tabulate, get_meta_response, get_resources, list_dicts_to_tabulate", "multiple=True, type=click.Path(exists=True), help=\"The polyaxon files to update with.\") @click.option('-u', is_flag=True,", "list_dicts_to_tabulate( 
[Printer.add_status_color(o.to_light_dict(humanize_values=True), status_key='status') for o in response['results']]) if objects: Printer.print_header(\"Statuses:\")", "ctx.obj.get('experiment')) if job: _job = get_experiment_job_or_local(job) get_experiment_job_statuses() else: get_experiment_statuses() @experiment.command()", "'-f', multiple=True, type=click.Path(exists=True), help=\"The polyaxon files to update with.\") @click.option('-u',", "[Caching](/references/polyaxon-cli/#caching) Examples for getting experiment logs: \\b ```bash $ polyaxon", "``` \\b ```bash $ polyaxon experiment -xp 2 update --tags=\"foo,", "``` \\b ```bash $ polyaxon experiment -xp 2 stop ```", "@click.pass_context @clean_outputs def outputs(ctx): \"\"\"Download outputs for experiment. Uses [Caching](/references/polyaxon-cli/#caching)", "@clean_outputs def delete(ctx): \"\"\"Delete experiment. Uses [Caching](/references/polyaxon-cli/#caching) Example: \\b ```bash", "ctx.obj.get('experiment')) if job: _job = get_experiment_job_or_local(job) get_experiment_job_logs() else: get_experiment_logs() @experiment.command()", "repo before resuming.\") @click.pass_context @clean_outputs def resume(ctx, file, u): #", "= get_experiment_job_or_local(job) get_experiment_job_logs() else: get_experiment_logs() @experiment.command() @click.pass_context @clean_outputs def outputs(ctx):", "cached ``` \\b ```bash $ polyaxon experiment --experiment=1 get ```", "_job = get_experiment_job_or_local(job) get_experiment_job() else: get_experiment() @experiment.command() @click.pass_context @clean_outputs def", "file, u): # pylint:disable=redefined-builtin \"\"\"Resume experiment. Uses [Caching](/references/polyaxon-cli/#caching) Examples: \\b", "-j 3 ``` \\b ```bash $ polyaxon experiment -xp 1", "@click.option('--yes', '-y', is_flag=True, default=False, help=\"Automatic yes to prompts. \" \"Assume", "experiment --experiment=1 get ``` \\b ```bash $ polyaxon experiment -xp", "\"\"\"Stop experiment. 
Uses [Caching](/references/polyaxon-cli/#caching) Examples: \\b ```bash $ polyaxon experiment", "ctx.obj.get('experiment')) update_dict = {} if name: update_dict['name'] = name if", "\"\"\"Download outputs for experiment. Uses [Caching](/references/polyaxon-cli/#caching) Examples: \\b ```bash $", "experiment get # if experiment is cached ``` \\b ```bash", "except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not download outputs", ") from polyaxon_cli.utils.log_handler import get_logs_handler from polyaxon_cli.utils.validation import validate_tags from", "outputs for experiment. Uses [Caching](/references/polyaxon-cli/#caching) Examples: \\b ```bash $ polyaxon", "not stop experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) Printer.print_success(\"Experiment is", "= list_dicts_to_tabulate( [Printer.add_status_color(o.to_light_dict(humanize_values=True), status_key='status') for o in response['results']]) if objects:", "\\b ```bash $ polyaxon experiment unbookmark ``` \\b ```bash $", "experiment -xp 1 statuses ``` Examples getting experiment job statuses:", "\"\"\"Get experiment or experiment job. 
Uses [Caching](/references/polyaxon-cli/#caching) Examples for getting", "@click.option('--past', '-p', is_flag=True, help=\"Show the past logs.\") @click.option('--follow', '-f', is_flag=True,", "help=\"The job id.\") @click.option('--page', type=int, help=\"To paginate through the list", "--experiment=1 get --job=10 ``` \\b ```bash $ polyaxon experiment -xp", "resuming.\") @click.pass_context @clean_outputs def resume(ctx, file, u): # pylint:disable=redefined-builtin \"\"\"Resume", "user, project_name, _experiment, config=config, update_code=update_code) Printer.print_success('Experiment was copied with id", "experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) meta = get_meta_response(response) if", "statuses --job 1 ``` \"\"\" def get_experiment_statuses(): try: response =", "$ polyaxon experiment delete ``` \"\"\" user, project_name, _experiment =", "Examples getting experiment job statuses: \\b ```bash $ polyaxon experiment", "or {} ctx.obj['project'] = project ctx.obj['experiment'] = experiment @experiment.command() @click.option('--job',", "$ polyaxon experiment statuses -j 3 ``` \\b ```bash $", "@clean_outputs def resume(ctx, file, u): # pylint:disable=redefined-builtin \"\"\"Resume experiment. 
Uses", "Printer.print_error( 'Could not get logs for experiment `{}`.'.format(_experiment)) Printer.print_error( 'Error", "dict_tabulate(objects, is_list_dict=True) def get_experiment_job_statuses(): try: response = PolyaxonClient().experiment_job.get_statuses(user, project_name, _experiment,", "polyaxon_cli.utils import cache from polyaxon_cli.utils.formatting import ( Printer, dict_tabulate, get_meta_response,", "not follow: return except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: if", "if gpu else Printer.resources PolyaxonClient().experiment.resources( user, project_name, _experiment, message_handler=message_handler) except", "get logs for experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) def", "experiment `{}`.'.format(_experiment)) Printer.print_error( 'Error message `{}`.'.format(e)) sys.exit(1) try: PolyaxonClient().experiment.logs( user,", "objects = [Printer.add_status_color(o.to_light_dict(humanize_values=True)) for o in response['results']] objects = list_dicts_to_tabulate(objects)", "$ polyaxon experiment -xp 19 resources --gpu ``` Examples for", "bar\" --name=\"unique-name\" ``` \"\"\" user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment'))", "'run_env' ]) Printer.print_header(\"Experiment info:\") dict_tabulate(Printer.add_status_color(response)) @click.group() @click.option('--project', '-p', type=str, help=\"The", "bookmark ``` \"\"\" user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) try:", "$ polyaxon experiment get # if experiment is cached ```", "```bash $ polyaxon experiment -xp 1 statuses ``` Examples getting", "# pylint:disable=redefined-outer-name if experiment.description: Printer.print_header(\"Experiment description:\") click.echo('{}\\n'.format(experiment.description)) if experiment.resources: 
get_resources(experiment.resources.to_dict(),", "experiment.declarations: Printer.print_header(\"Experiment declarations:\") dict_tabulate(experiment.declarations) if experiment.last_metric: Printer.print_header(\"Experiment last metrics:\") dict_tabulate(experiment.last_metric)", "\"\"\" def get_experiment(): try: response = PolyaxonClient().experiment.get_experiment(user, project_name, _experiment) cache.cache(config_manager=ExperimentManager,", "experiment `{}`.'.format(_experiment)) objects = [Printer.add_status_color(o.to_light_dict(humanize_values=True)) for o in response['results']] objects", "clean_outputs from polyaxon_cli.managers.experiment import ExperimentManager from polyaxon_cli.managers.experiment_job import ExperimentJobManager from", "job id.\") @click.option('--page', type=int, help=\"To paginate through the list of", "delete(ctx): \"\"\"Delete experiment. Uses [Caching](/references/polyaxon-cli/#caching) Example: \\b ```bash $ polyaxon", "and run non-interactively.\") @click.pass_context @clean_outputs def stop(ctx, yes): \"\"\"Stop experiment.", "= get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) try: PolyaxonClient().experiment.download_outputs(user, project_name, _experiment) except (PolyaxonHTTPError, PolyaxonShouldExitError,", "= PolyaxonClient().experiment.get_statuses( user, project_name, _experiment, page=page) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException)", "showing past logs.\") @click.option('--hide_time', is_flag=True, default=False, help=\"Whether or not to", "load experiment `{}` info.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) get_experiment_details(response) def", "with id {}'.format(response.id)) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could", "`{}`.'.format(_experiment)) Printer.print_header('Navigation:') dict_tabulate(meta) else: Printer.print_header('No 
statuses found for experiment `{}`.'.format(_experiment))", "as e: Printer.print_error('Could not get resources for experiment `{}`.'.format(_experiment)) Printer.print_error('Error", "Uses [Caching](/references/polyaxon-cli/#caching) Examples for getting an experiment: \\b ```bash $", "except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not get logs", "response=response) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not get", "getting experiment job resources: \\b ```bash $ polyaxon experiment -xp", "logs.\") @click.option('--follow', '-f', is_flag=True, default=False, help=\"Stream logs after showing past", "hide_time, stream=False)(response.content.decode().split('\\n')) print() if not follow: return except (PolyaxonHTTPError, PolyaxonShouldExitError,", "list_dicts_to_tabulate(objects) if objects: Printer.print_header(\"Jobs:\") objects.pop('experiment', None) dict_tabulate(objects, is_list_dict=True) @experiment.command() @click.option('--job',", "restarted with id {}'.format(response.id)) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:", "get_experiment_job() else: get_experiment() @experiment.command() @click.pass_context @clean_outputs def delete(ctx): \"\"\"Delete experiment.", "as e: if not follow: Printer.print_error( 'Could not get logs", "@experiment.command() @click.option('--file', '-f', multiple=True, type=click.Path(exists=True), help=\"The polyaxon files to update", "Printer.print_error('Could not load experiment `{}` info.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1)", "get_experiment_resources() @experiment.command() @click.option('--job', '-j', type=int, help=\"The job id.\") @click.option('--past', '-p',", "within the project, could be none.') @click.option('--description', type=str, help='Description of", "```bash $ polyaxon experiment -xp 1 -p 
alain/cats-vs-dogs get ```", "ctx.obj.get('experiment')) if job: _job = get_experiment_job_or_local(job) get_experiment_job_resources() else: get_experiment_resources() @experiment.command()", "experiment. Uses [Caching](/references/polyaxon-cli/#caching) Examples: \\b ```bash $ polyaxon experiment -xp", "2 ``` \"\"\" def get_experiment(): try: response = PolyaxonClient().experiment.get_experiment(user, project_name,", "if copy: response = PolyaxonClient().experiment.copy( user, project_name, _experiment, config=config, update_code=update_code)", "$ polyaxon experiment -xp 2 update --description=\"new description for my", ") from polyaxon_cli.cli.upload import upload from polyaxon_cli.client import PolyaxonClient from", "follow: return except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: if not", "the log stream.\") @click.pass_context @clean_outputs def logs(ctx, job, past, follow,", "message `{}`.'.format(e)) sys.exit(1) meta = get_meta_response(response) if meta: Printer.print_header('Statuses for", "meta: Printer.print_header('Statuses for Job `{}`.'.format(_job)) Printer.print_header('Navigation:') dict_tabulate(meta) else: Printer.print_header('No statuses", "if not yes and not click.confirm(\"Are sure you want to", "PolyaxonClient().experiment.logs( user, project_name, _experiment, message_handler=get_logs_handler(handle_job_info=True, show_timestamp=not hide_time)) except (PolyaxonHTTPError, PolyaxonShouldExitError,", "_job = get_experiment_job_or_local(job) get_experiment_job_resources() else: get_experiment_resources() @experiment.command() @click.option('--job', '-j', type=int,", "import ExperimentJobManager from polyaxon_cli.utils import cache from polyaxon_cli.utils.formatting import (", "``` Examples for getting experiment job resources: \\b ```bash $", "--experiment=1 resume ``` \"\"\" config = None update_code = None", "(PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: 
Printer.print_error('Could not get resources for", "'unique_name', 'resources'] )) Printer.print_header(\"Job info:\") dict_tabulate(response) user, project_name, _experiment =", "help='Tags of the experiment, comma separated values.') @click.pass_context @clean_outputs def", "'-g', is_flag=True, help=\"List experiment GPU resources.\") @click.pass_context @clean_outputs def resources(ctx,", "\" \"experiment `{}`\".format(_experiment)): click.echo('Existing without stopping experiment.') sys.exit(0) try: PolyaxonClient().experiment.stop(user,", "PolyaxonClient().experiment.download_outputs(user, project_name, _experiment) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could", "1 --project=cats-vs-dogs get -j 2 ``` \\b ```bash $ polyaxon", "not update_dict: Printer.print_warning('No argument was provided to update the experiment.')", "if gpu else Printer.resources PolyaxonClient().experiment_job.resources(user, project_name, _experiment, _job, message_handler=message_handler) except", "PolyaxonClientException) as e: Printer.print_error('Could not unbookmark experiment `{}`.'.format(_experiment)) Printer.print_error('Error message", "list of statuses.\") @click.pass_context @clean_outputs def statuses(ctx, job, page): \"\"\"Get", "Uses [Caching](/references/polyaxon-cli/#caching) Examples for getting experiment resources: \\b ```bash $", "polyaxon files to update with.\") @click.option('-u', is_flag=True, default=False, help=\"To upload", "= get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) update_dict = {} if name: update_dict['name'] =", "PolyaxonClient().experiment.logs( user, project_name, _experiment, stream=False) get_logs_handler(handle_job_info=True, show_timestamp=not hide_time, stream=False)(response.content.decode().split('\\n')) print()", "get_meta_response(response) if meta: Printer.print_header('Statuses for Job `{}`.'.format(_job)) 
Printer.print_header('Navigation:') dict_tabulate(meta) else:", "sure you want to delete experiment `{}`\".format(_experiment)): click.echo('Existing without deleting", "= PolyaxonClient().experiment_job.get_statuses(user, project_name, _experiment, _job, page=page) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException)", "experiment -xp 1 --project=cats-vs-dogs get -j 2 ``` \\b ```bash", "'experiment', 'unique_name', 'resources'] )) Printer.print_header(\"Job info:\") dict_tabulate(response) user, project_name, _experiment", "sys.exit(1) Printer.print_success(\"Experiment is bookmarked.\") @experiment.command() @click.pass_context @clean_outputs def unbookmark(ctx): \"\"\"Unbookmark", "try: response = PolyaxonClient().experiment_job.logs( user, project_name, _experiment, _job, stream=False) get_logs_handler(handle_job_info=True,", "def get_experiment_details(experiment): # pylint:disable=redefined-outer-name if experiment.description: Printer.print_header(\"Experiment description:\") click.echo('{}\\n'.format(experiment.description)) if", "def experiment(ctx, project, experiment): # pylint:disable=redefined-outer-name \"\"\"Commands for experiments.\"\"\" ctx.obj", "sys.exit(1) def get_experiment_job_resources(): try: message_handler = Printer.gpu_resources if gpu else", "\\b ```bash $ polyaxon experiment statuses -j 3 ``` \\b", "get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) if job: _job = get_experiment_job_or_local(job) get_experiment_job_resources() else: get_experiment_resources()", "if not update_dict: Printer.print_warning('No argument was provided to update the", "_experiment, config=config, update_code=update_code) Printer.print_success('Experiment was resumed with id {}'.format(response.id)) except", "'definition', 'experiment', 'unique_name', 'resources'] )) Printer.print_header(\"Job info:\") dict_tabulate(response) user, project_name,", "objects: Printer.print_header(\"Statuses:\") 
objects.pop('job', None) dict_tabulate(objects, is_list_dict=True) page = page or", "getting experiment statuses: \\b ```bash $ polyaxon experiment statuses ```", "PolyaxonClientException) as e: Printer.print_error('Could not delete experiment `{}`.'.format(_experiment)) Printer.print_error('Error message", "@click.option('--follow', '-f', is_flag=True, default=False, help=\"Stream logs after showing past logs.\")", "-xp 1 -j 1 logs ``` \"\"\" def get_experiment_logs(): if", "follow, hide_time): \"\"\"Get experiment or experiment job logs. Uses [Caching](/references/polyaxon-cli/#caching)", "dict_tabulate(Printer.add_status_color(response)) @click.group() @click.option('--project', '-p', type=str, help=\"The project name, e.g. 'mnist'", "description tags = validate_tags(tags) if tags: update_dict['tags'] = tags if", "Uses [Caching](/references/polyaxon-cli/#caching) Examples: \\b ```bash $ polyaxon experiment unbookmark ```", "info:\") dict_tabulate(Printer.add_status_color(response)) @click.group() @click.option('--project', '-p', type=str, help=\"The project name, e.g.", "{}'.format(response.id)) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not resume", "experiment resources: \\b ```bash $ polyaxon experiment -xp 19 resources", "$ polyaxon experiment -xp 19 resources -j 1 --gpu ```", "get_experiment_job_resources() else: get_experiment_resources() @experiment.command() @click.option('--job', '-j', type=int, help=\"The job id.\")", "sys.exit(1) def get_experiment_job_logs(): if past: try: response = PolyaxonClient().experiment_job.logs( user,", "project_name, _experiment) # Purge caching ExperimentManager.purge() except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException)", "was resumed with id {}'.format(response.id)) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as", "bookmark experiment `{}`.'.format(_experiment)) Printer.print_error('Error message 
`{}`.'.format(e)) sys.exit(1) Printer.print_success(\"Experiment is bookmarked.\")", "@click.group() @click.option('--project', '-p', type=str, help=\"The project name, e.g. 'mnist' or", "response = Printer.add_status_color(response.to_light_dict( humanize_values=True, exclude_attrs=['uuid', 'definition', 'experiment', 'unique_name', 'resources'] ))", "--job=10 ``` \\b ```bash $ polyaxon experiment -xp 1 --project=cats-vs-dogs", "```bash $ polyaxon experiment statuses -j 3 ``` \\b ```bash", "an experiment: \\b ```bash $ polyaxon experiment get # if", "@experiment.command() @click.option('--name', type=str, help='Name of the experiment, must be unique", "resources:\") if experiment.declarations: Printer.print_header(\"Experiment declarations:\") dict_tabulate(experiment.declarations) if experiment.last_metric: Printer.print_header(\"Experiment last", "@experiment.command() @click.option('--job', '-j', type=int, help=\"The job id.\") @click.option('--past', '-p', is_flag=True,", "header=\"Experiment resources:\") if experiment.declarations: Printer.print_header(\"Experiment declarations:\") dict_tabulate(experiment.declarations) if experiment.last_metric: Printer.print_header(\"Experiment", "-j 2 ``` \"\"\" def get_experiment(): try: response = PolyaxonClient().experiment.get_experiment(user,", "e: Printer.print_error('Could not get job `{}`.'.format(_job)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1)", "the experiment, comma separated values.') @click.pass_context @clean_outputs def update(ctx, name,", "sys.exit(1) meta = get_meta_response(response) if meta: Printer.print_header('Statuses for experiment `{}`.'.format(_experiment))", "```bash $ polyaxon experiment --experiment=1 jobs ``` \"\"\" user, project_name,", "_experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) if not click.confirm(\"Are sure you want", "update_code=update_code) Printer.print_success('Experiment was copied with id 
{}'.format(response.id)) else: response =", "help=\"The polyaxon files to update with.\") @click.option('-u', is_flag=True, default=False, help=\"To", "= PolyaxonClient().experiment.logs( user, project_name, _experiment, stream=False) get_logs_handler(handle_job_info=True, show_timestamp=not hide_time, stream=False)(response.content.decode().split('\\n'))", "as e: Printer.print_error('Could not stop experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e))", "statuses -j 3 ``` \\b ```bash $ polyaxon experiment -xp", "from polyaxon_cli.utils.validation import validate_tags from polyaxon_client.exceptions import PolyaxonClientException def get_experiment_details(experiment):", "click.echo('{}\\n'.format(experiment.description)) if experiment.resources: get_resources(experiment.resources.to_dict(), header=\"Experiment resources:\") if experiment.declarations: Printer.print_header(\"Experiment declarations:\")", "not click.confirm(\"Are sure you want to delete experiment `{}`\".format(_experiment)): click.echo('Existing", "Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) def get_experiment_job_resources(): try: message_handler = Printer.gpu_resources", "experiment GPU resources.\") @click.pass_context @clean_outputs def resources(ctx, job, gpu): \"\"\"Get", "show_timestamp=not hide_time)) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not", "type=int, help=\"The job id.\") @click.option('--page', type=int, help=\"To paginate through the", "= True user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) try: response", "-xp 2 update --description=\"new description for my experiments\" ``` \\b", "= list_dicts_to_tabulate(objects) if objects: Printer.print_header(\"Jobs:\") objects.pop('experiment', None) dict_tabulate(objects, is_list_dict=True) @experiment.command()", "experiment statuses -j 3 ``` 
\\b ```bash $ polyaxon experiment", "experiment job resources. Uses [Caching](/references/polyaxon-cli/#caching) Examples for getting experiment resources:", "validate_tags from polyaxon_client.exceptions import PolyaxonClientException def get_experiment_details(experiment): # pylint:disable=redefined-outer-name if", "= get_meta_response(response) if meta: Printer.print_header('Statuses for Job `{}`.'.format(_job)) Printer.print_header('Navigation:') dict_tabulate(meta)", "polyaxon experiment statuses ``` \\b ```bash $ polyaxon experiment -xp", "_job = get_experiment_job_or_local(job) get_experiment_job_logs() else: get_experiment_logs() @experiment.command() @click.pass_context @clean_outputs def", "Printer.print_header('Navigation:') dict_tabulate(meta) else: Printer.print_header('No statuses found for experiment `{}`.'.format(_experiment)) objects", "experiment -xp 2 bookmark ``` \"\"\" user, project_name, _experiment =", "stop(ctx, yes): \"\"\"Stop experiment. Uses [Caching](/references/polyaxon-cli/#caching) Examples: \\b ```bash $", "for experiment. 
Uses [Caching](/references/polyaxon-cli/#caching) Examples: \\b ```bash $ polyaxon experiment", "experiment bookmark ``` \\b ```bash $ polyaxon experiment -xp 2", "for o in response['results']]) if objects: Printer.print_header(\"Statuses:\") objects.pop('job', None) dict_tabulate(objects,", "user, project_name, _experiment, update_dict) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:", "config = rhea.read(file) # Check if we need to upload", "experiment -xp 19 resources --gpu ``` Examples for getting experiment", "help='Description of the experiment.') @click.option('--tags', type=str, help='Tags of the experiment,", "\"\"\" config = None update_code = None if file: config", "id number.\") @click.pass_context @clean_outputs def experiment(ctx, project, experiment): # pylint:disable=redefined-outer-name", "$ polyaxon experiment -xp 1 -j 1 logs ``` \"\"\"", "get_project_experiment_or_local ) from polyaxon_cli.cli.upload import upload from polyaxon_cli.client import PolyaxonClient", "resources. Uses [Caching](/references/polyaxon-cli/#caching) Examples for getting experiment resources: \\b ```bash", "def bookmark(ctx): \"\"\"Bookmark experiment. 
Uses [Caching](/references/polyaxon-cli/#caching) Examples: \\b ```bash $", "Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) if response.status_code == 204: Printer.print_success(\"Experiment `{}`", "@clean_outputs def experiment(ctx, project, experiment): # pylint:disable=redefined-outer-name \"\"\"Commands for experiments.\"\"\"", "not get jobs for experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1)", "(PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not get jobs for", "Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) @experiment.command() @click.option('--page', type=int, help=\"To paginate through", "update_dict = {} if name: update_dict['name'] = name if description:", "paginate through the list of statuses.\") @click.pass_context @clean_outputs def statuses(ctx,", "@click.pass_context @clean_outputs def experiment(ctx, project, experiment): # pylint:disable=redefined-outer-name \"\"\"Commands for", "\\b ```bash $ polyaxon experiment get -j 1 # if", "[Caching](/references/polyaxon-cli/#caching) Examples getting experiment statuses: \\b ```bash $ polyaxon experiment", "user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) try: PolyaxonClient().experiment.bookmark(user, project_name, _experiment)", "project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) try: PolyaxonClient().experiment.bookmark(user, project_name, _experiment) except", "hide_time): \"\"\"Get experiment or experiment job logs. 
Uses [Caching](/references/polyaxon-cli/#caching) Examples", "$ polyaxon experiment --experiment=1 get --job=10 ``` \\b ```bash $", "delete experiment `{}`\".format(_experiment)): click.echo('Existing without deleting experiment.') sys.exit(1) try: response", "polyaxon_cli.client import PolyaxonClient from polyaxon_cli.client.exceptions import PolyaxonHTTPError, PolyaxonShouldExitError from polyaxon_cli.logger", "rhea from polyaxon_cli.cli.getters.experiment import ( get_experiment_job_or_local, get_project_experiment_or_local ) from polyaxon_cli.cli.upload", "description: update_dict['description'] = description tags = validate_tags(tags) if tags: update_dict['tags']", "default=False, help=\"Whether or not to hide timestamps from the log", "@click.option('--page', type=int, help=\"To paginate through the list of jobs.\") @click.pass_context", "_experiment, _job, message_handler=message_handler) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could", "_experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) page = page or 1 try:", "\\b ```bash $ polyaxon experiment --experiment=1 jobs ``` \"\"\" user,", "$ polyaxon experiment get -j 1 # if experiment is", "1 -p alain/cats-vs-dogs get -j 2 ``` \"\"\" def get_experiment():", "get_experiment_job_or_local, get_project_experiment_or_local ) from polyaxon_cli.cli.upload import upload from polyaxon_cli.client import", "click.echo('Existing without deleting experiment.') sys.exit(1) try: response = PolyaxonClient().experiment.delete_experiment( user,", "_experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) if job: _job = get_experiment_job_or_local(job) get_experiment_job()", "values.') @click.pass_context @clean_outputs def update(ctx, name, description, tags): \"\"\"Update experiment.", "resources.\") @click.pass_context @clean_outputs def resources(ctx, job, gpu): \"\"\"Get 
experiment or", "jobs(ctx, page): \"\"\"List jobs for experiment. Uses [Caching](/references/polyaxon-cli/#caching) Examples: \\b", "``` Examples getting experiment job statuses: \\b ```bash $ polyaxon", "sys.exit(1) user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) if job: _job", "Printer.print_header(\"Experiment info:\") dict_tabulate(Printer.add_status_color(response)) @click.group() @click.option('--project', '-p', type=str, help=\"The project name,", "--job 1 ``` \"\"\" def get_experiment_statuses(): try: response = PolyaxonClient().experiment.get_statuses(", "get ``` \\b ```bash $ polyaxon experiment -xp 1 -p", "--gpu ``` Examples for getting experiment job resources: \\b ```bash", "PolyaxonClient().experiment_job.logs( user, project_name, _experiment, _job, message_handler=get_logs_handler(handle_job_info=True, show_timestamp=not hide_time)) except (PolyaxonHTTPError,", "`{}`.'.format(_experiment)) objects = [Printer.add_status_color(o.to_light_dict(humanize_values=True)) for o in response['results']] objects =", "sys.exit(1) get_experiment_details(response) def get_experiment_job(): try: response = PolyaxonClient().experiment_job.get_job(user, project_name, _experiment,", "is_flag=True, default=False, help=\"To upload the repo before restarting.\") @click.pass_context @clean_outputs", "Examples: \\b ```bash $ polyaxon experiment --experiment=1 jobs ``` \"\"\"", "as e: Printer.print_error('Could not get logs for job `{}`.'.format(_job)) Printer.print_error('Error", "_experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) if not yes and not click.confirm(\"Are", "PolyaxonClient().experiment.get_experiment(user, project_name, _experiment) cache.cache(config_manager=ExperimentManager, response=response) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as", "sys.exit(1) if response.resources: 
get_resources(response.resources.to_dict(), header=\"Job resources:\") response = Printer.add_status_color(response.to_light_dict( humanize_values=True,", "$ polyaxon experiment --experiment=1 restart ``` \"\"\" config = None", "project_name, _experiment, update_dict) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could", "description for my experiments\" ``` \\b ```bash $ polyaxon experiment", "Printer.print_error('Could not get status for job `{}`.'.format(job)) Printer.print_error('Error message `{}`.'.format(e))", "stop ``` \\b ```bash $ polyaxon experiment -xp 2 stop", "response = PolyaxonClient().experiment.delete_experiment( user, project_name, _experiment) # Purge caching ExperimentManager.purge()", "except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: if not follow: Printer.print_error(", "experiment id number.\") @click.pass_context @clean_outputs def experiment(ctx, project, experiment): #", "\\b ```bash $ polyaxon experiment --experiment=1 get --job=10 ``` \\b", "response = PolyaxonClient().experiment.update_experiment( user, project_name, _experiment, update_dict) except (PolyaxonHTTPError, PolyaxonShouldExitError,", "``` \"\"\" def get_experiment_logs(): if past: try: response = PolyaxonClient().experiment.logs(", "experiment get -j 1 # if experiment is cached ```", "try: PolyaxonClient().experiment.stop(user, project_name, _experiment) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:", "stop experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) Printer.print_success(\"Experiment is being", "e.g. 
'mnist' or 'adam/mnist'.\") @click.option('--experiment', '-xp', type=int, help=\"The experiment id", "`{}`.'.format(e)) sys.exit(1) Printer.print_success('Files downloaded.') @experiment.command() @click.pass_context @clean_outputs def bookmark(ctx): \"\"\"Bookmark", "'mnist' or 'adam/mnist'.\") @click.option('--experiment', '-xp', type=int, help=\"The experiment id number.\")", "not get job `{}`.'.format(_job)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) if response.resources:", "or experiment job logs. Uses [Caching](/references/polyaxon-cli/#caching) Examples for getting experiment", "not follow: Printer.print_error( 'Could not get logs for experiment `{}`.'.format(_experiment))", "-j 1 ``` For GPU resources \\b ```bash $ polyaxon", "```bash $ polyaxon experiment -xp 19 resources ``` For GPU", "dict_tabulate(experiment.declarations) if experiment.last_metric: Printer.print_header(\"Experiment last metrics:\") dict_tabulate(experiment.last_metric) response = experiment.to_light_dict(", "Printer.print_error('Could not unbookmark experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) Printer.print_success(\"Experiment", "experiment `{}`\".format(_experiment)): click.echo('Existing without deleting experiment.') sys.exit(1) try: response =", "@click.pass_context @clean_outputs def delete(ctx): \"\"\"Delete experiment. 
Uses [Caching](/references/polyaxon-cli/#caching) Example: \\b", "sys.exit(1) meta = get_meta_response(response) if meta: Printer.print_header('Statuses for Job `{}`.'.format(_job))", "job `{}`.'.format(_job)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) if response.resources: get_resources(response.resources.to_dict(), header=\"Job", "jobs for experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) meta =", "-j 1 logs ``` \"\"\" def get_experiment_logs(): if past: try:", "as e: Printer.print_error('Could not update experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e))", "10 -p mnist logs ``` Examples for getting experiment job", "validate_tags(tags) if tags: update_dict['tags'] = tags if not update_dict: Printer.print_warning('No", "get status for experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) meta", "was provided to update the experiment.') sys.exit(0) try: response =", "utf-8 -*- from __future__ import absolute_import, division, print_function import sys", "Examples: \\b ```bash $ polyaxon experiment -xp 2 update --description=\"new", "experiment job statuses. Uses [Caching](/references/polyaxon-cli/#caching) Examples getting experiment statuses: \\b", "past: try: response = PolyaxonClient().experiment_job.logs( user, project_name, _experiment, _job, stream=False)", "as e: Printer.print_error('Could not delete experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e))", "update_dict: Printer.print_warning('No argument was provided to update the experiment.') sys.exit(0)", "(PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not get status for", "yes to prompts. 
\" \"Assume \\\"yes\\\" as answer to all", "Printer.add_status_color(response.to_light_dict( humanize_values=True, exclude_attrs=['uuid', 'definition', 'experiment', 'unique_name', 'resources'] )) Printer.print_header(\"Job info:\")", "not resume experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) @experiment.command() @click.option('--page',", "$ polyaxon experiment -xp 19 resources -j 1 ``` For", "experiment job: \\b ```bash $ polyaxon experiment get -j 1", "get resources for experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) def", "help=\"To copy the experiment before restarting.\") @click.option('--file', '-f', multiple=True, type=click.Path(exists=True),", "jobs found for experiment `{}`.'.format(_experiment)) objects = [Printer.add_status_color(o.to_light_dict(humanize_values=True)) for o", "import ExperimentManager from polyaxon_cli.managers.experiment_job import ExperimentJobManager from polyaxon_cli.utils import cache", "log stream.\") @click.pass_context @clean_outputs def logs(ctx, job, past, follow, hide_time):", "info:\") dict_tabulate(response) user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) if job:", "gpu else Printer.resources PolyaxonClient().experiment.resources( user, project_name, _experiment, message_handler=message_handler) except (PolyaxonHTTPError,", "def get_experiment_logs(): if past: try: response = PolyaxonClient().experiment.logs( user, project_name,", "PolyaxonClientException) as e: Printer.print_error('Could not get jobs for experiment `{}`.'.format(_experiment))", "downloaded.') @experiment.command() @click.pass_context @clean_outputs def bookmark(ctx): \"\"\"Bookmark experiment. 
Uses [Caching](/references/polyaxon-cli/#caching)", "Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) meta = get_meta_response(response) if meta: Printer.print_header('Statuses", "19 resources -j 1 --gpu ``` \"\"\" def get_experiment_resources(): try:", "polyaxon experiment -xp 19 resources -j 1 ``` For GPU", "PolyaxonClientException) as e: Printer.print_error('Could not get logs for experiment `{}`.'.format(_experiment))", "experiment job logs: \\b ```bash $ polyaxon experiment -xp 1", "or experiment job. Uses [Caching](/references/polyaxon-cli/#caching) Examples for getting an experiment:", "def get_experiment_job_logs(): if past: try: response = PolyaxonClient().experiment_job.logs( user, project_name,", "polyaxon experiment bookmark ``` \\b ```bash $ polyaxon experiment -xp", "user, project_name, _experiment, _job, message_handler=get_logs_handler(handle_job_info=True, show_timestamp=not hide_time)) except (PolyaxonHTTPError, PolyaxonShouldExitError,", "job id.\") @click.option('--past', '-p', is_flag=True, help=\"Show the past logs.\") @click.option('--follow',", "`{}` was delete successfully\".format(_experiment)) @experiment.command() @click.option('--name', type=str, help='Name of the", "import validate_tags from polyaxon_client.exceptions import PolyaxonClientException def get_experiment_details(experiment): # pylint:disable=redefined-outer-name", "$ polyaxon experiment -xp 19 resources ``` For GPU resources", "get_meta_response(response) if meta: Printer.print_header('Jobs for experiment `{}`.'.format(_experiment)) Printer.print_header('Navigation:') dict_tabulate(meta) else:", "experiment -xp 1 -j 1 logs ``` \"\"\" def get_experiment_logs():", "all prompts and run non-interactively.\") @click.pass_context @clean_outputs def stop(ctx, yes):", "not get logs for experiment `{}`.'.format(_experiment)) Printer.print_error( 'Error message `{}`.'.format(e))", "`{}`.'.format(_experiment)) objects = list_dicts_to_tabulate( 
[Printer.add_status_color(o.to_light_dict(humanize_values=True), status_key='status') for o in response['results']])", "sys.exit(0) try: response = PolyaxonClient().experiment.update_experiment( user, project_name, _experiment, update_dict) except", "`{}`.'.format(e)) sys.exit(1) @experiment.command() @click.option('--page', type=int, help=\"To paginate through the list", "experiment statuses ``` \\b ```bash $ polyaxon experiment -xp 1", "unbookmark ``` \"\"\" user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) try:", "-j 2 ``` \\b ```bash $ polyaxon experiment -xp 1", "job, gpu): \"\"\"Get experiment or experiment job resources. Uses [Caching](/references/polyaxon-cli/#caching)", "help=\"Whether or not to hide timestamps from the log stream.\")", "_experiment, page=page) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could get", "job, past, follow, hide_time): \"\"\"Get experiment or experiment job logs.", "try: PolyaxonClient().experiment.bookmark(user, project_name, _experiment) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:", "Printer.print_error('Could not stop experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) Printer.print_success(\"Experiment", "pylint:disable=redefined-builtin \"\"\"Resume experiment. 
Uses [Caching](/references/polyaxon-cli/#caching) Examples: \\b ```bash $ polyaxon", "project_name, _experiment, config=config, update_code=update_code) Printer.print_success('Experiment was resumed with id {}'.format(response.id))", "metrics:\") dict_tabulate(experiment.last_metric) response = experiment.to_light_dict( humanize_values=True, exclude_attrs=[ 'uuid', 'config', 'project',", "{} ctx.obj['project'] = project ctx.obj['experiment'] = experiment @experiment.command() @click.option('--job', '-j',", "Uses [Caching](/references/polyaxon-cli/#caching) Examples: \\b ```bash $ polyaxon experiment --experiment=1 jobs", "meta = get_meta_response(response) if meta: Printer.print_header('Statuses for experiment `{}`.'.format(_experiment)) Printer.print_header('Navigation:')", "resources --gpu ``` Examples for getting experiment job resources: \\b", "get_meta_response(response) if meta: Printer.print_header('Statuses for experiment `{}`.'.format(_experiment)) Printer.print_header('Navigation:') dict_tabulate(meta) else:", "type=str, help='Tags of the experiment, comma separated values.') @click.pass_context @clean_outputs", "else Printer.resources PolyaxonClient().experiment.resources( user, project_name, _experiment, message_handler=message_handler) except (PolyaxonHTTPError, PolyaxonShouldExitError,", "= None if file: config = rhea.read(file) # Check if", "dict_tabulate(experiment.last_metric) response = experiment.to_light_dict( humanize_values=True, exclude_attrs=[ 'uuid', 'config', 'project', 'experiments',", "```bash $ polyaxon experiment --experiment=1 restart ``` \"\"\" config =", "get ``` \\b ```bash $ polyaxon experiment -xp 1 --project=cats-vs-dogs", "id.\") @click.option('--page', type=int, help=\"To paginate through the list of statuses.\")", "_experiment, stream=False) get_logs_handler(handle_job_info=True, show_timestamp=not hide_time, stream=False)(response.content.decode().split('\\n')) print() if not follow:", "status_key='status') for o in 
response['results']]) if objects: Printer.print_header(\"Statuses:\") objects.pop('experiment', None)", "user, project_name, _experiment) # Purge caching ExperimentManager.purge() except (PolyaxonHTTPError, PolyaxonShouldExitError,", "past, follow, hide_time): \"\"\"Get experiment or experiment job logs. Uses", "help=\"Stream logs after showing past logs.\") @click.option('--hide_time', is_flag=True, default=False, help=\"Whether", "@clean_outputs def logs(ctx, job, past, follow, hide_time): \"\"\"Get experiment or", "if file: config = rhea.read(file) # Check if we need", "jobs ``` \"\"\" user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) page", "type=int, help=\"The job id.\") @click.option('--gpu', '-g', is_flag=True, help=\"List experiment GPU", "get logs for job `{}`.'.format(_job)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) user,", "must be unique within the project, could be none.') @click.option('--description',", "19 resources --gpu ``` Examples for getting experiment job resources:", "get logs for experiment `{}`.'.format(_experiment)) Printer.print_error( 'Error message `{}`.'.format(e)) sys.exit(1)", "--gpu ``` \"\"\" def get_experiment_resources(): try: message_handler = Printer.gpu_resources if", "e: Printer.print_error('Could not stop experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1)", "we need to upload if u: ctx.invoke(upload, sync=False) update_code =", "$ polyaxon experiment unbookmark ``` \\b ```bash $ polyaxon experiment", "ctx.obj.get('experiment')) try: response = PolyaxonClient().experiment.resume( user, project_name, _experiment, config=config, update_code=update_code)", "message `{}`.'.format(e)) sys.exit(1) try: PolyaxonClient().experiment.logs( user, project_name, _experiment, message_handler=get_logs_handler(handle_job_info=True, show_timestamp=not", "PolyaxonClientException) as e: 
Printer.print_error('Could not bookmark experiment `{}`.'.format(_experiment)) Printer.print_error('Error message", "job: \\b ```bash $ polyaxon experiment get -j 1 #", "\\b ```bash $ polyaxon experiment -xp 1 statuses ``` Examples", "if past: try: response = PolyaxonClient().experiment_job.logs( user, project_name, _experiment, _job,", "project_name, _experiment) cache.cache(config_manager=ExperimentManager, response=response) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:", "GPU resources.\") @click.pass_context @clean_outputs def resources(ctx, job, gpu): \"\"\"Get experiment", "copy, file, u): # pylint:disable=redefined-builtin \"\"\"Restart experiment. Uses [Caching](/references/polyaxon-cli/#caching) Examples:", "( Printer, dict_tabulate, get_meta_response, get_resources, list_dicts_to_tabulate ) from polyaxon_cli.utils.log_handler import", "experiments.\"\"\" ctx.obj = ctx.obj or {} ctx.obj['project'] = project ctx.obj['experiment']", "```bash $ polyaxon experiment --experiment=1 resume ``` \"\"\" config =", "of jobs.\") @click.pass_context @clean_outputs def jobs(ctx, page): \"\"\"List jobs for", "polyaxon experiment -xp 2 update --tags=\"foo, bar\" --name=\"unique-name\" ``` \"\"\"", "through the list of statuses.\") @click.pass_context @clean_outputs def statuses(ctx, job,", "stopped.\") @experiment.command() @click.option('--copy', '-c', is_flag=True, default=False, help=\"To copy the experiment", "```bash $ polyaxon experiment get -j 1 # if experiment", "\"\"\" def get_experiment_logs(): if past: try: response = PolyaxonClient().experiment.logs( user,", "logs: \\b ```bash $ polyaxon experiment -xp 1 -j 1", "Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) get_experiment_details(response) def get_experiment_job(): try: response =", "Printer.print_error('Could not get logs for experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e))", "PolyaxonShouldExitError, 
PolyaxonClientException) as e: Printer.print_error('Could not update experiment `{}`.'.format(_experiment)) Printer.print_error('Error", "'Error message `{}`.'.format(e)) sys.exit(1) try: PolyaxonClient().experiment_job.logs( user, project_name, _experiment, _job,", "Examples for getting experiment logs: \\b ```bash $ polyaxon experiment", "= get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) if job: _job = get_experiment_job_or_local(job) get_experiment_job_logs() else:", "`{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) Printer.print_success('Files downloaded.') @experiment.command() @click.pass_context @clean_outputs", "from polyaxon_client.exceptions import PolyaxonClientException def get_experiment_details(experiment): # pylint:disable=redefined-outer-name if experiment.description:", "sure you want to stop \" \"experiment `{}`\".format(_experiment)): click.echo('Existing without", "help=\"The job id.\") @click.pass_context @clean_outputs def get(ctx, job): \"\"\"Get experiment", "from polyaxon_cli.utils.log_handler import get_logs_handler from polyaxon_cli.utils.validation import validate_tags from polyaxon_client.exceptions", "config=config, update_code=update_code) Printer.print_success('Experiment was resumed with id {}'.format(response.id)) except (PolyaxonHTTPError,", "_job, message_handler=get_logs_handler(handle_job_info=True, show_timestamp=not hide_time)) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:", "'experiments', 'description', 'declarations', 'last_metric', 'resources', 'jobs', 'run_env' ]) Printer.print_header(\"Experiment info:\")", "$ polyaxon experiment logs ``` \\b ```bash $ polyaxon experiment", "experiment -xp 1 --project=cats-vs-dogs get ``` \\b ```bash $ polyaxon", "outputs ``` \"\"\" user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) try:", "to prompts. 
\" \"Assume \\\"yes\\\" as answer to all prompts", "is being stopped.\") @experiment.command() @click.option('--copy', '-c', is_flag=True, default=False, help=\"To copy", "dict_tabulate(meta) else: Printer.print_header('No statuses found for experiment `{}`.'.format(_experiment)) objects =", "not to hide timestamps from the log stream.\") @click.pass_context @clean_outputs", "e: Printer.print_error('Could not bookmark experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1)", "experiments\" ``` \\b ```bash $ polyaxon experiment -xp 2 update", "@clean_outputs def jobs(ctx, page): \"\"\"List jobs for experiment. Uses [Caching](/references/polyaxon-cli/#caching)", "= None update_code = None if file: config = rhea.read(file)", "= ctx.obj or {} ctx.obj['project'] = project ctx.obj['experiment'] = experiment", "# pylint:disable=redefined-builtin \"\"\"Resume experiment. Uses [Caching](/references/polyaxon-cli/#caching) Examples: \\b ```bash $", "Printer.print_header('No statuses found for experiment `{}`.'.format(_experiment)) objects = list_dicts_to_tabulate( [Printer.add_status_color(o.to_light_dict(humanize_values=True),", "logs ``` \\b ```bash $ polyaxon experiment -xp 10 -p", "Examples: \\b ```bash $ polyaxon experiment stop ``` \\b ```bash", "PolyaxonClientException) as e: Printer.print_error('Could not get job `{}`.'.format(_job)) Printer.print_error('Error message", "ctx.obj.get('experiment')) try: if copy: response = PolyaxonClient().experiment.copy( user, project_name, _experiment,", "Uses [Caching](/references/polyaxon-cli/#caching) Examples getting experiment statuses: \\b ```bash $ polyaxon", "type=click.Path(exists=True), help=\"The polyaxon files to update with.\") @click.option('-u', is_flag=True, default=False,", "'last_metric', 'resources', 'jobs', 'run_env' ]) Printer.print_header(\"Experiment info:\") dict_tabulate(Printer.add_status_color(response)) @click.group() @click.option('--project',", "else: 
get_experiment_statuses() @experiment.command() @click.option('--job', '-j', type=int, help=\"The job id.\") @click.option('--gpu',", "[Caching](/references/polyaxon-cli/#caching) Examples: \\b ```bash $ polyaxon experiment stop ``` \\b", "project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) update_dict = {} if name:", "if job: _job = get_experiment_job_or_local(job) get_experiment_job_statuses() else: get_experiment_statuses() @experiment.command() @click.option('--job',", "getting experiment logs: \\b ```bash $ polyaxon experiment logs ```", "$ polyaxon experiment --experiment=1 resume ``` \"\"\" config = None", "if not click.confirm(\"Are sure you want to delete experiment `{}`\".format(_experiment)):", "Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) Printer.print_success('Files downloaded.') @experiment.command() @click.pass_context @clean_outputs def", "get_experiment_job_statuses(): try: response = PolyaxonClient().experiment_job.get_statuses(user, project_name, _experiment, _job, page=page) except", "not download outputs for experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1)", "-xp 2 update --tags=\"foo, bar\" --name=\"unique-name\" ``` \"\"\" user, project_name,", "logs. 
Uses [Caching](/references/polyaxon-cli/#caching) Examples for getting experiment logs: \\b ```bash", "_experiment, config=config, update_code=update_code) Printer.print_success('Experiment was restarted with id {}'.format(response.id)) except", "`{}`.'.format(e)) sys.exit(1) try: PolyaxonClient().experiment.logs( user, project_name, _experiment, message_handler=get_logs_handler(handle_job_info=True, show_timestamp=not hide_time))", "\\b ```bash $ polyaxon experiment --experiment=1 get ``` \\b ```bash", "``` \"\"\" user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) try: PolyaxonClient().experiment.download_outputs(user,", "e: if not follow: Printer.print_error( 'Could not get logs for", "except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not restart experiment", "sys.exit(1) try: response = PolyaxonClient().experiment.delete_experiment( user, project_name, _experiment) # Purge", "help=\"To paginate through the list of jobs.\") @click.pass_context @clean_outputs def", "\"\"\"Get experiment or experiment job statuses. Uses [Caching](/references/polyaxon-cli/#caching) Examples getting", "polyaxon experiment -xp 1 statuses ``` Examples getting experiment job", "o in response['results']] objects = list_dicts_to_tabulate(objects) if objects: Printer.print_header(\"Jobs:\") objects.pop('experiment',", "def get(ctx, job): \"\"\"Get experiment or experiment job. 
Uses [Caching](/references/polyaxon-cli/#caching)", "help=\"List experiment GPU resources.\") @click.pass_context @clean_outputs def resources(ctx, job, gpu):", "argument was provided to update the experiment.') sys.exit(0) try: response", "be unique within the project, could be none.') @click.option('--description', type=str,", "job: _job = get_experiment_job_or_local(job) get_experiment_job_logs() else: get_experiment_logs() @experiment.command() @click.pass_context @clean_outputs", "else: get_experiment_logs() @experiment.command() @click.pass_context @clean_outputs def outputs(ctx): \"\"\"Download outputs for", "( get_experiment_job_or_local, get_project_experiment_or_local ) from polyaxon_cli.cli.upload import upload from polyaxon_cli.client", "experiment -xp 1 -p alain/cats-vs-dogs get ``` Examples for getting", "Examples for getting experiment resources: \\b ```bash $ polyaxon experiment", "e: Printer.print_error('Could not get resources for job `{}`.'.format(_job)) Printer.print_error('Error message", "PolyaxonClientException) as e: Printer.print_error('Could get status for experiment `{}`.'.format(_experiment)) Printer.print_error('Error", "type=int, help=\"To paginate through the list of statuses.\") @click.pass_context @clean_outputs", "after showing past logs.\") @click.option('--hide_time', is_flag=True, default=False, help=\"Whether or not", "except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not delete experiment", "``` \\b ```bash $ polyaxon experiment -xp 1 -p alain/cats-vs-dogs", "successfully\".format(_experiment)) @experiment.command() @click.option('--name', type=str, help='Name of the experiment, must be", "\"experiment `{}`\".format(_experiment)): click.echo('Existing without stopping experiment.') sys.exit(0) try: PolyaxonClient().experiment.stop(user, project_name,", "response = PolyaxonClient().experiment.get_statuses( user, project_name, _experiment, page=page) except 
(PolyaxonHTTPError, PolyaxonShouldExitError,", "polyaxon experiment stop ``` \\b ```bash $ polyaxon experiment -xp", "separated values.') @click.pass_context @clean_outputs def update(ctx, name, description, tags): \"\"\"Update", "an experiment job: \\b ```bash $ polyaxon experiment get -j", "user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) try: PolyaxonClient().experiment.download_outputs(user, project_name, _experiment)", "get resources for job `{}`.'.format(_job)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) user,", "sys.exit(0) try: PolyaxonClient().experiment.stop(user, project_name, _experiment) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as", "is_flag=True, default=False, help=\"To upload the repo before resuming.\") @click.pass_context @clean_outputs", "message_handler = Printer.gpu_resources if gpu else Printer.resources PolyaxonClient().experiment_job.resources(user, project_name, _experiment,", "PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not unbookmark experiment `{}`.'.format(_experiment)) Printer.print_error('Error", "experiment -xp 19 resources ``` For GPU resources \\b ```bash", "list of jobs.\") @click.pass_context @clean_outputs def jobs(ctx, page): \"\"\"List jobs", "else: get_experiment_resources() @experiment.command() @click.option('--job', '-j', type=int, help=\"The job id.\") @click.option('--past',", "statuses: \\b ```bash $ polyaxon experiment statuses -j 3 ```", "def statuses(ctx, job, page): \"\"\"Get experiment or experiment job statuses.", "@click.pass_context @clean_outputs def resume(ctx, file, u): # pylint:disable=redefined-builtin \"\"\"Resume experiment.", "\\b ```bash $ polyaxon experiment -xp 1 statuses --job 1", "as e: Printer.print_error('Could not get resources for job `{}`.'.format(_job)) Printer.print_error('Error", "user, project_name, _experiment = 
get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) if not click.confirm(\"Are sure", "PolyaxonClient().experiment.bookmark(user, project_name, _experiment) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could", "updated.\") get_experiment_details(response) @experiment.command() @click.option('--yes', '-y', is_flag=True, default=False, help=\"Automatic yes to", "-xp 2 unbookmark ``` \"\"\" user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'),", "getting an experiment job: \\b ```bash $ polyaxon experiment get", "`{}`.'.format(e)) sys.exit(1) user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) if job:", "--project=cats-vs-dogs get -j 2 ``` \\b ```bash $ polyaxon experiment", "file: config = rhea.read(file) # Check if we need to", "@click.pass_context @clean_outputs def update(ctx, name, description, tags): \"\"\"Update experiment. Uses", "user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) try: if copy: response", "alain/cats-vs-dogs get ``` Examples for getting an experiment job: \\b", "job id.\") @click.option('--gpu', '-g', is_flag=True, help=\"List experiment GPU resources.\") @click.pass_context", "type=int, help=\"To paginate through the list of jobs.\") @click.pass_context @clean_outputs", "experiment -xp 19 resources -j 1 --gpu ``` \"\"\" def", "2 bookmark ``` \"\"\" user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment'))", "Printer.resources PolyaxonClient().experiment_job.resources(user, project_name, _experiment, _job, message_handler=message_handler) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException)", "@clean_outputs def update(ctx, name, description, tags): \"\"\"Update experiment. 
Uses [Caching](/references/polyaxon-cli/#caching)", "project_name, _experiment) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not", "1 statuses ``` Examples getting experiment job statuses: \\b ```bash", "_job) cache.cache(config_manager=ExperimentJobManager, response=response) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could", "get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) try: PolyaxonClient().experiment.unbookmark(user, project_name, _experiment) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException)", "response = PolyaxonClient().experiment.list_jobs( user, project_name, _experiment, page=page) except (PolyaxonHTTPError, PolyaxonShouldExitError,", "my experiments\" ``` \\b ```bash $ polyaxon experiment -xp 2", "= Printer.add_status_color(response.to_light_dict( humanize_values=True, exclude_attrs=['uuid', 'definition', 'experiment', 'unique_name', 'resources'] )) Printer.print_header(\"Job", "PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not download outputs for experiment", "division, print_function import sys import click import rhea from polyaxon_cli.cli.getters.experiment", "default=False, help=\"To upload the repo before restarting.\") @click.pass_context @clean_outputs def", "polyaxon_cli.cli.getters.experiment import ( get_experiment_job_or_local, get_project_experiment_or_local ) from polyaxon_cli.cli.upload import upload", "job): \"\"\"Get experiment or experiment job. 
Uses [Caching](/references/polyaxon-cli/#caching) Examples for", "\\b ```bash $ polyaxon experiment -xp 2 unbookmark ``` \"\"\"", "the repo before resuming.\") @click.pass_context @clean_outputs def resume(ctx, file, u):", "= get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) if not click.confirm(\"Are sure you want to", "\"\"\"Commands for experiments.\"\"\" ctx.obj = ctx.obj or {} ctx.obj['project'] =", "```bash $ polyaxon experiment -xp 1 -p alain/cats-vs-dogs get -j", "Printer.print_error( 'Error message `{}`.'.format(e)) sys.exit(1) try: PolyaxonClient().experiment_job.logs( user, project_name, _experiment,", "restart experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) @experiment.command() @click.option('--file', '-f',", "experiment --experiment=1 resume ``` \"\"\" config = None update_code =", "@click.option('-u', is_flag=True, default=False, help=\"To upload the repo before restarting.\") @click.pass_context", "= get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) if job: _job = get_experiment_job_or_local(job) get_experiment_job_statuses() else:", "id {}'.format(response.id)) else: response = PolyaxonClient().experiment.restart( user, project_name, _experiment, config=config,", "PolyaxonClient().experiment_job.logs( user, project_name, _experiment, _job, stream=False) get_logs_handler(handle_job_info=True, show_timestamp=not hide_time, stream=False)(response.content.decode().split('\\n'))", "polyaxon experiment --experiment=1 restart ``` \"\"\" config = None update_code", "polyaxon_cli.cli.upload import upload from polyaxon_cli.client import PolyaxonClient from polyaxon_cli.client.exceptions import", "experiment or experiment job resources. 
Uses [Caching](/references/polyaxon-cli/#caching) Examples for getting", "without stopping experiment.') sys.exit(0) try: PolyaxonClient().experiment.stop(user, project_name, _experiment) except (PolyaxonHTTPError,", "def stop(ctx, yes): \"\"\"Stop experiment. Uses [Caching](/references/polyaxon-cli/#caching) Examples: \\b ```bash", "update --tags=\"foo, bar\" --name=\"unique-name\" ``` \"\"\" user, project_name, _experiment =", "Printer.print_header('Statuses for experiment `{}`.'.format(_experiment)) Printer.print_header('Navigation:') dict_tabulate(meta) else: Printer.print_header('No statuses found", "get jobs for experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) meta", "statuses found for experiment `{}`.'.format(_experiment)) objects = list_dicts_to_tabulate( [Printer.add_status_color(o.to_light_dict(humanize_values=True), status_key='status')", "@experiment.command() @click.pass_context @clean_outputs def unbookmark(ctx): \"\"\"Unbookmark experiment. 
Uses [Caching](/references/polyaxon-cli/#caching) Examples:", "of the experiment.') @click.option('--tags', type=str, help='Tags of the experiment, comma", "sys.exit(1) try: PolyaxonClient().experiment_job.logs( user, project_name, _experiment, _job, message_handler=get_logs_handler(handle_job_info=True, show_timestamp=not hide_time))", "through the list of jobs.\") @click.pass_context @clean_outputs def jobs(ctx, page):", "not get logs for experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1)", "project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) try: if copy: response =", "_experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) try: PolyaxonClient().experiment.unbookmark(user, project_name, _experiment) except (PolyaxonHTTPError,", "``` \"\"\" user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) page =", "from polyaxon_cli.managers.experiment import ExperimentManager from polyaxon_cli.managers.experiment_job import ExperimentJobManager from polyaxon_cli.utils", "\\b ```bash $ polyaxon experiment -xp 2 update --description=\"new description", "get_experiment_details(response) @experiment.command() @click.option('--yes', '-y', is_flag=True, default=False, help=\"Automatic yes to prompts.", "experiment -xp 1 statuses --job 1 ``` \"\"\" def get_experiment_statuses():", "help='Name of the experiment, must be unique within the project,", "import PolyaxonClientException def get_experiment_details(experiment): # pylint:disable=redefined-outer-name if experiment.description: Printer.print_header(\"Experiment description:\")", "objects = list_dicts_to_tabulate(objects) if objects: Printer.print_header(\"Jobs:\") objects.pop('experiment', None) dict_tabulate(objects, is_list_dict=True)", "-xp 19 resources ``` For GPU resources \\b ```bash $", "resources for job 
`{}`.'.format(_job)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) user, project_name,", "polyaxon experiment -xp 10 -p mnist logs ``` Examples for", "= PolyaxonClient().experiment.resume( user, project_name, _experiment, config=config, update_code=update_code) Printer.print_success('Experiment was resumed", "Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) Printer.print_success(\"Experiment is being stopped.\") @experiment.command() @click.option('--copy',", "polyaxon experiment -xp 1 outputs ``` \"\"\" user, project_name, _experiment", "Printer.print_header(\"Jobs:\") objects.pop('experiment', None) dict_tabulate(objects, is_list_dict=True) @experiment.command() @click.option('--job', '-j', type=int, help=\"The", "job: _job = get_experiment_job_or_local(job) get_experiment_job_resources() else: get_experiment_resources() @experiment.command() @click.option('--job', '-j',", "for getting experiment job resources: \\b ```bash $ polyaxon experiment", "ctx.obj = ctx.obj or {} ctx.obj['project'] = project ctx.obj['experiment'] =", "import ( get_experiment_job_or_local, get_project_experiment_or_local ) from polyaxon_cli.cli.upload import upload from", "import get_logs_handler from polyaxon_cli.utils.validation import validate_tags from polyaxon_client.exceptions import PolyaxonClientException", "_experiment) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not unbookmark", "project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) if not click.confirm(\"Are sure you", "$ polyaxon experiment -xp 10 -p mnist logs ``` Examples", "Printer.print_success('Experiment was copied with id {}'.format(response.id)) else: response = PolyaxonClient().experiment.restart(", "$ polyaxon experiment --experiment=1 jobs ``` \"\"\" user, project_name, _experiment", "`{}`.'.format(_experiment)) Printer.print_error( 'Error message 
`{}`.'.format(e)) sys.exit(1) try: PolyaxonClient().experiment_job.logs( user, project_name,", "get_experiment_job_logs() else: get_experiment_logs() @experiment.command() @click.pass_context @clean_outputs def outputs(ctx): \"\"\"Download outputs", "for experiment `{}`.'.format(_experiment)) Printer.print_header('Navigation:') dict_tabulate(meta) else: Printer.print_header('No statuses found for", "name, e.g. 'mnist' or 'adam/mnist'.\") @click.option('--experiment', '-xp', type=int, help=\"The experiment", "as e: Printer.print_error('Could not unbookmark experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e))", "def outputs(ctx): \"\"\"Download outputs for experiment. Uses [Caching](/references/polyaxon-cli/#caching) Examples: \\b", "project_name, _experiment, stream=False) get_logs_handler(handle_job_info=True, show_timestamp=not hide_time, stream=False)(response.content.decode().split('\\n')) print() if not", "= PolyaxonClient().experiment.copy( user, project_name, _experiment, config=config, update_code=update_code) Printer.print_success('Experiment was copied", "gpu): \"\"\"Get experiment or experiment job resources. Uses [Caching](/references/polyaxon-cli/#caching) Examples", "comma separated values.') @click.pass_context @clean_outputs def update(ctx, name, description, tags):", "job resources. 
Uses [Caching](/references/polyaxon-cli/#caching) Examples for getting experiment resources: \\b", "job resources: \\b ```bash $ polyaxon experiment -xp 19 resources", "message_handler=message_handler) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not get", "as e: Printer.print_error('Could not get logs for experiment `{}`.'.format(_experiment)) Printer.print_error('Error", "1 --project=cats-vs-dogs get ``` \\b ```bash $ polyaxon experiment -xp", "polyaxon experiment -xp 2 unbookmark ``` \"\"\" user, project_name, _experiment", "upload the repo before restarting.\") @click.pass_context @clean_outputs def restart(ctx, copy,", "type=int, help=\"The job id.\") @click.option('--past', '-p', is_flag=True, help=\"Show the past", "experiment: \\b ```bash $ polyaxon experiment get # if experiment", "`{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) meta = get_meta_response(response) if meta:", "response.resources: get_resources(response.resources.to_dict(), header=\"Job resources:\") response = Printer.add_status_color(response.to_light_dict( humanize_values=True, exclude_attrs=['uuid', 'definition',", "try: PolyaxonClient().experiment_job.logs( user, project_name, _experiment, _job, message_handler=get_logs_handler(handle_job_info=True, show_timestamp=not hide_time)) except", "non-interactively.\") @click.pass_context @clean_outputs def stop(ctx, yes): \"\"\"Stop experiment. Uses [Caching](/references/polyaxon-cli/#caching)", "if objects: Printer.print_header(\"Statuses:\") objects.pop('experiment', None) dict_tabulate(objects, is_list_dict=True) def get_experiment_job_statuses(): try:", "Example: \\b ```bash $ polyaxon experiment delete ``` \"\"\" user,", "@click.pass_context @clean_outputs def unbookmark(ctx): \"\"\"Unbookmark experiment. 
Uses [Caching](/references/polyaxon-cli/#caching) Examples: \\b", "[Caching](/references/polyaxon-cli/#caching) Examples: \\b ```bash $ polyaxon experiment --experiment=1 jobs ```", "\\b ```bash $ polyaxon experiment bookmark ``` \\b ```bash $", "update_code = None if file: config = rhea.read(file) # Check", "--name=\"unique-name\" ``` \"\"\" user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) update_dict", "description:\") click.echo('{}\\n'.format(experiment.description)) if experiment.resources: get_resources(experiment.resources.to_dict(), header=\"Experiment resources:\") if experiment.declarations: Printer.print_header(\"Experiment", "`{}`.'.format(_experiment)) Printer.print_header('Navigation:') dict_tabulate(meta) else: Printer.print_header('No jobs found for experiment `{}`.'.format(_experiment))", "help=\"To upload the repo before restarting.\") @click.pass_context @clean_outputs def restart(ctx,", "1 statuses --job 1 ``` \"\"\" def get_experiment_statuses(): try: response", "upload from polyaxon_cli.client import PolyaxonClient from polyaxon_cli.client.exceptions import PolyaxonHTTPError, PolyaxonShouldExitError", "get_experiment_job_statuses() else: get_experiment_statuses() @experiment.command() @click.option('--job', '-j', type=int, help=\"The job id.\")", "past: try: response = PolyaxonClient().experiment.logs( user, project_name, _experiment, stream=False) get_logs_handler(handle_job_info=True,", "\"\"\" user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) try: PolyaxonClient().experiment.bookmark(user, project_name,", "PolyaxonClient from polyaxon_cli.client.exceptions import PolyaxonHTTPError, PolyaxonShouldExitError from polyaxon_cli.logger import clean_outputs", "polyaxon experiment unbookmark ``` \\b ```bash $ polyaxon experiment -xp", "resources: \\b ```bash $ polyaxon experiment -xp 19 resources -j", "= 
get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) if job: _job = get_experiment_job_or_local(job) get_experiment_job_resources() else:", "help=\"Automatic yes to prompts. \" \"Assume \\\"yes\\\" as answer to", "@clean_outputs def resources(ctx, job, gpu): \"\"\"Get experiment or experiment job", "`{}`.'.format(_job)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'),", "PolyaxonClientException) as e: Printer.print_error('Could not get status for job `{}`.'.format(job))", "experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) Printer.print_success(\"Experiment is bookmarked.\") @experiment.command()", "job. Uses [Caching](/references/polyaxon-cli/#caching) Examples for getting an experiment: \\b ```bash", "@experiment.command() @click.option('--job', '-j', type=int, help=\"The job id.\") @click.option('--gpu', '-g', is_flag=True,", "response = PolyaxonClient().experiment.logs( user, project_name, _experiment, stream=False) get_logs_handler(handle_job_info=True, show_timestamp=not hide_time,", "delete ``` \"\"\" user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) if", "\"\"\"List jobs for experiment. 
Uses [Caching](/references/polyaxon-cli/#caching) Examples: \\b ```bash $", "experiment.') sys.exit(0) try: response = PolyaxonClient().experiment.update_experiment( user, project_name, _experiment, update_dict)", "= PolyaxonClient().experiment_job.logs( user, project_name, _experiment, _job, stream=False) get_logs_handler(handle_job_info=True, show_timestamp=not hide_time,", "$ polyaxon experiment -xp 2 unbookmark ``` \"\"\" user, project_name,", "@click.option('--job', '-j', type=int, help=\"The job id.\") @click.option('--gpu', '-g', is_flag=True, help=\"List", "@click.option('--description', type=str, help='Description of the experiment.') @click.option('--tags', type=str, help='Tags of", "polyaxon experiment --experiment=1 get --job=10 ``` \\b ```bash $ polyaxon", "Examples for getting experiment job resources: \\b ```bash $ polyaxon", "Printer.print_header(\"Statuses:\") objects.pop('job', None) dict_tabulate(objects, is_list_dict=True) page = page or 1", "ExperimentManager.purge() except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not delete", "= get_experiment_job_or_local(job) get_experiment_job_statuses() else: get_experiment_statuses() @experiment.command() @click.option('--job', '-j', type=int, help=\"The", "Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) @experiment.command() @click.option('--file', '-f', multiple=True, type=click.Path(exists=True), help=\"The", "status for job `{}`.'.format(job)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) meta =", "id.\") @click.pass_context @clean_outputs def get(ctx, job): \"\"\"Get experiment or experiment", "help=\"The project name, e.g. 
'mnist' or 'adam/mnist'.\") @click.option('--experiment', '-xp', type=int,", "_experiment, _job, stream=False) get_logs_handler(handle_job_info=True, show_timestamp=not hide_time, stream=False)(response.content.decode().split('\\n')) print() if not", "as e: Printer.print_error('Could not restart experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e))", "except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not get resources", "2 update --tags=\"foo, bar\" --name=\"unique-name\" ``` \"\"\" user, project_name, _experiment", "stream=False) get_logs_handler(handle_job_info=True, show_timestamp=not hide_time, stream=False)(response.content.decode().split('\\n')) print() if not follow: return", "experiment `{}`.'.format(_experiment)) Printer.print_error( 'Error message `{}`.'.format(e)) sys.exit(1) try: PolyaxonClient().experiment_job.logs( user,", "for Job `{}`.'.format(_job)) Printer.print_header('Navigation:') dict_tabulate(meta) else: Printer.print_header('No statuses found for", "_experiment, message_handler=get_logs_handler(handle_job_info=True, show_timestamp=not hide_time)) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:", "[Caching](/references/polyaxon-cli/#caching) Examples for getting experiment resources: \\b ```bash $ polyaxon", "found for experiment `{}`.'.format(_experiment)) objects = list_dicts_to_tabulate( [Printer.add_status_color(o.to_light_dict(humanize_values=True), status_key='status') for", "experiment -xp 10 -p mnist logs ``` Examples for getting", "if experiment is cached ``` \\b ```bash $ polyaxon experiment", "= get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) page = page or 1 try: response", "not get resources for job `{}`.'.format(_job)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1)", "get_experiment_resources(): try: message_handler = Printer.gpu_resources if gpu 
else Printer.resources PolyaxonClient().experiment.resources(", "-xp 1 --project=cats-vs-dogs get -j 2 ``` \\b ```bash $", "if response.resources: get_resources(response.resources.to_dict(), header=\"Job resources:\") response = Printer.add_status_color(response.to_light_dict( humanize_values=True, exclude_attrs=['uuid',", "def get_experiment_job_statuses(): try: response = PolyaxonClient().experiment_job.get_statuses(user, project_name, _experiment, _job, page=page)", "# -*- coding: utf-8 -*- from __future__ import absolute_import, division,", "Printer.print_header(\"Job info:\") dict_tabulate(response) user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) if", "restart ``` \"\"\" config = None update_code = None if", "to all prompts and run non-interactively.\") @click.pass_context @clean_outputs def stop(ctx,", "Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) meta = get_meta_response(response) if meta: Printer.print_header('Jobs", "project, experiment): # pylint:disable=redefined-outer-name \"\"\"Commands for experiments.\"\"\" ctx.obj = ctx.obj", "with.\") @click.option('-u', is_flag=True, default=False, help=\"To upload the repo before restarting.\")", "# Purge caching ExperimentManager.purge() except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:", "project_name, _experiment, config=config, update_code=update_code) Printer.print_success('Experiment was restarted with id {}'.format(response.id))", "\"\"\"Resume experiment. 
Uses [Caching](/references/polyaxon-cli/#caching) Examples: \\b ```bash $ polyaxon experiment", "``` \"\"\" user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) try: PolyaxonClient().experiment.unbookmark(user,", "o in response['results']]) if objects: Printer.print_header(\"Statuses:\") objects.pop('experiment', None) dict_tabulate(objects, is_list_dict=True)", "download outputs for experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) Printer.print_success('Files", "message `{}`.'.format(e)) sys.exit(1) @experiment.command() @click.option('--file', '-f', multiple=True, type=click.Path(exists=True), help=\"The polyaxon", "# pylint:disable=redefined-outer-name \"\"\"Commands for experiments.\"\"\" ctx.obj = ctx.obj or {}", "experiment.resources: get_resources(experiment.resources.to_dict(), header=\"Experiment resources:\") if experiment.declarations: Printer.print_header(\"Experiment declarations:\") dict_tabulate(experiment.declarations) if", "response['results']]) if objects: Printer.print_header(\"Statuses:\") objects.pop('experiment', None) dict_tabulate(objects, is_list_dict=True) def get_experiment_job_statuses():", "[Caching](/references/polyaxon-cli/#caching) Examples: \\b ```bash $ polyaxon experiment bookmark ``` \\b", "\"Assume \\\"yes\\\" as answer to all prompts and run non-interactively.\")", "= PolyaxonClient().experiment.restart( user, project_name, _experiment, config=config, update_code=update_code) Printer.print_success('Experiment was restarted", "import ( Printer, dict_tabulate, get_meta_response, get_resources, list_dicts_to_tabulate ) from polyaxon_cli.utils.log_handler", "was delete successfully\".format(_experiment)) @experiment.command() @click.option('--name', type=str, help='Name of the experiment,", "with.\") @click.option('-u', is_flag=True, default=False, help=\"To upload the repo before resuming.\")", 
"--project=cats-vs-dogs get ``` \\b ```bash $ polyaxon experiment -xp 1", "get_logs_handler(handle_job_info=True, show_timestamp=not hide_time, stream=False)(response.content.decode().split('\\n')) print() if not follow: return except", "@clean_outputs def stop(ctx, yes): \"\"\"Stop experiment. Uses [Caching](/references/polyaxon-cli/#caching) Examples: \\b", "experiment -xp 19 resources -j 1 ``` For GPU resources", "get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) if job: _job = get_experiment_job_or_local(job) get_experiment_job_statuses() else: get_experiment_statuses()", "```bash $ polyaxon experiment bookmark ``` \\b ```bash $ polyaxon", "experiment delete ``` \"\"\" user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment'))", "ctx.obj['experiment'] = experiment @experiment.command() @click.option('--job', '-j', type=int, help=\"The job id.\")", "PolyaxonClient().experiment_job.resources(user, project_name, _experiment, _job, message_handler=message_handler) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as", "# if experiment is cached ``` \\b ```bash $ polyaxon", "Printer.print_header(\"Experiment description:\") click.echo('{}\\n'.format(experiment.description)) if experiment.resources: get_resources(experiment.resources.to_dict(), header=\"Experiment resources:\") if experiment.declarations:", "for getting experiment resources: \\b ```bash $ polyaxon experiment -xp", "except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not unbookmark experiment", "job statuses: \\b ```bash $ polyaxon experiment statuses -j 3", "\"\"\" user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) page = page", "experiment. 
Uses [Caching](/references/polyaxon-cli/#caching) Examples: \\b ```bash $ polyaxon experiment --experiment=1", "\\b ```bash $ polyaxon experiment -xp 19 resources ``` For", "polyaxon experiment -xp 1 -p alain/cats-vs-dogs get ``` Examples for", "except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could get status for", "```bash $ polyaxon experiment -xp 2 unbookmark ``` \"\"\" user,", "ctx.obj.get('experiment')) try: PolyaxonClient().experiment.download_outputs(user, project_name, _experiment) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as", "if job: _job = get_experiment_job_or_local(job) get_experiment_job_resources() else: get_experiment_resources() @experiment.command() @click.option('--job',", "get_resources(response.resources.to_dict(), header=\"Job resources:\") response = Printer.add_status_color(response.to_light_dict( humanize_values=True, exclude_attrs=['uuid', 'definition', 'experiment',", "ctx.obj.get('experiment')) try: PolyaxonClient().experiment.bookmark(user, project_name, _experiment) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as", "dict_tabulate(objects, is_list_dict=True) page = page or 1 user, project_name, _experiment", "Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment'))", "@click.pass_context @clean_outputs def stop(ctx, yes): \"\"\"Stop experiment. 
Uses [Caching](/references/polyaxon-cli/#caching) Examples:", "try: PolyaxonClient().experiment.logs( user, project_name, _experiment, message_handler=get_logs_handler(handle_job_info=True, show_timestamp=not hide_time)) except (PolyaxonHTTPError,", "cache.cache(config_manager=ExperimentManager, response=response) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not", "1 # if experiment is cached ``` \\b ```bash $", "``` Examples for getting an experiment job: \\b ```bash $", "try: message_handler = Printer.gpu_resources if gpu else Printer.resources PolyaxonClient().experiment.resources( user,", "sys.exit(1) @experiment.command() @click.option('--file', '-f', multiple=True, type=click.Path(exists=True), help=\"The polyaxon files to", "$ polyaxon experiment -xp 1 --project=cats-vs-dogs get ``` \\b ```bash", "Examples getting experiment statuses: \\b ```bash $ polyaxon experiment statuses", "= get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) try: if copy: response = PolyaxonClient().experiment.copy( user,", "1 user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) if job: _job", "'uuid', 'config', 'project', 'experiments', 'description', 'declarations', 'last_metric', 'resources', 'jobs', 'run_env'", "coding: utf-8 -*- from __future__ import absolute_import, division, print_function import", "the experiment before restarting.\") @click.option('--file', '-f', multiple=True, type=click.Path(exists=True), help=\"The polyaxon", "'-j', type=int, help=\"The job id.\") @click.option('--gpu', '-g', is_flag=True, help=\"List experiment", "= PolyaxonClient().experiment.list_jobs( user, project_name, _experiment, page=page) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException)", "PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could get status for experiment 
`{}`.'.format(_experiment))", "polyaxon_cli.utils.log_handler import get_logs_handler from polyaxon_cli.utils.validation import validate_tags from polyaxon_client.exceptions import", "delete experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) if response.status_code ==", "Printer.print_error('Could not download outputs for experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e))", "outputs(ctx): \"\"\"Download outputs for experiment. Uses [Caching](/references/polyaxon-cli/#caching) Examples: \\b ```bash", "jobs for experiment. Uses [Caching](/references/polyaxon-cli/#caching) Examples: \\b ```bash $ polyaxon", "try: response = PolyaxonClient().experiment.get_statuses( user, project_name, _experiment, page=page) except (PolyaxonHTTPError,", "2 stop ``` \"\"\" user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment'))", "polyaxon experiment get -j 1 # if experiment is cached", "Uses [Caching](/references/polyaxon-cli/#caching) Examples: \\b ```bash $ polyaxon experiment --experiment=1 restart", "get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) try: PolyaxonClient().experiment.download_outputs(user, project_name, _experiment) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException)", "outputs for experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) Printer.print_success('Files downloaded.')", "you want to stop \" \"experiment `{}`\".format(_experiment)): click.echo('Existing without stopping", "1 -p alain/cats-vs-dogs get ``` Examples for getting an experiment", "\\b ```bash $ polyaxon experiment --experiment=1 resume ``` \"\"\" config", "```bash $ polyaxon experiment -xp 1 --project=cats-vs-dogs get ``` \\b", "restarting.\") @click.pass_context @clean_outputs def restart(ctx, copy, file, u): # 
pylint:disable=redefined-builtin", "``` \\b ```bash $ polyaxon experiment -xp 1 statuses --job", "PolyaxonHTTPError, PolyaxonShouldExitError from polyaxon_cli.logger import clean_outputs from polyaxon_cli.managers.experiment import ExperimentManager", "polyaxon experiment -xp 1 --project=cats-vs-dogs get ``` \\b ```bash $", "ExperimentManager from polyaxon_cli.managers.experiment_job import ExperimentJobManager from polyaxon_cli.utils import cache from", "get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) if not yes and not click.confirm(\"Are sure you", "= PolyaxonClient().experiment.get_experiment(user, project_name, _experiment) cache.cache(config_manager=ExperimentManager, response=response) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException)", "204: Printer.print_success(\"Experiment `{}` was delete successfully\".format(_experiment)) @experiment.command() @click.option('--name', type=str, help='Name", "polyaxon experiment --experiment=1 jobs ``` \"\"\" user, project_name, _experiment =", "'Error message `{}`.'.format(e)) sys.exit(1) try: PolyaxonClient().experiment.logs( user, project_name, _experiment, message_handler=get_logs_handler(handle_job_info=True,", "humanize_values=True, exclude_attrs=[ 'uuid', 'config', 'project', 'experiments', 'description', 'declarations', 'last_metric', 'resources',", "`{}` info.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) get_experiment_details(response) def get_experiment_job(): try:", "get # if experiment is cached ``` \\b ```bash $", "\\b ```bash $ polyaxon experiment logs ``` \\b ```bash $", "import clean_outputs from polyaxon_cli.managers.experiment import ExperimentManager from polyaxon_cli.managers.experiment_job import ExperimentJobManager", "to upload if u: ctx.invoke(upload, sync=False) update_code = True user,", "experiment logs ``` \\b ```bash $ polyaxon experiment -xp 10", "get -j 2 ``` \\b ```bash $ polyaxon 
experiment -xp", "\\b ```bash $ polyaxon experiment -xp 10 -p mnist logs", "file, u): # pylint:disable=redefined-builtin \"\"\"Restart experiment. Uses [Caching](/references/polyaxon-cli/#caching) Examples: \\b", "import sys import click import rhea from polyaxon_cli.cli.getters.experiment import (", "if experiment.description: Printer.print_header(\"Experiment description:\") click.echo('{}\\n'.format(experiment.description)) if experiment.resources: get_resources(experiment.resources.to_dict(), header=\"Experiment resources:\")", "tags = validate_tags(tags) if tags: update_dict['tags'] = tags if not", "PolyaxonClient().experiment.resources( user, project_name, _experiment, message_handler=message_handler) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as", "the list of jobs.\") @click.pass_context @clean_outputs def jobs(ctx, page): \"\"\"List", "``` \\b ```bash $ polyaxon experiment --experiment=1 get ``` \\b", "job: _job = get_experiment_job_or_local(job) get_experiment_job() else: get_experiment() @experiment.command() @click.pass_context @clean_outputs", "gpu else Printer.resources PolyaxonClient().experiment_job.resources(user, project_name, _experiment, _job, message_handler=message_handler) except (PolyaxonHTTPError,", "of the experiment, comma separated values.') @click.pass_context @clean_outputs def update(ctx,", "get_experiment_statuses() @experiment.command() @click.option('--job', '-j', type=int, help=\"The job id.\") @click.option('--gpu', '-g',", "message `{}`.'.format(e)) sys.exit(1) if response.resources: get_resources(response.resources.to_dict(), header=\"Job resources:\") response =", "get -j 2 ``` \"\"\" def get_experiment(): try: response =", "the repo before restarting.\") @click.pass_context @clean_outputs def restart(ctx, copy, file,", "message_handler = Printer.gpu_resources if gpu else Printer.resources PolyaxonClient().experiment.resources( user, project_name,", "declarations:\") 
dict_tabulate(experiment.declarations) if experiment.last_metric: Printer.print_header(\"Experiment last metrics:\") dict_tabulate(experiment.last_metric) response =", "PolyaxonClientException) as e: Printer.print_error('Could not stop experiment `{}`.'.format(_experiment)) Printer.print_error('Error message", "= experiment @experiment.command() @click.option('--job', '-j', type=int, help=\"The job id.\") @click.pass_context", "try: response = PolyaxonClient().experiment_job.get_job(user, project_name, _experiment, _job) cache.cache(config_manager=ExperimentJobManager, response=response) except", "stream=False)(response.content.decode().split('\\n')) print() if not follow: return except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException)", "= description tags = validate_tags(tags) if tags: update_dict['tags'] = tags", "objects: Printer.print_header(\"Statuses:\") objects.pop('experiment', None) dict_tabulate(objects, is_list_dict=True) def get_experiment_job_statuses(): try: response", "-xp 1 --project=cats-vs-dogs get ``` \\b ```bash $ polyaxon experiment", "[Caching](/references/polyaxon-cli/#caching) Examples: \\b ```bash $ polyaxon experiment -xp 2 update", "polyaxon experiment -xp 1 statuses --job 1 ``` \"\"\" def", "Printer.print_header(\"Experiment declarations:\") dict_tabulate(experiment.declarations) if experiment.last_metric: Printer.print_header(\"Experiment last metrics:\") dict_tabulate(experiment.last_metric) response", "if objects: Printer.print_header(\"Jobs:\") objects.pop('experiment', None) dict_tabulate(objects, is_list_dict=True) @experiment.command() @click.option('--job', '-j',", "```bash $ polyaxon experiment -xp 1 -j 1 logs ```", "from polyaxon_cli.utils import cache from polyaxon_cli.utils.formatting import ( Printer, dict_tabulate,", "'resources'] )) Printer.print_header(\"Job info:\") dict_tabulate(response) user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'),", "= page or 1 user, 
project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment'))", "= get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) try: PolyaxonClient().experiment.unbookmark(user, project_name, _experiment) except (PolyaxonHTTPError, PolyaxonShouldExitError,", "status_key='status') for o in response['results']]) if objects: Printer.print_header(\"Statuses:\") objects.pop('job', None)", "(PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not get logs for", "help=\"Show the past logs.\") @click.option('--follow', '-f', is_flag=True, default=False, help=\"Stream logs", "getting experiment job statuses: \\b ```bash $ polyaxon experiment statuses", "before restarting.\") @click.pass_context @clean_outputs def restart(ctx, copy, file, u): #", "(PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could get status for experiment", "in response['results']]) if objects: Printer.print_header(\"Statuses:\") objects.pop('experiment', None) dict_tabulate(objects, is_list_dict=True) def", "get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) try: PolyaxonClient().experiment.bookmark(user, project_name, _experiment) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException)", "PolyaxonClientException) as e: Printer.print_error('Could not load experiment `{}` info.'.format(_experiment)) Printer.print_error('Error", "update --description=\"new description for my experiments\" ``` \\b ```bash $", "get(ctx, job): \"\"\"Get experiment or experiment job. 
Uses [Caching](/references/polyaxon-cli/#caching) Examples", "project ctx.obj['experiment'] = experiment @experiment.command() @click.option('--job', '-j', type=int, help=\"The job", "```bash $ polyaxon experiment --experiment=1 get ``` \\b ```bash $", "project_name, _experiment, page=page) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could", "bookmarked.\") @experiment.command() @click.pass_context @clean_outputs def unbookmark(ctx): \"\"\"Unbookmark experiment. Uses [Caching](/references/polyaxon-cli/#caching)", "`{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) if response.status_code == 204: Printer.print_success(\"Experiment", "for experiments.\"\"\" ctx.obj = ctx.obj or {} ctx.obj['project'] = project", "Printer.print_error('Could not update experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) Printer.print_success(\"Experiment", "```bash $ polyaxon experiment statuses ``` \\b ```bash $ polyaxon", "@clean_outputs def restart(ctx, copy, file, u): # pylint:disable=redefined-builtin \"\"\"Restart experiment.", "info.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) get_experiment_details(response) def get_experiment_job(): try: response", "status for experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) meta =", "[Printer.add_status_color(o.to_light_dict(humanize_values=True), status_key='status') for o in response['results']]) if objects: Printer.print_header(\"Statuses:\") objects.pop('experiment',", "_job = get_experiment_job_or_local(job) get_experiment_job_statuses() else: get_experiment_statuses() @experiment.command() @click.option('--job', '-j', type=int,", "logs: \\b ```bash $ polyaxon experiment logs ``` \\b ```bash", "Printer, dict_tabulate, get_meta_response, get_resources, list_dicts_to_tabulate ) from 
polyaxon_cli.utils.log_handler import get_logs_handler", "of the experiment, must be unique within the project, could", "if not follow: Printer.print_error( 'Could not get logs for experiment", "else: Printer.print_header('No statuses found for experiment `{}`.'.format(_experiment)) objects = list_dicts_to_tabulate(", "get_experiment_details(response) def get_experiment_job(): try: response = PolyaxonClient().experiment_job.get_job(user, project_name, _experiment, _job)", "if objects: Printer.print_header(\"Statuses:\") objects.pop('job', None) dict_tabulate(objects, is_list_dict=True) page = page", "-xp 1 -p alain/cats-vs-dogs get ``` Examples for getting an", "Uses [Caching](/references/polyaxon-cli/#caching) Examples: \\b ```bash $ polyaxon experiment bookmark ```", "(PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not delete experiment `{}`.'.format(_experiment))", "repo before restarting.\") @click.pass_context @clean_outputs def restart(ctx, copy, file, u):", "from polyaxon_cli.client.exceptions import PolyaxonHTTPError, PolyaxonShouldExitError from polyaxon_cli.logger import clean_outputs from", "dict_tabulate(meta) else: Printer.print_header('No statuses found for job `{}`.'.format(_job)) objects =", "= {} if name: update_dict['name'] = name if description: update_dict['description']", "yes): \"\"\"Stop experiment. 
Uses [Caching](/references/polyaxon-cli/#caching) Examples: \\b ```bash $ polyaxon", "logs ``` Examples for getting experiment job logs: \\b ```bash", "_experiment) cache.cache(config_manager=ExperimentManager, response=response) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could", "= get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) try: PolyaxonClient().experiment.bookmark(user, project_name, _experiment) except (PolyaxonHTTPError, PolyaxonShouldExitError,", "\\b ```bash $ polyaxon experiment -xp 2 stop ``` \"\"\"", "upload the repo before resuming.\") @click.pass_context @clean_outputs def resume(ctx, file,", "for experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) def get_experiment_job_logs(): if", "@click.option('--job', '-j', type=int, help=\"The job id.\") @click.option('--page', type=int, help=\"To paginate", "experiment statuses: \\b ```bash $ polyaxon experiment statuses ``` \\b", "files to update with.\") @click.option('-u', is_flag=True, default=False, help=\"To upload the", "copy the experiment before restarting.\") @click.option('--file', '-f', multiple=True, type=click.Path(exists=True), help=\"The", "click import rhea from polyaxon_cli.cli.getters.experiment import ( get_experiment_job_or_local, get_project_experiment_or_local )", "'resources', 'jobs', 'run_env' ]) Printer.print_header(\"Experiment info:\") dict_tabulate(Printer.add_status_color(response)) @click.group() @click.option('--project', '-p',", "@click.option('--name', type=str, help='Name of the experiment, must be unique within", "job `{}`.'.format(_job)) objects = list_dicts_to_tabulate( [Printer.add_status_color(o.to_light_dict(humanize_values=True), status_key='status') for o in", "1 logs ``` \"\"\" def get_experiment_logs(): if past: try: response", "PolyaxonClientException) as e: Printer.print_error('Could not get resources for experiment 
`{}`.'.format(_experiment))", "import click import rhea from polyaxon_cli.cli.getters.experiment import ( get_experiment_job_or_local, get_project_experiment_or_local", "\\b ```bash $ polyaxon experiment -xp 2 bookmark ``` \"\"\"", "dict_tabulate, get_meta_response, get_resources, list_dicts_to_tabulate ) from polyaxon_cli.utils.log_handler import get_logs_handler from", "from polyaxon_cli.logger import clean_outputs from polyaxon_cli.managers.experiment import ExperimentManager from polyaxon_cli.managers.experiment_job", "`{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) def get_experiment_job_logs(): if past: try:", "except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not resume experiment", "get_experiment_job(): try: response = PolyaxonClient().experiment_job.get_job(user, project_name, _experiment, _job) cache.cache(config_manager=ExperimentJobManager, response=response)", "None) dict_tabulate(objects, is_list_dict=True) page = page or 1 user, project_name,", "for experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) Printer.print_success('Files downloaded.') @experiment.command()", "objects: Printer.print_header(\"Jobs:\") objects.pop('experiment', None) dict_tabulate(objects, is_list_dict=True) @experiment.command() @click.option('--job', '-j', type=int,", "statuses found for job `{}`.'.format(_job)) objects = list_dicts_to_tabulate( [Printer.add_status_color(o.to_light_dict(humanize_values=True), status_key='status')", "$ polyaxon experiment -xp 2 bookmark ``` \"\"\" user, project_name,", "you want to delete experiment `{}`\".format(_experiment)): click.echo('Existing without deleting experiment.')", "experiment -xp 1 outputs ``` \"\"\" user, project_name, _experiment =", "@click.pass_context @clean_outputs def get(ctx, job): \"\"\"Get experiment or experiment job.", 
"PolyaxonClient().experiment.unbookmark(user, project_name, _experiment) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could", "if tags: update_dict['tags'] = tags if not update_dict: Printer.print_warning('No argument", "was restarted with id {}'.format(response.id)) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as", "1 --gpu ``` \"\"\" def get_experiment_resources(): try: message_handler = Printer.gpu_resources", "@click.option('--job', '-j', type=int, help=\"The job id.\") @click.option('--past', '-p', is_flag=True, help=\"Show", "```bash $ polyaxon experiment -xp 19 resources -j 1 --gpu", "o in response['results']]) if objects: Printer.print_header(\"Statuses:\") objects.pop('job', None) dict_tabulate(objects, is_list_dict=True)", "None update_code = None if file: config = rhea.read(file) #", "need to upload if u: ctx.invoke(upload, sync=False) update_code = True", "Uses [Caching](/references/polyaxon-cli/#caching) Examples: \\b ```bash $ polyaxon experiment -xp 1", "polyaxon_cli.utils.formatting import ( Printer, dict_tabulate, get_meta_response, get_resources, list_dicts_to_tabulate ) from", "'adam/mnist'.\") @click.option('--experiment', '-xp', type=int, help=\"The experiment id number.\") @click.pass_context @clean_outputs", "ctx.obj['project'] = project ctx.obj['experiment'] = experiment @experiment.command() @click.option('--job', '-j', type=int,", "and not click.confirm(\"Are sure you want to stop \" \"experiment", "experiment job resources: \\b ```bash $ polyaxon experiment -xp 19", "polyaxon experiment -xp 1 --project=cats-vs-dogs get -j 2 ``` \\b", "polyaxon experiment -xp 1 -j 1 logs ``` \"\"\" def", "experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) Printer.print_success('Files downloaded.') @experiment.command() @click.pass_context", "`{}`.'.format(_experiment)) Printer.print_error('Error message 
`{}`.'.format(e)) sys.exit(1) Printer.print_success(\"Experiment is bookmarked.\") @experiment.command() @click.pass_context", "logs(ctx, job, past, follow, hide_time): \"\"\"Get experiment or experiment job", "Check if we need to upload if u: ctx.invoke(upload, sync=False)", ")) Printer.print_header(\"Job info:\") dict_tabulate(response) user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment'))", "@experiment.command() @click.pass_context @clean_outputs def delete(ctx): \"\"\"Delete experiment. Uses [Caching](/references/polyaxon-cli/#caching) Example:", "Printer.print_success(\"Experiment `{}` was delete successfully\".format(_experiment)) @experiment.command() @click.option('--name', type=str, help='Name of", "@experiment.command() @click.option('--copy', '-c', is_flag=True, default=False, help=\"To copy the experiment before", "experiment `{}`.'.format(_experiment)) Printer.print_header('Navigation:') dict_tabulate(meta) else: Printer.print_header('No jobs found for experiment", "type=int, help=\"The job id.\") @click.pass_context @clean_outputs def get(ctx, job): \"\"\"Get", "as e: Printer.print_error('Could get status for experiment `{}`.'.format(_experiment)) Printer.print_error('Error message", "experiment -xp 2 update --tags=\"foo, bar\" --name=\"unique-name\" ``` \"\"\" user,", "response = PolyaxonClient().experiment.get_experiment(user, project_name, _experiment) cache.cache(config_manager=ExperimentManager, response=response) except (PolyaxonHTTPError, PolyaxonShouldExitError,", "stop ``` \"\"\" user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) if", "for o in response['results']]) if objects: Printer.print_header(\"Statuses:\") objects.pop('experiment', None) dict_tabulate(objects,", "if past: try: response = PolyaxonClient().experiment.logs( user, project_name, _experiment, stream=False)", "\"\"\"Delete experiment. 
Uses [Caching](/references/polyaxon-cli/#caching) Example: \\b ```bash $ polyaxon experiment", "sys.exit(1) Printer.print_success(\"Experiment is being stopped.\") @experiment.command() @click.option('--copy', '-c', is_flag=True, default=False,", "\" \"Assume \\\"yes\\\" as answer to all prompts and run", "restarting.\") @click.option('--file', '-f', multiple=True, type=click.Path(exists=True), help=\"The polyaxon files to update", "Uses [Caching](/references/polyaxon-cli/#caching) Examples: \\b ```bash $ polyaxon experiment stop ```", "unbookmark ``` \\b ```bash $ polyaxon experiment -xp 2 unbookmark", "try: response = PolyaxonClient().experiment_job.get_statuses(user, project_name, _experiment, _job, page=page) except (PolyaxonHTTPError,", "Printer.print_header('Jobs for experiment `{}`.'.format(_experiment)) Printer.print_header('Navigation:') dict_tabulate(meta) else: Printer.print_header('No jobs found", "`{}`.'.format(_job)) objects = list_dicts_to_tabulate( [Printer.add_status_color(o.to_light_dict(humanize_values=True), status_key='status') for o in response['results']])", "-xp 2 bookmark ``` \"\"\" user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'),", "def get_experiment_job_resources(): try: message_handler = Printer.gpu_resources if gpu else Printer.resources", "print() if not follow: return except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as", "page = page or 1 try: response = PolyaxonClient().experiment.list_jobs( user,", "stream.\") @click.pass_context @clean_outputs def logs(ctx, job, past, follow, hide_time): \"\"\"Get", "``` \"\"\" def get_experiment_statuses(): try: response = PolyaxonClient().experiment.get_statuses( user, project_name,", "to delete experiment `{}`\".format(_experiment)): click.echo('Existing without deleting experiment.') sys.exit(1) try:", "Examples for getting experiment job logs: \\b ```bash $ polyaxon", "`{}`\".format(_experiment)): click.echo('Existing 
without stopping experiment.') sys.exit(0) try: PolyaxonClient().experiment.stop(user, project_name, _experiment)", "PolyaxonClientException) as e: Printer.print_error('Could not restart experiment `{}`.'.format(_experiment)) Printer.print_error('Error message", "statuses: \\b ```bash $ polyaxon experiment statuses ``` \\b ```bash", "PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not get resources for experiment", "PolyaxonClientException) as e: Printer.print_error('Could not download outputs for experiment `{}`.'.format(_experiment))", "(PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: if not follow: Printer.print_error( 'Could", "for experiment `{}`.'.format(_experiment)) objects = [Printer.add_status_color(o.to_light_dict(humanize_values=True)) for o in response['results']]", "= get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) if not yes and not click.confirm(\"Are sure", "$ polyaxon experiment -xp 1 statuses --job 1 ``` \"\"\"", "experiment or experiment job statuses. 
Uses [Caching](/references/polyaxon-cli/#caching) Examples getting experiment", "prompts and run non-interactively.\") @click.pass_context @clean_outputs def stop(ctx, yes): \"\"\"Stop", "--experiment=1 get ``` \\b ```bash $ polyaxon experiment -xp 1", "e: Printer.print_error('Could not get status for job `{}`.'.format(job)) Printer.print_error('Error message", "getting experiment job logs: \\b ```bash $ polyaxon experiment -xp", "is_list_dict=True) page = page or 1 user, project_name, _experiment =", "def resources(ctx, job, gpu): \"\"\"Get experiment or experiment job resources.", "hide_time)) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not get", "user, project_name, _experiment, _job, stream=False) get_logs_handler(handle_job_info=True, show_timestamp=not hide_time, stream=False)(response.content.decode().split('\\n')) print()", "= get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) try: response = PolyaxonClient().experiment.resume( user, project_name, _experiment,", "except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not get status", "```bash $ polyaxon experiment -xp 2 update --description=\"new description for", "Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) Printer.print_success(\"Experiment updated.\") get_experiment_details(response) @experiment.command() @click.option('--yes', '-y',", "found for job `{}`.'.format(_job)) objects = list_dicts_to_tabulate( [Printer.add_status_color(o.to_light_dict(humanize_values=True), status_key='status') for", "try: response = PolyaxonClient().experiment.logs( user, project_name, _experiment, stream=False) get_logs_handler(handle_job_info=True, show_timestamp=not", "\"\"\"Restart experiment. Uses [Caching](/references/polyaxon-cli/#caching) Examples: \\b ```bash $ polyaxon experiment", "def delete(ctx): \"\"\"Delete experiment. 
Uses [Caching](/references/polyaxon-cli/#caching) Example: \\b ```bash $", "_experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) try: if copy: response = PolyaxonClient().experiment.copy(", "PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not bookmark experiment `{}`.'.format(_experiment)) Printer.print_error('Error", "_experiment, update_dict) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not", "(PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not stop experiment `{}`.'.format(_experiment))", "```bash $ polyaxon experiment -xp 19 resources -j 1 ```", "$ polyaxon experiment --experiment=1 get ``` \\b ```bash $ polyaxon", "if job: _job = get_experiment_job_or_local(job) get_experiment_job_logs() else: get_experiment_logs() @experiment.command() @click.pass_context", "help=\"To upload the repo before resuming.\") @click.pass_context @clean_outputs def resume(ctx,", "@click.option('--gpu', '-g', is_flag=True, help=\"List experiment GPU resources.\") @click.pass_context @clean_outputs def", "``` For GPU resources \\b ```bash $ polyaxon experiment -xp", "@click.option('--job', '-j', type=int, help=\"The job id.\") @click.pass_context @clean_outputs def get(ctx,", "get_experiment(): try: response = PolyaxonClient().experiment.get_experiment(user, project_name, _experiment) cache.cache(config_manager=ExperimentManager, response=response) except", "experiment.') @click.option('--tags', type=str, help='Tags of the experiment, comma separated values.')", "e: Printer.print_error('Could not update experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1)", "resources \\b ```bash $ polyaxon experiment -xp 19 resources --gpu", "user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), 
ctx.obj.get('experiment')) page = page or", "or 1 user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) if job:", "@click.option('--project', '-p', type=str, help=\"The project name, e.g. 'mnist' or 'adam/mnist'.\")", "@click.pass_context @clean_outputs def bookmark(ctx): \"\"\"Bookmark experiment. Uses [Caching](/references/polyaxon-cli/#caching) Examples: \\b", "-*- from __future__ import absolute_import, division, print_function import sys import", "experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) Printer.print_success(\"Experiment is being stopped.\")", "job statuses. Uses [Caching](/references/polyaxon-cli/#caching) Examples getting experiment statuses: \\b ```bash", "list_dicts_to_tabulate ) from polyaxon_cli.utils.log_handler import get_logs_handler from polyaxon_cli.utils.validation import validate_tags", "`{}`.'.format(e)) sys.exit(1) Printer.print_success(\"Experiment updated.\") get_experiment_details(response) @experiment.command() @click.option('--yes', '-y', is_flag=True, default=False,", "```bash $ polyaxon experiment -xp 19 resources --gpu ``` Examples", "for o in response['results']] objects = list_dicts_to_tabulate(objects) if objects: Printer.print_header(\"Jobs:\")", "@clean_outputs def statuses(ctx, job, page): \"\"\"Get experiment or experiment job", "Printer.print_success(\"Experiment is being stopped.\") @experiment.command() @click.option('--copy', '-c', is_flag=True, default=False, help=\"To", "PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not resume experiment `{}`.'.format(_experiment)) Printer.print_error('Error", "def get_experiment(): try: response = PolyaxonClient().experiment.get_experiment(user, project_name, _experiment) cache.cache(config_manager=ExperimentManager, response=response)", "Examples for getting an experiment job: \\b ```bash $ polyaxon", "@clean_outputs def 
get(ctx, job): \"\"\"Get experiment or experiment job. Uses", "$ polyaxon experiment -xp 1 statuses ``` Examples getting experiment", "-p mnist logs ``` Examples for getting experiment job logs:", "experiment. Uses [Caching](/references/polyaxon-cli/#caching) Examples: \\b ```bash $ polyaxon experiment bookmark", "response['results']]) if objects: Printer.print_header(\"Statuses:\") objects.pop('job', None) dict_tabulate(objects, is_list_dict=True) page =", "header=\"Job resources:\") response = Printer.add_status_color(response.to_light_dict( humanize_values=True, exclude_attrs=['uuid', 'definition', 'experiment', 'unique_name',", "PolyaxonClient().experiment.copy( user, project_name, _experiment, config=config, update_code=update_code) Printer.print_success('Experiment was copied with", "``` \"\"\" user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) update_dict =", "page = page or 1 user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'),", "Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) if response.resources: get_resources(response.resources.to_dict(), header=\"Job resources:\") response", "else: Printer.print_header('No statuses found for job `{}`.'.format(_job)) objects = list_dicts_to_tabulate(", "alain/cats-vs-dogs get -j 2 ``` \"\"\" def get_experiment(): try: response", "Purge caching ExperimentManager.purge() except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could", "'-j', type=int, help=\"The job id.\") @click.option('--past', '-p', is_flag=True, help=\"Show the", "not update experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) Printer.print_success(\"Experiment updated.\")", "Printer.print_error('Could not get job `{}`.'.format(_job)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) if", "from __future__ import 
absolute_import, division, print_function import sys import click", "Printer.print_header(\"Experiment last metrics:\") dict_tabulate(experiment.last_metric) response = experiment.to_light_dict( humanize_values=True, exclude_attrs=[ 'uuid',", "]) Printer.print_header(\"Experiment info:\") dict_tabulate(Printer.add_status_color(response)) @click.group() @click.option('--project', '-p', type=str, help=\"The project", "response = PolyaxonClient().experiment_job.logs( user, project_name, _experiment, _job, stream=False) get_logs_handler(handle_job_info=True, show_timestamp=not", "_experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) if job: _job = get_experiment_job_or_local(job) get_experiment_job_logs()", "message `{}`.'.format(e)) sys.exit(1) user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) if", "experiment(ctx, project, experiment): # pylint:disable=redefined-outer-name \"\"\"Commands for experiments.\"\"\" ctx.obj =", "experiment job. 
Uses [Caching](/references/polyaxon-cli/#caching) Examples for getting an experiment: \\b", "@click.option('--hide_time', is_flag=True, default=False, help=\"Whether or not to hide timestamps from", "Printer.print_success(\"Experiment is bookmarked.\") @experiment.command() @click.pass_context @clean_outputs def unbookmark(ctx): \"\"\"Unbookmark experiment.", "for job `{}`.'.format(job)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) meta = get_meta_response(response)", "'-f', is_flag=True, default=False, help=\"Stream logs after showing past logs.\") @click.option('--hide_time',", "$ polyaxon experiment bookmark ``` \\b ```bash $ polyaxon experiment", "for experiment `{}`.'.format(_experiment)) objects = list_dicts_to_tabulate( [Printer.add_status_color(o.to_light_dict(humanize_values=True), status_key='status') for o", "try: response = PolyaxonClient().experiment.list_jobs( user, project_name, _experiment, page=page) except (PolyaxonHTTPError,", "experiment --experiment=1 get --job=10 ``` \\b ```bash $ polyaxon experiment", "name, description, tags): \"\"\"Update experiment. 
Uses [Caching](/references/polyaxon-cli/#caching) Examples: \\b ```bash", "follow: Printer.print_error( 'Could not get logs for experiment `{}`.'.format(_experiment)) Printer.print_error(", "'-j', type=int, help=\"The job id.\") @click.pass_context @clean_outputs def get(ctx, job):", "PolyaxonClient().experiment_job.get_statuses(user, project_name, _experiment, _job, page=page) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as", "for getting experiment logs: \\b ```bash $ polyaxon experiment logs", "_experiment, _job) cache.cache(config_manager=ExperimentJobManager, response=response) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:", "PolyaxonClientException) as e: Printer.print_error('Could not update experiment `{}`.'.format(_experiment)) Printer.print_error('Error message", "experiment stop ``` \\b ```bash $ polyaxon experiment -xp 2", "the experiment, must be unique within the project, could be", "resources: \\b ```bash $ polyaxon experiment -xp 19 resources ```", "try: response = PolyaxonClient().experiment.get_experiment(user, project_name, _experiment) cache.cache(config_manager=ExperimentManager, response=response) except (PolyaxonHTTPError,", "True user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) try: if copy:", "resources(ctx, job, gpu): \"\"\"Get experiment or experiment job resources. 
Uses", "timestamps from the log stream.\") @click.pass_context @clean_outputs def logs(ctx, job,", "```bash $ polyaxon experiment -xp 2 bookmark ``` \"\"\" user,", "PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not get status for job", "\\b ```bash $ polyaxon experiment --experiment=1 restart ``` \"\"\" config", "update experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) Printer.print_success(\"Experiment updated.\") get_experiment_details(response)", "before restarting.\") @click.option('--file', '-f', multiple=True, type=click.Path(exists=True), help=\"The polyaxon files to", "-xp 1 -p alain/cats-vs-dogs get -j 2 ``` \"\"\" def", "was copied with id {}'.format(response.id)) else: response = PolyaxonClient().experiment.restart( user,", "message `{}`.'.format(e)) sys.exit(1) Printer.print_success(\"Experiment updated.\") get_experiment_details(response) @experiment.command() @click.option('--yes', '-y', is_flag=True,", "exclude_attrs=[ 'uuid', 'config', 'project', 'experiments', 'description', 'declarations', 'last_metric', 'resources', 'jobs',", "resources ``` For GPU resources \\b ```bash $ polyaxon experiment", "```bash $ polyaxon experiment logs ``` \\b ```bash $ polyaxon", "polyaxon experiment --experiment=1 resume ``` \"\"\" config = None update_code", "@click.option('--page', type=int, help=\"To paginate through the list of statuses.\") @click.pass_context", "being stopped.\") @experiment.command() @click.option('--copy', '-c', is_flag=True, default=False, help=\"To copy the", "Printer.resources PolyaxonClient().experiment.resources( user, project_name, _experiment, message_handler=message_handler) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException)", "-*- coding: utf-8 -*- from __future__ import absolute_import, division, print_function", "`{}`.'.format(e)) sys.exit(1) meta = get_meta_response(response) if meta: Printer.print_header('Jobs for 
experiment", "meta: Printer.print_header('Jobs for experiment `{}`.'.format(_experiment)) Printer.print_header('Navigation:') dict_tabulate(meta) else: Printer.print_header('No jobs", "@experiment.command() @click.option('--job', '-j', type=int, help=\"The job id.\") @click.option('--page', type=int, help=\"To", "resources for experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) def get_experiment_job_resources():", "= Printer.gpu_resources if gpu else Printer.resources PolyaxonClient().experiment_job.resources(user, project_name, _experiment, _job,", "tags): \"\"\"Update experiment. Uses [Caching](/references/polyaxon-cli/#caching) Examples: \\b ```bash $ polyaxon", "experiment or experiment job. Uses [Caching](/references/polyaxon-cli/#caching) Examples for getting an", "sys.exit(1) try: PolyaxonClient().experiment.logs( user, project_name, _experiment, message_handler=get_logs_handler(handle_job_info=True, show_timestamp=not hide_time)) except", "project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) try: PolyaxonClient().experiment.unbookmark(user, project_name, _experiment) except", "user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) try: PolyaxonClient().experiment.unbookmark(user, project_name, _experiment)", "1 try: response = PolyaxonClient().experiment.list_jobs( user, project_name, _experiment, page=page) except", "{}'.format(response.id)) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not restart", "project_name, _experiment, _job, stream=False) get_logs_handler(handle_job_info=True, show_timestamp=not hide_time, stream=False)(response.content.decode().split('\\n')) print() if", "`{}`.'.format(e)) sys.exit(1) Printer.print_success(\"Experiment is being stopped.\") @experiment.command() @click.option('--copy', '-c', 
is_flag=True,", "if not follow: return except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:", "update_dict['name'] = name if description: update_dict['description'] = description tags =", "is_list_dict=True) def get_experiment_job_statuses(): try: response = PolyaxonClient().experiment_job.get_statuses(user, project_name, _experiment, _job,", "job `{}`.'.format(job)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) meta = get_meta_response(response) if", "page): \"\"\"List jobs for experiment. Uses [Caching](/references/polyaxon-cli/#caching) Examples: \\b ```bash", "def unbookmark(ctx): \"\"\"Unbookmark experiment. Uses [Caching](/references/polyaxon-cli/#caching) Examples: \\b ```bash $", "_experiment, _job, message_handler=get_logs_handler(handle_job_info=True, show_timestamp=not hide_time)) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as", "else: get_experiment() @experiment.command() @click.pass_context @clean_outputs def delete(ctx): \"\"\"Delete experiment. 
Uses", "to update the experiment.') sys.exit(0) try: response = PolyaxonClient().experiment.update_experiment( user,", "$ polyaxon experiment -xp 2 stop ``` \"\"\" user, project_name,", "= get_meta_response(response) if meta: Printer.print_header('Jobs for experiment `{}`.'.format(_experiment)) Printer.print_header('Navigation:') dict_tabulate(meta)", "polyaxon_client.exceptions import PolyaxonClientException def get_experiment_details(experiment): # pylint:disable=redefined-outer-name if experiment.description: Printer.print_header(\"Experiment", "``` \"\"\" user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) if not", "project_name, _experiment, _job, message_handler=get_logs_handler(handle_job_info=True, show_timestamp=not hide_time)) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException)", "Uses [Caching](/references/polyaxon-cli/#caching) Example: \\b ```bash $ polyaxon experiment delete ```", "jobs.\") @click.pass_context @clean_outputs def jobs(ctx, page): \"\"\"List jobs for experiment.", "Examples: \\b ```bash $ polyaxon experiment --experiment=1 restart ``` \"\"\"", "else: Printer.print_header('No jobs found for experiment `{}`.'.format(_experiment)) objects = [Printer.add_status_color(o.to_light_dict(humanize_values=True))", "= validate_tags(tags) if tags: update_dict['tags'] = tags if not update_dict:", "def restart(ctx, copy, file, u): # pylint:disable=redefined-builtin \"\"\"Restart experiment. Uses", "message `{}`.'.format(e)) sys.exit(1) try: PolyaxonClient().experiment_job.logs( user, project_name, _experiment, _job, message_handler=get_logs_handler(handle_job_info=True,", "def logs(ctx, job, past, follow, hide_time): \"\"\"Get experiment or experiment", "def jobs(ctx, page): \"\"\"List jobs for experiment. 
Uses [Caching](/references/polyaxon-cli/#caching) Examples:", "# Check if we need to upload if u: ctx.invoke(upload,", "experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) Printer.print_success(\"Experiment updated.\") get_experiment_details(response) @experiment.command()", "user, project_name, _experiment, stream=False) get_logs_handler(handle_job_info=True, show_timestamp=not hide_time, stream=False)(response.content.decode().split('\\n')) print() if", "user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) update_dict = {} if", "if meta: Printer.print_header('Statuses for experiment `{}`.'.format(_experiment)) Printer.print_header('Navigation:') dict_tabulate(meta) else: Printer.print_header('No", "not get resources for experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1)", "PolyaxonClient().experiment_job.get_job(user, project_name, _experiment, _job) cache.cache(config_manager=ExperimentJobManager, response=response) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException)", "@click.option('--file', '-f', multiple=True, type=click.Path(exists=True), help=\"The polyaxon files to update with.\")", "from polyaxon_cli.cli.upload import upload from polyaxon_cli.client import PolyaxonClient from polyaxon_cli.client.exceptions", "== 204: Printer.print_success(\"Experiment `{}` was delete successfully\".format(_experiment)) @experiment.command() @click.option('--name', type=str,", "= PolyaxonClient().experiment.delete_experiment( user, project_name, _experiment) # Purge caching ExperimentManager.purge() except", "Printer.print_header('Navigation:') dict_tabulate(meta) else: Printer.print_header('No jobs found for experiment `{}`.'.format(_experiment)) objects", "PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not delete experiment 
`{}`.'.format(_experiment)) Printer.print_error('Error", "experiment unbookmark ``` \\b ```bash $ polyaxon experiment -xp 2", "experiment `{}` info.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) get_experiment_details(response) def get_experiment_job():", "= get_experiment_job_or_local(job) get_experiment_job_resources() else: get_experiment_resources() @experiment.command() @click.option('--job', '-j', type=int, help=\"The", "want to delete experiment `{}`\".format(_experiment)): click.echo('Existing without deleting experiment.') sys.exit(1)", "(PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not download outputs for", "message `{}`.'.format(e)) sys.exit(1) meta = get_meta_response(response) if meta: Printer.print_header('Jobs for", "``` \\b ```bash $ polyaxon experiment -xp 2 unbookmark ```", "Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) def get_experiment_job_logs(): if past: try: response", "Printer.print_header('Statuses for Job `{}`.'.format(_job)) Printer.print_header('Navigation:') dict_tabulate(meta) else: Printer.print_header('No statuses found", "experiment -xp 2 unbookmark ``` \"\"\" user, project_name, _experiment =", "True user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) try: response =", "message `{}`.'.format(e)) sys.exit(1) Printer.print_success(\"Experiment is bookmarked.\") @experiment.command() @click.pass_context @clean_outputs def", "-xp 10 -p mnist logs ``` Examples for getting experiment", "```bash $ polyaxon experiment stop ``` \\b ```bash $ polyaxon", "try: PolyaxonClient().experiment.unbookmark(user, project_name, _experiment) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:", "the list of statuses.\") @click.pass_context @clean_outputs def statuses(ctx, job, page):", "``` \"\"\" config = None update_code = None if file:", 
"u): # pylint:disable=redefined-builtin \"\"\"Resume experiment. Uses [Caching](/references/polyaxon-cli/#caching) Examples: \\b ```bash", "@clean_outputs def unbookmark(ctx): \"\"\"Unbookmark experiment. Uses [Caching](/references/polyaxon-cli/#caching) Examples: \\b ```bash", "\\b ```bash $ polyaxon experiment -xp 1 --project=cats-vs-dogs get ```", "```bash $ polyaxon experiment get # if experiment is cached", "import upload from polyaxon_cli.client import PolyaxonClient from polyaxon_cli.client.exceptions import PolyaxonHTTPError,", "or 'adam/mnist'.\") @click.option('--experiment', '-xp', type=int, help=\"The experiment id number.\") @click.pass_context", "{} if name: update_dict['name'] = name if description: update_dict['description'] =", "experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) if response.status_code == 204:", "for job `{}`.'.format(_job)) objects = list_dicts_to_tabulate( [Printer.add_status_color(o.to_light_dict(humanize_values=True), status_key='status') for o", "PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not get job `{}`.'.format(_job)) Printer.print_error('Error", "``` \\b ```bash $ polyaxon experiment -xp 10 -p mnist", "get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) try: response = PolyaxonClient().experiment.resume( user, project_name, _experiment, config=config,", "try: if copy: response = PolyaxonClient().experiment.copy( user, project_name, _experiment, config=config,", "@click.option('--tags', type=str, help='Tags of the experiment, comma separated values.') @click.pass_context", "= name if description: update_dict['description'] = description tags = validate_tags(tags)", "Printer.print_error('Could not delete experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) if", "get_experiment_job_or_local(job) get_experiment_job_resources() else: 
get_experiment_resources() @experiment.command() @click.option('--job', '-j', type=int, help=\"The job", "pylint:disable=redefined-outer-name \"\"\"Commands for experiments.\"\"\" ctx.obj = ctx.obj or {} ctx.obj['project']", "the past logs.\") @click.option('--follow', '-f', is_flag=True, default=False, help=\"Stream logs after", "not load experiment `{}` info.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) get_experiment_details(response)", "in response['results']] objects = list_dicts_to_tabulate(objects) if objects: Printer.print_header(\"Jobs:\") objects.pop('experiment', None)", "resources:\") response = Printer.add_status_color(response.to_light_dict( humanize_values=True, exclude_attrs=['uuid', 'definition', 'experiment', 'unique_name', 'resources']", "get_meta_response, get_resources, list_dicts_to_tabulate ) from polyaxon_cli.utils.log_handler import get_logs_handler from polyaxon_cli.utils.validation", "polyaxon experiment -xp 19 resources ``` For GPU resources \\b", "\"\"\"Get experiment or experiment job resources. Uses [Caching](/references/polyaxon-cli/#caching) Examples for", "for job `{}`.'.format(_job)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) user, project_name, _experiment", "help=\"The job id.\") @click.option('--past', '-p', is_flag=True, help=\"Show the past logs.\")", "response = PolyaxonClient().experiment_job.get_statuses(user, project_name, _experiment, _job, page=page) except (PolyaxonHTTPError, PolyaxonShouldExitError,", "page): \"\"\"Get experiment or experiment job statuses. 
Uses [Caching](/references/polyaxon-cli/#caching) Examples", "\"\"\" user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) if not yes", "cache.cache(config_manager=ExperimentJobManager, response=response) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not", "to hide timestamps from the log stream.\") @click.pass_context @clean_outputs def", "page=page) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not get", "= experiment.to_light_dict( humanize_values=True, exclude_attrs=[ 'uuid', 'config', 'project', 'experiments', 'description', 'declarations',", "id.\") @click.option('--past', '-p', is_flag=True, help=\"Show the past logs.\") @click.option('--follow', '-f',", "as e: Printer.print_error('Could not get status for job `{}`.'.format(job)) Printer.print_error('Error", "``` \"\"\" def get_experiment(): try: response = PolyaxonClient().experiment.get_experiment(user, project_name, _experiment)", "-j 1 --gpu ``` \"\"\" def get_experiment_resources(): try: message_handler =", "try: PolyaxonClient().experiment.download_outputs(user, project_name, _experiment) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:", "click.confirm(\"Are sure you want to delete experiment `{}`\".format(_experiment)): click.echo('Existing without", "(PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not get job `{}`.'.format(_job))", "_job, stream=False) get_logs_handler(handle_job_info=True, show_timestamp=not hide_time, stream=False)(response.content.decode().split('\\n')) print() if not follow:", "def update(ctx, name, description, tags): \"\"\"Update experiment. 
Uses [Caching](/references/polyaxon-cli/#caching) Examples:", "```bash $ polyaxon experiment -xp 2 update --tags=\"foo, bar\" --name=\"unique-name\"", "is_flag=True, default=False, help=\"To copy the experiment before restarting.\") @click.option('--file', '-f',", "$ polyaxon experiment -xp 1 --project=cats-vs-dogs get -j 2 ```", "--description=\"new description for my experiments\" ``` \\b ```bash $ polyaxon", "[Printer.add_status_color(o.to_light_dict(humanize_values=True), status_key='status') for o in response['results']]) if objects: Printer.print_header(\"Statuses:\") objects.pop('job',", "user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) try: response = PolyaxonClient().experiment.resume(", "if experiment.last_metric: Printer.print_header(\"Experiment last metrics:\") dict_tabulate(experiment.last_metric) response = experiment.to_light_dict( humanize_values=True,", "get status for job `{}`.'.format(job)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) meta", "-p alain/cats-vs-dogs get ``` Examples for getting an experiment job:", "def get_experiment_resources(): try: message_handler = Printer.gpu_resources if gpu else Printer.resources", "to update with.\") @click.option('-u', is_flag=True, default=False, help=\"To upload the repo", "name: update_dict['name'] = name if description: update_dict['description'] = description tags", "\\b ```bash $ polyaxon experiment get # if experiment is", "_experiment) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not stop", "message_handler=get_logs_handler(handle_job_info=True, show_timestamp=not hide_time)) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could", "= page or 1 try: response = PolyaxonClient().experiment.list_jobs( user, project_name,", "$ polyaxon experiment statuses ``` \\b ```bash $ polyaxon experiment", 
"GPU resources \\b ```bash $ polyaxon experiment -xp 19 resources", "for experiment `{}`.'.format(_experiment)) Printer.print_error( 'Error message `{}`.'.format(e)) sys.exit(1) try: PolyaxonClient().experiment_job.logs(", "= get_meta_response(response) if meta: Printer.print_header('Statuses for experiment `{}`.'.format(_experiment)) Printer.print_header('Navigation:') dict_tabulate(meta)", "experiment): # pylint:disable=redefined-outer-name \"\"\"Commands for experiments.\"\"\" ctx.obj = ctx.obj or", "project name, e.g. 'mnist' or 'adam/mnist'.\") @click.option('--experiment', '-xp', type=int, help=\"The", "pylint:disable=redefined-builtin \"\"\"Restart experiment. Uses [Caching](/references/polyaxon-cli/#caching) Examples: \\b ```bash $ polyaxon", "'jobs', 'run_env' ]) Printer.print_header(\"Experiment info:\") dict_tabulate(Printer.add_status_color(response)) @click.group() @click.option('--project', '-p', type=str,", "_experiment) # Purge caching ExperimentManager.purge() except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as", "rhea.read(file) # Check if we need to upload if u:", "get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) page = page or 1 try: response =", "`{}`.'.format(_experiment)) Printer.print_error( 'Error message `{}`.'.format(e)) sys.exit(1) try: PolyaxonClient().experiment.logs( user, project_name,", "e: Printer.print_error('Could not delete experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1)", "Uses [Caching](/references/polyaxon-cli/#caching) Examples: \\b ```bash $ polyaxon experiment -xp 2", "copied with id {}'.format(response.id)) else: response = PolyaxonClient().experiment.restart( user, project_name,", "= [Printer.add_status_color(o.to_light_dict(humanize_values=True)) for o in response['results']] objects = list_dicts_to_tabulate(objects) if", "logs for experiment `{}`.'.format(_experiment)) Printer.print_error( 'Error 
message `{}`.'.format(e)) sys.exit(1) try:", "'-p', type=str, help=\"The project name, e.g. 'mnist' or 'adam/mnist'.\") @click.option('--experiment',", "user, project_name, _experiment, message_handler=get_logs_handler(handle_job_info=True, show_timestamp=not hide_time)) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException)", "without deleting experiment.') sys.exit(1) try: response = PolyaxonClient().experiment.delete_experiment( user, project_name,", "Printer.print_header('No statuses found for job `{}`.'.format(_job)) objects = list_dicts_to_tabulate( [Printer.add_status_color(o.to_light_dict(humanize_values=True),", "help=\"To paginate through the list of statuses.\") @click.pass_context @clean_outputs def", "`{}`.'.format(_job)) Printer.print_header('Navigation:') dict_tabulate(meta) else: Printer.print_header('No statuses found for job `{}`.'.format(_job))", "_job, message_handler=message_handler) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not", "name if description: update_dict['description'] = description tags = validate_tags(tags) if", "is_flag=True, default=False, help=\"Automatic yes to prompts. 
\" \"Assume \\\"yes\\\" as", "update_dict['tags'] = tags if not update_dict: Printer.print_warning('No argument was provided", "click.confirm(\"Are sure you want to stop \" \"experiment `{}`\".format(_experiment)): click.echo('Existing", "if job: _job = get_experiment_job_or_local(job) get_experiment_job() else: get_experiment() @experiment.command() @click.pass_context", "experiment.description: Printer.print_header(\"Experiment description:\") click.echo('{}\\n'.format(experiment.description)) if experiment.resources: get_resources(experiment.resources.to_dict(), header=\"Experiment resources:\") if", "in response['results']]) if objects: Printer.print_header(\"Statuses:\") objects.pop('job', None) dict_tabulate(objects, is_list_dict=True) page", "dict_tabulate(meta) else: Printer.print_header('No jobs found for experiment `{}`.'.format(_experiment)) objects =", "update(ctx, name, description, tags): \"\"\"Update experiment. Uses [Caching](/references/polyaxon-cli/#caching) Examples: \\b", "def get_experiment_statuses(): try: response = PolyaxonClient().experiment.get_statuses( user, project_name, _experiment, page=page)", "import PolyaxonClient from polyaxon_cli.client.exceptions import PolyaxonHTTPError, PolyaxonShouldExitError from polyaxon_cli.logger import", "resources -j 1 --gpu ``` \"\"\" def get_experiment_resources(): try: message_handler", "not delete experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) if response.status_code", "@click.pass_context @clean_outputs def jobs(ctx, page): \"\"\"List jobs for experiment. 
Uses", "response = experiment.to_light_dict( humanize_values=True, exclude_attrs=[ 'uuid', 'config', 'project', 'experiments', 'description',", "before resuming.\") @click.pass_context @clean_outputs def resume(ctx, file, u): # pylint:disable=redefined-builtin", "1 outputs ``` \"\"\" user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment'))", "as e: Printer.print_error('Could not resume experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e))", "resume experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) @experiment.command() @click.option('--page', type=int,", "update_code=update_code) Printer.print_success('Experiment was restarted with id {}'.format(response.id)) except (PolyaxonHTTPError, PolyaxonShouldExitError,", "-xp 1 outputs ``` \"\"\" user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'),", "resume ``` \"\"\" config = None update_code = None if", "logs ``` \"\"\" def get_experiment_logs(): if past: try: response =", "None) dict_tabulate(objects, is_list_dict=True) @experiment.command() @click.option('--job', '-j', type=int, help=\"The job id.\")", "@experiment.command() @click.pass_context @clean_outputs def outputs(ctx): \"\"\"Download outputs for experiment. 
Uses", "polyaxon experiment -xp 19 resources -j 1 --gpu ``` \"\"\"", "Printer.print_error('Could not get resources for experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e))", "try: response = PolyaxonClient().experiment.delete_experiment( user, project_name, _experiment) # Purge caching", "Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) Printer.print_success(\"Experiment is bookmarked.\") @experiment.command() @click.pass_context @clean_outputs", "or not to hide timestamps from the log stream.\") @click.pass_context", "e: Printer.print_error('Could not get resources for experiment `{}`.'.format(_experiment)) Printer.print_error('Error message", "want to stop \" \"experiment `{}`\".format(_experiment)): click.echo('Existing without stopping experiment.')", "PolyaxonClientException) as e: Printer.print_error('Could not get resources for job `{}`.'.format(_job))", "1 -j 1 logs ``` \"\"\" def get_experiment_logs(): if past:", "prompts. \" \"Assume \\\"yes\\\" as answer to all prompts and", "ctx.obj.get('experiment')) if not click.confirm(\"Are sure you want to delete experiment", "logs after showing past logs.\") @click.option('--hide_time', is_flag=True, default=False, help=\"Whether or", "as e: Printer.print_error('Could not bookmark experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e))", "experiment, must be unique within the project, could be none.')", "message `{}`.'.format(e)) sys.exit(1) if response.status_code == 204: Printer.print_success(\"Experiment `{}` was", "'-c', is_flag=True, default=False, help=\"To copy the experiment before restarting.\") @click.option('--file',", "or experiment job resources. 
Uses [Caching](/references/polyaxon-cli/#caching) Examples for getting experiment", "get_experiment_logs() @experiment.command() @click.pass_context @clean_outputs def outputs(ctx): \"\"\"Download outputs for experiment.", "for getting an experiment: \\b ```bash $ polyaxon experiment get", "--tags=\"foo, bar\" --name=\"unique-name\" ``` \"\"\" user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'),", "hide timestamps from the log stream.\") @click.pass_context @clean_outputs def logs(ctx,", "``` \\b ```bash $ polyaxon experiment -xp 2 bookmark ```", "PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not load experiment `{}` info.'.format(_experiment))", "default=False, help=\"To upload the repo before resuming.\") @click.pass_context @clean_outputs def", "'Could not get logs for experiment `{}`.'.format(_experiment)) Printer.print_error( 'Error message", "```bash $ polyaxon experiment -xp 10 -p mnist logs ```", "Examples: \\b ```bash $ polyaxon experiment --experiment=1 resume ``` \"\"\"", "resources \\b ```bash $ polyaxon experiment -xp 19 resources -j", "get_experiment_statuses(): try: response = PolyaxonClient().experiment.get_statuses( user, project_name, _experiment, page=page) except", "[Caching](/references/polyaxon-cli/#caching) Examples: \\b ```bash $ polyaxon experiment unbookmark ``` \\b", "= PolyaxonClient().experiment.update_experiment( user, project_name, _experiment, update_dict) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException)", "\\b ```bash $ polyaxon experiment delete ``` \"\"\" user, project_name,", "PolyaxonClient().experiment.list_jobs( user, project_name, _experiment, page=page) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as", "\\b ```bash $ polyaxon experiment -xp 1 outputs ``` \"\"\"", "if u: ctx.invoke(upload, sync=False) update_code = True user, project_name, _experiment", "get_experiment_job_or_local(job) 
get_experiment_job_statuses() else: get_experiment_statuses() @experiment.command() @click.option('--job', '-j', type=int, help=\"The job", "project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) page = page or 1", "statuses ``` Examples getting experiment job statuses: \\b ```bash $", "Printer.gpu_resources if gpu else Printer.resources PolyaxonClient().experiment.resources( user, project_name, _experiment, message_handler=message_handler)", "experiment.') sys.exit(1) try: response = PolyaxonClient().experiment.delete_experiment( user, project_name, _experiment) #", "project_name, _experiment, config=config, update_code=update_code) Printer.print_success('Experiment was copied with id {}'.format(response.id))", "type=str, help='Description of the experiment.') @click.option('--tags', type=str, help='Tags of the", "project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) try: response = PolyaxonClient().experiment.resume( user,", "-p alain/cats-vs-dogs get -j 2 ``` \"\"\" def get_experiment(): try:", "get_experiment_job_or_local(job) get_experiment_job() else: get_experiment() @experiment.command() @click.pass_context @clean_outputs def delete(ctx): \"\"\"Delete", "unbookmark(ctx): \"\"\"Unbookmark experiment. 
Uses [Caching](/references/polyaxon-cli/#caching) Examples: \\b ```bash $ polyaxon", "{}'.format(response.id)) else: response = PolyaxonClient().experiment.restart( user, project_name, _experiment, config=config, update_code=update_code)", "`{}`.'.format(e)) sys.exit(1) @experiment.command() @click.option('--file', '-f', multiple=True, type=click.Path(exists=True), help=\"The polyaxon files", "import PolyaxonHTTPError, PolyaxonShouldExitError from polyaxon_cli.logger import clean_outputs from polyaxon_cli.managers.experiment import", "experiment @experiment.command() @click.option('--job', '-j', type=int, help=\"The job id.\") @click.pass_context @clean_outputs", "PolyaxonClient().experiment.update_experiment( user, project_name, _experiment, update_dict) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as", "PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not get logs for job", "unique within the project, could be none.') @click.option('--description', type=str, help='Description", "PolyaxonClient().experiment.delete_experiment( user, project_name, _experiment) # Purge caching ExperimentManager.purge() except (PolyaxonHTTPError,", "Printer.print_error('Could not restart experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) @experiment.command()", "``` \\b ```bash $ polyaxon experiment -xp 1 --project=cats-vs-dogs get", "Printer.print_error('Could not get jobs for experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e))", "= Printer.gpu_resources if gpu else Printer.resources PolyaxonClient().experiment.resources( user, project_name, _experiment,", "@clean_outputs def outputs(ctx): \"\"\"Download outputs for experiment. 
Uses [Caching](/references/polyaxon-cli/#caching) Examples:", "'declarations', 'last_metric', 'resources', 'jobs', 'run_env' ]) Printer.print_header(\"Experiment info:\") dict_tabulate(Printer.add_status_color(response)) @click.group()", "import rhea from polyaxon_cli.cli.getters.experiment import ( get_experiment_job_or_local, get_project_experiment_or_local ) from", "be none.') @click.option('--description', type=str, help='Description of the experiment.') @click.option('--tags', type=str,", "as e: Printer.print_error('Could not download outputs for experiment `{}`.'.format(_experiment)) Printer.print_error('Error", "experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) def get_experiment_job_resources(): try: message_handler", "[Caching](/references/polyaxon-cli/#caching) Examples: \\b ```bash $ polyaxon experiment --experiment=1 resume ```", "[Caching](/references/polyaxon-cli/#caching) Examples: \\b ```bash $ polyaxon experiment -xp 1 outputs", "statuses(ctx, job, page): \"\"\"Get experiment or experiment job statuses. 
Uses", "PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not get logs for experiment", "experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) def get_experiment_job_logs(): if past:", "message `{}`.'.format(e)) sys.exit(1) @experiment.command() @click.option('--page', type=int, help=\"To paginate through the", "project_name, _experiment, message_handler=message_handler) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could", "get ``` Examples for getting an experiment job: \\b ```bash", "-xp 19 resources --gpu ``` Examples for getting experiment job", "PolyaxonClientException) as e: Printer.print_error('Could not resume experiment `{}`.'.format(_experiment)) Printer.print_error('Error message", "@click.option('--copy', '-c', is_flag=True, default=False, help=\"To copy the experiment before restarting.\")", "'-p', is_flag=True, help=\"Show the past logs.\") @click.option('--follow', '-f', is_flag=True, default=False,", "project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) try: PolyaxonClient().experiment.download_outputs(user, project_name, _experiment) except", "message `{}`.'.format(e)) sys.exit(1) Printer.print_success('Files downloaded.') @experiment.command() @click.pass_context @clean_outputs def bookmark(ctx):", "PolyaxonClient().experiment.resume( user, project_name, _experiment, config=config, update_code=update_code) Printer.print_success('Experiment was resumed with", "PolyaxonClientException) as e: Printer.print_error('Could not get logs for job `{}`.'.format(_job))", "default=False, help=\"To copy the experiment before restarting.\") @click.option('--file', '-f', multiple=True,", "e: Printer.print_error('Could not get jobs for experiment `{}`.'.format(_experiment)) Printer.print_error('Error message", "Printer.print_error('Could not get resources for job 
`{}`.'.format(_job)) Printer.print_error('Error message `{}`.'.format(e))", "PolyaxonClient().experiment.get_statuses( user, project_name, _experiment, page=page) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as", "Printer.print_error( 'Error message `{}`.'.format(e)) sys.exit(1) try: PolyaxonClient().experiment.logs( user, project_name, _experiment,", "-j 1 # if experiment is cached ``` \\b ```bash", "project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) if not yes and not", "response['results']] objects = list_dicts_to_tabulate(objects) if objects: Printer.print_header(\"Jobs:\") objects.pop('experiment', None) dict_tabulate(objects,", "Printer.print_error('Could get status for experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1)", "_job, page=page) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not", "type=int, help=\"The experiment id number.\") @click.pass_context @clean_outputs def experiment(ctx, project,", "run non-interactively.\") @click.pass_context @clean_outputs def stop(ctx, yes): \"\"\"Stop experiment. 
Uses", "get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) if not click.confirm(\"Are sure you want to delete", "(PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not bookmark experiment `{}`.'.format(_experiment))", "Printer.print_header(\"Statuses:\") objects.pop('experiment', None) dict_tabulate(objects, is_list_dict=True) def get_experiment_job_statuses(): try: response =", "number.\") @click.pass_context @clean_outputs def experiment(ctx, project, experiment): # pylint:disable=redefined-outer-name \"\"\"Commands", "for experiment `{}`.'.format(_experiment)) Printer.print_header('Navigation:') dict_tabulate(meta) else: Printer.print_header('No jobs found for", "experiment. Uses [Caching](/references/polyaxon-cli/#caching) Examples: \\b ```bash $ polyaxon experiment stop", "else Printer.resources PolyaxonClient().experiment_job.resources(user, project_name, _experiment, _job, message_handler=message_handler) except (PolyaxonHTTPError, PolyaxonShouldExitError,", "_experiment) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not bookmark", "response = PolyaxonClient().experiment.copy( user, project_name, _experiment, config=config, update_code=update_code) Printer.print_success('Experiment was", "polyaxon experiment logs ``` \\b ```bash $ polyaxon experiment -xp", "-xp 1 statuses ``` Examples getting experiment job statuses: \\b", "polyaxon_cli.utils.validation import validate_tags from polyaxon_client.exceptions import PolyaxonClientException def get_experiment_details(experiment): #", "experiment job statuses: \\b ```bash $ polyaxon experiment statuses -j", "\\b ```bash $ polyaxon experiment statuses ``` \\b ```bash $", "_experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) update_dict = {} if name: update_dict['name']", "restart(ctx, copy, file, u): # 
pylint:disable=redefined-builtin \"\"\"Restart experiment. Uses [Caching](/references/polyaxon-cli/#caching)", "config=config, update_code=update_code) Printer.print_success('Experiment was copied with id {}'.format(response.id)) else: response", "page or 1 user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) if", "e: Printer.print_error('Could not unbookmark experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1)", "past logs.\") @click.option('--follow', '-f', is_flag=True, default=False, help=\"Stream logs after showing", "$ polyaxon experiment -xp 2 update --tags=\"foo, bar\" --name=\"unique-name\" ```", "if meta: Printer.print_header('Jobs for experiment `{}`.'.format(_experiment)) Printer.print_header('Navigation:') dict_tabulate(meta) else: Printer.print_header('No", "statuses. Uses [Caching](/references/polyaxon-cli/#caching) Examples getting experiment statuses: \\b ```bash $", "delete successfully\".format(_experiment)) @experiment.command() @click.option('--name', type=str, help='Name of the experiment, must", "not get status for job `{}`.'.format(job)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1)", "deleting experiment.') sys.exit(1) try: response = PolyaxonClient().experiment.delete_experiment( user, project_name, _experiment)", "(PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not unbookmark experiment `{}`.'.format(_experiment))", "as e: Printer.print_error('Could not get job `{}`.'.format(_job)) Printer.print_error('Error message `{}`.'.format(e))", "For GPU resources \\b ```bash $ polyaxon experiment -xp 19", "`{}`.'.format(job)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) meta = get_meta_response(response) if meta:", "except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not get job", 
"get_experiment_details(experiment): # pylint:disable=redefined-outer-name if experiment.description: Printer.print_header(\"Experiment description:\") click.echo('{}\\n'.format(experiment.description)) if experiment.resources:", "sys.exit(1) Printer.print_success('Files downloaded.') @experiment.command() @click.pass_context @clean_outputs def bookmark(ctx): \"\"\"Bookmark experiment.", "or experiment job statuses. Uses [Caching](/references/polyaxon-cli/#caching) Examples getting experiment statuses:", "= rhea.read(file) # Check if we need to upload if", "experiment is cached ``` \\b ```bash $ polyaxon experiment --experiment=1", "update_dict) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not update", "meta = get_meta_response(response) if meta: Printer.print_header('Jobs for experiment `{}`.'.format(_experiment)) Printer.print_header('Navigation:')", "update the experiment.') sys.exit(0) try: response = PolyaxonClient().experiment.update_experiment( user, project_name,", "Printer.print_header('No jobs found for experiment `{}`.'.format(_experiment)) objects = [Printer.add_status_color(o.to_light_dict(humanize_values=True)) for", "Printer.print_success('Experiment was resumed with id {}'.format(response.id)) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException)", "message `{}`.'.format(e)) sys.exit(1) Printer.print_success(\"Experiment is being stopped.\") @experiment.command() @click.option('--copy', '-c',", "exclude_attrs=['uuid', 'definition', 'experiment', 'unique_name', 'resources'] )) Printer.print_header(\"Job info:\") dict_tabulate(response) user,", "user, project_name, _experiment, config=config, update_code=update_code) Printer.print_success('Experiment was restarted with id", "experiment. 
Uses [Caching](/references/polyaxon-cli/#caching) Example: \\b ```bash $ polyaxon experiment delete", "`{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) def get_experiment_job_resources(): try: message_handler =", "sys import click import rhea from polyaxon_cli.cli.getters.experiment import ( get_experiment_job_or_local,", "the experiment.') sys.exit(0) try: response = PolyaxonClient().experiment.update_experiment( user, project_name, _experiment,", "or 1 try: response = PolyaxonClient().experiment.list_jobs( user, project_name, _experiment, page=page)", "\\b ```bash $ polyaxon experiment -xp 1 -j 1 logs", "`{}`.'.format(e)) sys.exit(1) def get_experiment_job_logs(): if past: try: response = PolyaxonClient().experiment_job.logs(", "tags: update_dict['tags'] = tags if not update_dict: Printer.print_warning('No argument was", "last metrics:\") dict_tabulate(experiment.last_metric) response = experiment.to_light_dict( humanize_values=True, exclude_attrs=[ 'uuid', 'config',", "_experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) if job: _job = get_experiment_job_or_local(job) get_experiment_job_resources()", "```bash $ polyaxon experiment -xp 2 stop ``` \"\"\" user,", "[Printer.add_status_color(o.to_light_dict(humanize_values=True)) for o in response['results']] objects = list_dicts_to_tabulate(objects) if objects:", "response = PolyaxonClient().experiment.restart( user, project_name, _experiment, config=config, update_code=update_code) Printer.print_success('Experiment was", "PolyaxonClientException def get_experiment_details(experiment): # pylint:disable=redefined-outer-name if experiment.description: Printer.print_header(\"Experiment description:\") click.echo('{}\\n'.format(experiment.description))", "is_flag=True, help=\"List experiment GPU resources.\") @click.pass_context @clean_outputs def resources(ctx, job,", "polyaxon_cli.logger import clean_outputs from 
polyaxon_cli.managers.experiment import ExperimentManager from polyaxon_cli.managers.experiment_job import", "Printer.print_error('Could not resume experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) @experiment.command()", "experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) @experiment.command() @click.option('--file', '-f', multiple=True,", "[Caching](/references/polyaxon-cli/#caching) Examples for getting an experiment: \\b ```bash $ polyaxon", "`{}`.'.format(e)) sys.exit(1) Printer.print_success(\"Experiment is bookmarked.\") @experiment.command() @click.pass_context @clean_outputs def unbookmark(ctx):", "= PolyaxonClient().experiment_job.get_job(user, project_name, _experiment, _job) cache.cache(config_manager=ExperimentJobManager, response=response) except (PolyaxonHTTPError, PolyaxonShouldExitError,", "`{}`.'.format(e)) sys.exit(1) if response.resources: get_resources(response.resources.to_dict(), header=\"Job resources:\") response = Printer.add_status_color(response.to_light_dict(", "from polyaxon_cli.utils.formatting import ( Printer, dict_tabulate, get_meta_response, get_resources, list_dicts_to_tabulate )", "objects.pop('experiment', None) dict_tabulate(objects, is_list_dict=True) def get_experiment_job_statuses(): try: response = PolyaxonClient().experiment_job.get_statuses(user,", "-xp 2 stop ``` \"\"\" user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'),", "logs for job `{}`.'.format(_job)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) user, project_name," ]
[ "9 + 16 = 25 = 52. There exists exactly", "b < c: if a + b + c ==", "a < b < c, for which, a2 + b2", "= True break if solved: break if solved: break product", "is a set of three natural numbers, a < b", "for which a + b + c = 1000. Find", "a + b + c == 1000: if a**2 +", "if solved: break product = a*b*c print(\"The product of only", "There exists exactly one Pythagorean triplet for which a +", "== 1000: if a**2 + b**2 == c**2: solved =", "+ c = 1000. Find the product abc. \"\"\" solved", "1000: if a**2 + b**2 == c**2: solved = True", "in range(1, 1000): for b in range(1, 1000): for c", "\"\"\" A Pythagorean triplet is a set of three natural", "example, 32 + 42 = 9 + 16 = 25", "a*b*c print(\"The product of only triplet who exists is :", "if solved: break if solved: break product = a*b*c print(\"The", "= a*b*c print(\"The product of only triplet who exists is", "for c in range(1, 1000): if a < b <", "For example, 32 + 42 = 9 + 16 =", "print(\"The product of only triplet who exists is : \",", "a + b + c = 1000. Find the product", "+ b2 = c2 For example, 32 + 42 =", "c: if a + b + c == 1000: if", "range(1, 1000): for c in range(1, 1000): if a <", "c2 For example, 32 + 42 = 9 + 16", "= 25 = 52. There exists exactly one Pythagorean triplet", "1000): for c in range(1, 1000): if a < b", "Pythagorean triplet is a set of three natural numbers, a", "+ b**2 == c**2: solved = True break if solved:", "< c: if a + b + c == 1000:", "product abc. \"\"\" solved = False for a in range(1,", "b in range(1, 1000): for c in range(1, 1000): if", "c**2: solved = True break if solved: break if solved:", "solved: break if solved: break product = a*b*c print(\"The product", "solved = True break if solved: break if solved: break", "break if solved: break product = a*b*c print(\"The product of", "b < c, for which, a2 + b2 = c2", "16 = 25 = 52. There exists exactly one Pythagorean", "= False for a in range(1, 1000): for b in", "= 52. 
There exists exactly one Pythagorean triplet for which", "\"\"\" solved = False for a in range(1, 1000): for", "+ 42 = 9 + 16 = 25 = 52.", "Find the product abc. \"\"\" solved = False for a", "+ 16 = 25 = 52. There exists exactly one", "= c2 For example, 32 + 42 = 9 +", "1000): if a < b < c: if a +", "c == 1000: if a**2 + b**2 == c**2: solved", "25 = 52. There exists exactly one Pythagorean triplet for", "set of three natural numbers, a < b < c,", "numbers, a < b < c, for which, a2 +", "for which, a2 + b2 = c2 For example, 32", "a2 + b2 = c2 For example, 32 + 42", "a**2 + b**2 == c**2: solved = True break if", "exists exactly one Pythagorean triplet for which a + b", "A Pythagorean triplet is a set of three natural numbers,", "which a + b + c = 1000. Find the", "the product abc. \"\"\" solved = False for a in", "for b in range(1, 1000): for c in range(1, 1000):", "break product = a*b*c print(\"The product of only triplet who", "a in range(1, 1000): for b in range(1, 1000): for", "which, a2 + b2 = c2 For example, 32 +", "range(1, 1000): if a < b < c: if a", "in range(1, 1000): if a < b < c: if", "in range(1, 1000): for c in range(1, 1000): if a", "range(1, 1000): for b in range(1, 1000): for c in", "if a + b + c == 1000: if a**2", "natural numbers, a < b < c, for which, a2", "b**2 == c**2: solved = True break if solved: break", "triplet for which a + b + c = 1000.", "c = 1000. Find the product abc. \"\"\" solved =", "== c**2: solved = True break if solved: break if", "c in range(1, 1000): if a < b < c:", "True break if solved: break if solved: break product =", "of three natural numbers, a < b < c, for", "< b < c, for which, a2 + b2 =", "+ b + c = 1000. 
Find the product abc.", "product of only triplet who exists is : \", product)", "a < b < c: if a + b +", "exactly one Pythagorean triplet for which a + b +", "1000): for b in range(1, 1000): for c in range(1,", "triplet is a set of three natural numbers, a <", "break if solved: break if solved: break product = a*b*c", "if a < b < c: if a + b", "False for a in range(1, 1000): for b in range(1,", "if a**2 + b**2 == c**2: solved = True break", "1000. Find the product abc. \"\"\" solved = False for", "42 = 9 + 16 = 25 = 52. There", "= 1000. Find the product abc. \"\"\" solved = False", "+ b + c == 1000: if a**2 + b**2", "product = a*b*c print(\"The product of only triplet who exists", "b2 = c2 For example, 32 + 42 = 9", "a set of three natural numbers, a < b <", "one Pythagorean triplet for which a + b + c", "abc. \"\"\" solved = False for a in range(1, 1000):", "< c, for which, a2 + b2 = c2 For", "solved = False for a in range(1, 1000): for b", "for a in range(1, 1000): for b in range(1, 1000):", "b + c = 1000. Find the product abc. \"\"\"", "b + c == 1000: if a**2 + b**2 ==", "c, for which, a2 + b2 = c2 For example,", "+ c == 1000: if a**2 + b**2 == c**2:", "solved: break product = a*b*c print(\"The product of only triplet", "< b < c: if a + b + c", "52. There exists exactly one Pythagorean triplet for which a", "Pythagorean triplet for which a + b + c =", "three natural numbers, a < b < c, for which,", "32 + 42 = 9 + 16 = 25 =", "= 9 + 16 = 25 = 52. There exists" ]
[ "= t.stitches.all() self.assertEqual(s0.left, f0) self.assertEqual(s0.right, f1) self.assertEqual(s0.state, 'notready') self.assertEqual(s1.left, f1)", "t.fragments.all() self.assertEqual(f0.start, Decimal('0.00')) self.assertEqual(f0.end, Decimal('3.33')) self.assertEqual(t.stitches.count(), 0) t = Transcript.objects.create(title='test')", "# Process raw media. raw_media.create_processed_task() transcript = refresh(transcript) # Check", "os from django.test import TestCase from unipath import Path from", "= TranscriptMedia( transcript=transcript, is_processed=False, is_full_length=True, ) with open(RAW_MEDIA_PATH, 'rb') as", "from unipath import Path from ....utils import refresh from ...media", "transcript = Transcript.objects.create(title='test') self.assertEqual(transcript.length, None) def test_setting_transcript_length_creates_fragments_and_stitches(self): t = Transcript.objects.create(title='test')", "expected_length = 5 * 60 # 5 minutes. self.assertAlmostEqual( transcript.length,", "MEDIA_TESTDATA_PATH.child('raw').child( 'NA-472-2012-12-23-Final-excerpt.mp3').absolute() class TranscriptsTestCase(TestCase): def test_transcript_starts_out_with_unknown_length(self): transcript = Transcript.objects.create(title='test') self.assertEqual(transcript.length,", "import refresh from ...media import tests from ..models import Transcript,", "with open(RAW_MEDIA_PATH, 'rb') as f: raw_media.file.save('{transcript.id}_raw.mp3'.format(**locals()), File(f)) raw_media.save() # Process", "self.assertEqual(f0.start, Decimal('0.00')) self.assertEqual(f0.end, Decimal('5.00')) self.assertEqual(f1.start, Decimal('5.00')) self.assertEqual(f1.end, Decimal('10.00')) self.assertEqual(f2.start, Decimal('10.00'))", "self.assertEqual(f1.start, Decimal('5.00')) self.assertEqual(f1.end, Decimal('10.00')) self.assertEqual(f2.start, Decimal('10.00')) self.assertEqual(f2.end, Decimal('17.77')) s0, s1", "open(RAW_MEDIA_PATH, 'rb') as f: 
raw_media.file.save('{transcript.id}_raw.mp3'.format(**locals()), File(f)) raw_media.save() # Process raw", "unipath import Path from ....utils import refresh from ...media import", "is_processed=False, is_full_length=True, ) with open(RAW_MEDIA_PATH, 'rb') as f: raw_media.file.save('{transcript.id}_raw.mp3'.format(**locals()), File(f))", "MEDIA_TESTDATA_PATH = Path(tests.__file__).parent.child('testdata') RAW_MEDIA_PATH = MEDIA_TESTDATA_PATH.child('raw').child( 'NA-472-2012-12-23-Final-excerpt.mp3').absolute() class TranscriptsTestCase(TestCase): def", "...media import tests from ..models import Transcript, TranscriptMedia MEDIA_TESTDATA_PATH =", "t.set_length('7.77') f0, = t.fragments.all() self.assertEqual(f0.start, Decimal('0.00')) self.assertEqual(f0.end, Decimal('7.77')) self.assertEqual(t.stitches.count(), 0)", "Decimal('10.00')) self.assertEqual(f2.end, Decimal('17.77')) s0, s1 = t.stitches.all() self.assertEqual(s0.left, f0) self.assertEqual(s0.right,", "f0, = t.fragments.all() self.assertEqual(f0.start, Decimal('0.00')) self.assertEqual(f0.end, Decimal('7.77')) self.assertEqual(t.stitches.count(), 0) t", "'rb') as f: raw_media.file.save('{transcript.id}_raw.mp3'.format(**locals()), File(f)) raw_media.save() # Process raw media.", "raw_media.save() # Process raw media. raw_media.create_processed_task() transcript = refresh(transcript) #", "0) t = Transcript.objects.create(title='test') t.set_length('17.77') f0, f1, f2 = t.fragments.all()", "as f: raw_media.file.save('{transcript.id}_raw.mp3'.format(**locals()), File(f)) raw_media.save() # Process raw media. 
raw_media.create_processed_task()", "def test_setting_transcript_length_creates_fragments_and_stitches(self): t = Transcript.objects.create(title='test') t.set_length('3.33') f0, = t.fragments.all() self.assertEqual(f0.start,", "refresh from ...media import tests from ..models import Transcript, TranscriptMedia", "self.assertEqual(s1.right, f2) self.assertEqual(s1.state, 'notready') if os.environ.get('FAST_TEST') != '1': from django.core.files", "self.assertEqual(s0.state, 'notready') self.assertEqual(s1.left, f1) self.assertEqual(s1.right, f2) self.assertEqual(s1.state, 'notready') if os.environ.get('FAST_TEST')", "f1) self.assertEqual(s0.state, 'notready') self.assertEqual(s1.left, f1) self.assertEqual(s1.right, f2) self.assertEqual(s1.state, 'notready') if", "t.set_length('3.33') f0, = t.fragments.all() self.assertEqual(f0.start, Decimal('0.00')) self.assertEqual(f0.end, Decimal('3.33')) self.assertEqual(t.stitches.count(), 0)", "self.assertEqual(s0.left, f0) self.assertEqual(s0.right, f1) self.assertEqual(s0.state, 'notready') self.assertEqual(s1.left, f1) self.assertEqual(s1.right, f2)", "= Transcript.objects.create( title='test transcript', ) raw_media = TranscriptMedia( transcript=transcript, is_processed=False,", "import os from django.test import TestCase from unipath import Path", "<filename>fanscribed/apps/transcripts/tests/test_transcripts.py from decimal import Decimal import os from django.test import", "None) def test_setting_transcript_length_creates_fragments_and_stitches(self): t = Transcript.objects.create(title='test') t.set_length('3.33') f0, = t.fragments.all()", "f0, f1, f2 = t.fragments.all() self.assertEqual(f0.start, Decimal('0.00')) self.assertEqual(f0.end, Decimal('5.00')) self.assertEqual(f1.start,", "Decimal('10.00')) self.assertEqual(f2.start, Decimal('10.00')) self.assertEqual(f2.end, Decimal('17.77')) s0, s1 = t.stitches.all() self.assertEqual(s0.left,", "Transcript.objects.create(title='test') t.set_length('7.77') f0, = 
t.fragments.all() self.assertEqual(f0.start, Decimal('0.00')) self.assertEqual(f0.end, Decimal('7.77')) self.assertEqual(t.stitches.count(),", "File(f)) raw_media.save() # Process raw media. raw_media.create_processed_task() transcript = refresh(transcript)", "self.assertEqual(f2.end, Decimal('17.77')) s0, s1 = t.stitches.all() self.assertEqual(s0.left, f0) self.assertEqual(s0.right, f1)", "def test_transcript_with_processed_media_has_length(self): transcript = Transcript.objects.create( title='test transcript', ) raw_media =", "transcript = refresh(transcript) # Check length. expected_length = 5 *", "title='test transcript', ) raw_media = TranscriptMedia( transcript=transcript, is_processed=False, is_full_length=True, )", "t.stitches.all() self.assertEqual(s0.left, f0) self.assertEqual(s0.right, f1) self.assertEqual(s0.state, 'notready') self.assertEqual(s1.left, f1) self.assertEqual(s1.right,", "from django.core.files import File class SlowTranscriptsTestCase(TestCase): def test_transcript_with_processed_media_has_length(self): transcript =", "import tests from ..models import Transcript, TranscriptMedia MEDIA_TESTDATA_PATH = Path(tests.__file__).parent.child('testdata')", "f0, = t.fragments.all() self.assertEqual(f0.start, Decimal('0.00')) self.assertEqual(f0.end, Decimal('3.33')) self.assertEqual(t.stitches.count(), 0) t", "f2 = t.fragments.all() self.assertEqual(f0.start, Decimal('0.00')) self.assertEqual(f0.end, Decimal('5.00')) self.assertEqual(f1.start, Decimal('5.00')) self.assertEqual(f1.end,", "= 5 * 60 # 5 minutes. self.assertAlmostEqual( transcript.length, expected_length,", "= refresh(transcript) # Check length. 
expected_length = 5 * 60", "'NA-472-2012-12-23-Final-excerpt.mp3').absolute() class TranscriptsTestCase(TestCase): def test_transcript_starts_out_with_unknown_length(self): transcript = Transcript.objects.create(title='test') self.assertEqual(transcript.length, None)", "t.set_length('17.77') f0, f1, f2 = t.fragments.all() self.assertEqual(f0.start, Decimal('0.00')) self.assertEqual(f0.end, Decimal('5.00'))", "....utils import refresh from ...media import tests from ..models import", "t = Transcript.objects.create(title='test') t.set_length('7.77') f0, = t.fragments.all() self.assertEqual(f0.start, Decimal('0.00')) self.assertEqual(f0.end,", "Decimal('5.00')) self.assertEqual(f1.end, Decimal('10.00')) self.assertEqual(f2.start, Decimal('10.00')) self.assertEqual(f2.end, Decimal('17.77')) s0, s1 =", "self.assertEqual(f0.start, Decimal('0.00')) self.assertEqual(f0.end, Decimal('7.77')) self.assertEqual(t.stitches.count(), 0) t = Transcript.objects.create(title='test') t.set_length('17.77')", "class SlowTranscriptsTestCase(TestCase): def test_transcript_with_processed_media_has_length(self): transcript = Transcript.objects.create( title='test transcript', )", "Decimal('3.33')) self.assertEqual(t.stitches.count(), 0) t = Transcript.objects.create(title='test') t.set_length('7.77') f0, = t.fragments.all()", "test_transcript_with_processed_media_has_length(self): transcript = Transcript.objects.create( title='test transcript', ) raw_media = TranscriptMedia(", "from decimal import Decimal import os from django.test import TestCase", "TranscriptMedia MEDIA_TESTDATA_PATH = Path(tests.__file__).parent.child('testdata') RAW_MEDIA_PATH = MEDIA_TESTDATA_PATH.child('raw').child( 'NA-472-2012-12-23-Final-excerpt.mp3').absolute() class TranscriptsTestCase(TestCase):", "Decimal('5.00')) self.assertEqual(f1.start, Decimal('5.00')) self.assertEqual(f1.end, Decimal('10.00')) self.assertEqual(f2.start, Decimal('10.00')) self.assertEqual(f2.end, Decimal('17.77')) s0,", 
"self.assertEqual(s1.state, 'notready') if os.environ.get('FAST_TEST') != '1': from django.core.files import File", "f1, f2 = t.fragments.all() self.assertEqual(f0.start, Decimal('0.00')) self.assertEqual(f0.end, Decimal('5.00')) self.assertEqual(f1.start, Decimal('5.00'))", "self.assertEqual(f0.end, Decimal('3.33')) self.assertEqual(t.stitches.count(), 0) t = Transcript.objects.create(title='test') t.set_length('7.77') f0, =", "import Decimal import os from django.test import TestCase from unipath", "s1 = t.stitches.all() self.assertEqual(s0.left, f0) self.assertEqual(s0.right, f1) self.assertEqual(s0.state, 'notready') self.assertEqual(s1.left,", "'notready') self.assertEqual(s1.left, f1) self.assertEqual(s1.right, f2) self.assertEqual(s1.state, 'notready') if os.environ.get('FAST_TEST') !=", "media. raw_media.create_processed_task() transcript = refresh(transcript) # Check length. expected_length =", "test_transcript_starts_out_with_unknown_length(self): transcript = Transcript.objects.create(title='test') self.assertEqual(transcript.length, None) def test_setting_transcript_length_creates_fragments_and_stitches(self): t =", "RAW_MEDIA_PATH = MEDIA_TESTDATA_PATH.child('raw').child( 'NA-472-2012-12-23-Final-excerpt.mp3').absolute() class TranscriptsTestCase(TestCase): def test_transcript_starts_out_with_unknown_length(self): transcript =", "from ..models import Transcript, TranscriptMedia MEDIA_TESTDATA_PATH = Path(tests.__file__).parent.child('testdata') RAW_MEDIA_PATH =", "= Path(tests.__file__).parent.child('testdata') RAW_MEDIA_PATH = MEDIA_TESTDATA_PATH.child('raw').child( 'NA-472-2012-12-23-Final-excerpt.mp3').absolute() class TranscriptsTestCase(TestCase): def test_transcript_starts_out_with_unknown_length(self):", "..models import Transcript, TranscriptMedia MEDIA_TESTDATA_PATH = Path(tests.__file__).parent.child('testdata') RAW_MEDIA_PATH = MEDIA_TESTDATA_PATH.child('raw').child(", "is_full_length=True, ) with open(RAW_MEDIA_PATH, 'rb') as f: 
raw_media.file.save('{transcript.id}_raw.mp3'.format(**locals()), File(f)) raw_media.save()", "= t.fragments.all() self.assertEqual(f0.start, Decimal('0.00')) self.assertEqual(f0.end, Decimal('3.33')) self.assertEqual(t.stitches.count(), 0) t =", "def test_transcript_starts_out_with_unknown_length(self): transcript = Transcript.objects.create(title='test') self.assertEqual(transcript.length, None) def test_setting_transcript_length_creates_fragments_and_stitches(self): t", "import TestCase from unipath import Path from ....utils import refresh", "self.assertEqual(f0.start, Decimal('0.00')) self.assertEqual(f0.end, Decimal('3.33')) self.assertEqual(t.stitches.count(), 0) t = Transcript.objects.create(title='test') t.set_length('7.77')", "from ....utils import refresh from ...media import tests from ..models", "Transcript.objects.create(title='test') t.set_length('17.77') f0, f1, f2 = t.fragments.all() self.assertEqual(f0.start, Decimal('0.00')) self.assertEqual(f0.end,", "'notready') if os.environ.get('FAST_TEST') != '1': from django.core.files import File class", "TranscriptsTestCase(TestCase): def test_transcript_starts_out_with_unknown_length(self): transcript = Transcript.objects.create(title='test') self.assertEqual(transcript.length, None) def test_setting_transcript_length_creates_fragments_and_stitches(self):", "t = Transcript.objects.create(title='test') t.set_length('3.33') f0, = t.fragments.all() self.assertEqual(f0.start, Decimal('0.00')) self.assertEqual(f0.end,", "s0, s1 = t.stitches.all() self.assertEqual(s0.left, f0) self.assertEqual(s0.right, f1) self.assertEqual(s0.state, 'notready')", "if os.environ.get('FAST_TEST') != '1': from django.core.files import File class SlowTranscriptsTestCase(TestCase):", "length. expected_length = 5 * 60 # 5 minutes. 
self.assertAlmostEqual(", "self.assertEqual(f1.end, Decimal('10.00')) self.assertEqual(f2.start, Decimal('10.00')) self.assertEqual(f2.end, Decimal('17.77')) s0, s1 = t.stitches.all()", "os.environ.get('FAST_TEST') != '1': from django.core.files import File class SlowTranscriptsTestCase(TestCase): def", "Decimal('0.00')) self.assertEqual(f0.end, Decimal('3.33')) self.assertEqual(t.stitches.count(), 0) t = Transcript.objects.create(title='test') t.set_length('7.77') f0,", "Transcript, TranscriptMedia MEDIA_TESTDATA_PATH = Path(tests.__file__).parent.child('testdata') RAW_MEDIA_PATH = MEDIA_TESTDATA_PATH.child('raw').child( 'NA-472-2012-12-23-Final-excerpt.mp3').absolute() class", "# Check length. expected_length = 5 * 60 # 5", "SlowTranscriptsTestCase(TestCase): def test_transcript_with_processed_media_has_length(self): transcript = Transcript.objects.create( title='test transcript', ) raw_media", "Transcript.objects.create(title='test') self.assertEqual(transcript.length, None) def test_setting_transcript_length_creates_fragments_and_stitches(self): t = Transcript.objects.create(title='test') t.set_length('3.33') f0,", "0) t = Transcript.objects.create(title='test') t.set_length('7.77') f0, = t.fragments.all() self.assertEqual(f0.start, Decimal('0.00'))", "Process raw media. raw_media.create_processed_task() transcript = refresh(transcript) # Check length.", "import Path from ....utils import refresh from ...media import tests", "Decimal('0.00')) self.assertEqual(f0.end, Decimal('7.77')) self.assertEqual(t.stitches.count(), 0) t = Transcript.objects.create(title='test') t.set_length('17.77') f0,", "= Transcript.objects.create(title='test') t.set_length('7.77') f0, = t.fragments.all() self.assertEqual(f0.start, Decimal('0.00')) self.assertEqual(f0.end, Decimal('7.77'))", "refresh(transcript) # Check length. 
expected_length = 5 * 60 #", "'1': from django.core.files import File class SlowTranscriptsTestCase(TestCase): def test_transcript_with_processed_media_has_length(self): transcript", "raw_media = TranscriptMedia( transcript=transcript, is_processed=False, is_full_length=True, ) with open(RAW_MEDIA_PATH, 'rb')", "decimal import Decimal import os from django.test import TestCase from", "= t.fragments.all() self.assertEqual(f0.start, Decimal('0.00')) self.assertEqual(f0.end, Decimal('5.00')) self.assertEqual(f1.start, Decimal('5.00')) self.assertEqual(f1.end, Decimal('10.00'))", "= Transcript.objects.create(title='test') self.assertEqual(transcript.length, None) def test_setting_transcript_length_creates_fragments_and_stitches(self): t = Transcript.objects.create(title='test') t.set_length('3.33')", "transcript=transcript, is_processed=False, is_full_length=True, ) with open(RAW_MEDIA_PATH, 'rb') as f: raw_media.file.save('{transcript.id}_raw.mp3'.format(**locals()),", "self.assertEqual(f2.start, Decimal('10.00')) self.assertEqual(f2.end, Decimal('17.77')) s0, s1 = t.stitches.all() self.assertEqual(s0.left, f0)", "raw_media.file.save('{transcript.id}_raw.mp3'.format(**locals()), File(f)) raw_media.save() # Process raw media. 
raw_media.create_processed_task() transcript =", "Path(tests.__file__).parent.child('testdata') RAW_MEDIA_PATH = MEDIA_TESTDATA_PATH.child('raw').child( 'NA-472-2012-12-23-Final-excerpt.mp3').absolute() class TranscriptsTestCase(TestCase): def test_transcript_starts_out_with_unknown_length(self): transcript", "self.assertEqual(s0.right, f1) self.assertEqual(s0.state, 'notready') self.assertEqual(s1.left, f1) self.assertEqual(s1.right, f2) self.assertEqual(s1.state, 'notready')", "f1) self.assertEqual(s1.right, f2) self.assertEqual(s1.state, 'notready') if os.environ.get('FAST_TEST') != '1': from", "Path from ....utils import refresh from ...media import tests from", "self.assertEqual(t.stitches.count(), 0) t = Transcript.objects.create(title='test') t.set_length('7.77') f0, = t.fragments.all() self.assertEqual(f0.start,", "self.assertEqual(transcript.length, None) def test_setting_transcript_length_creates_fragments_and_stitches(self): t = Transcript.objects.create(title='test') t.set_length('3.33') f0, =", "5 * 60 # 5 minutes. self.assertAlmostEqual( transcript.length, expected_length, delta=0.2)", "test_setting_transcript_length_creates_fragments_and_stitches(self): t = Transcript.objects.create(title='test') t.set_length('3.33') f0, = t.fragments.all() self.assertEqual(f0.start, Decimal('0.00'))", "t = Transcript.objects.create(title='test') t.set_length('17.77') f0, f1, f2 = t.fragments.all() self.assertEqual(f0.start,", "class TranscriptsTestCase(TestCase): def test_transcript_starts_out_with_unknown_length(self): transcript = Transcript.objects.create(title='test') self.assertEqual(transcript.length, None) def", "raw_media.create_processed_task() transcript = refresh(transcript) # Check length. 
expected_length = 5", "t.fragments.all() self.assertEqual(f0.start, Decimal('0.00')) self.assertEqual(f0.end, Decimal('7.77')) self.assertEqual(t.stitches.count(), 0) t = Transcript.objects.create(title='test')", "from django.test import TestCase from unipath import Path from ....utils", "!= '1': from django.core.files import File class SlowTranscriptsTestCase(TestCase): def test_transcript_with_processed_media_has_length(self):", "import Transcript, TranscriptMedia MEDIA_TESTDATA_PATH = Path(tests.__file__).parent.child('testdata') RAW_MEDIA_PATH = MEDIA_TESTDATA_PATH.child('raw').child( 'NA-472-2012-12-23-Final-excerpt.mp3').absolute()", "self.assertEqual(f0.end, Decimal('5.00')) self.assertEqual(f1.start, Decimal('5.00')) self.assertEqual(f1.end, Decimal('10.00')) self.assertEqual(f2.start, Decimal('10.00')) self.assertEqual(f2.end, Decimal('17.77'))", "Decimal import os from django.test import TestCase from unipath import", "transcript = Transcript.objects.create( title='test transcript', ) raw_media = TranscriptMedia( transcript=transcript,", "self.assertEqual(t.stitches.count(), 0) t = Transcript.objects.create(title='test') t.set_length('17.77') f0, f1, f2 =", "f0) self.assertEqual(s0.right, f1) self.assertEqual(s0.state, 'notready') self.assertEqual(s1.left, f1) self.assertEqual(s1.right, f2) self.assertEqual(s1.state,", "import File class SlowTranscriptsTestCase(TestCase): def test_transcript_with_processed_media_has_length(self): transcript = Transcript.objects.create( title='test", "= Transcript.objects.create(title='test') t.set_length('17.77') f0, f1, f2 = t.fragments.all() self.assertEqual(f0.start, Decimal('0.00'))", "t.fragments.all() self.assertEqual(f0.start, Decimal('0.00')) self.assertEqual(f0.end, Decimal('5.00')) self.assertEqual(f1.start, Decimal('5.00')) self.assertEqual(f1.end, Decimal('10.00')) self.assertEqual(f2.start,", ") with open(RAW_MEDIA_PATH, 'rb') as f: raw_media.file.save('{transcript.id}_raw.mp3'.format(**locals()), File(f)) 
raw_media.save() #", "Transcript.objects.create(title='test') t.set_length('3.33') f0, = t.fragments.all() self.assertEqual(f0.start, Decimal('0.00')) self.assertEqual(f0.end, Decimal('3.33')) self.assertEqual(t.stitches.count(),", "self.assertEqual(s1.left, f1) self.assertEqual(s1.right, f2) self.assertEqual(s1.state, 'notready') if os.environ.get('FAST_TEST') != '1':", ") raw_media = TranscriptMedia( transcript=transcript, is_processed=False, is_full_length=True, ) with open(RAW_MEDIA_PATH,", "Check length. expected_length = 5 * 60 # 5 minutes.", "django.test import TestCase from unipath import Path from ....utils import", "TranscriptMedia( transcript=transcript, is_processed=False, is_full_length=True, ) with open(RAW_MEDIA_PATH, 'rb') as f:", "Decimal('7.77')) self.assertEqual(t.stitches.count(), 0) t = Transcript.objects.create(title='test') t.set_length('17.77') f0, f1, f2", "TestCase from unipath import Path from ....utils import refresh from", "django.core.files import File class SlowTranscriptsTestCase(TestCase): def test_transcript_with_processed_media_has_length(self): transcript = Transcript.objects.create(", "Decimal('0.00')) self.assertEqual(f0.end, Decimal('5.00')) self.assertEqual(f1.start, Decimal('5.00')) self.assertEqual(f1.end, Decimal('10.00')) self.assertEqual(f2.start, Decimal('10.00')) self.assertEqual(f2.end,", "from ...media import tests from ..models import Transcript, TranscriptMedia MEDIA_TESTDATA_PATH", "raw media. raw_media.create_processed_task() transcript = refresh(transcript) # Check length. expected_length", "self.assertEqual(f0.end, Decimal('7.77')) self.assertEqual(t.stitches.count(), 0) t = Transcript.objects.create(title='test') t.set_length('17.77') f0, f1,", "= t.fragments.all() self.assertEqual(f0.start, Decimal('0.00')) self.assertEqual(f0.end, Decimal('7.77')) self.assertEqual(t.stitches.count(), 0) t =", "f: raw_media.file.save('{transcript.id}_raw.mp3'.format(**locals()), File(f)) raw_media.save() # Process raw media. 
raw_media.create_processed_task() transcript", "Transcript.objects.create( title='test transcript', ) raw_media = TranscriptMedia( transcript=transcript, is_processed=False, is_full_length=True,", "File class SlowTranscriptsTestCase(TestCase): def test_transcript_with_processed_media_has_length(self): transcript = Transcript.objects.create( title='test transcript',", "tests from ..models import Transcript, TranscriptMedia MEDIA_TESTDATA_PATH = Path(tests.__file__).parent.child('testdata') RAW_MEDIA_PATH", "= Transcript.objects.create(title='test') t.set_length('3.33') f0, = t.fragments.all() self.assertEqual(f0.start, Decimal('0.00')) self.assertEqual(f0.end, Decimal('3.33'))", "transcript', ) raw_media = TranscriptMedia( transcript=transcript, is_processed=False, is_full_length=True, ) with", "f2) self.assertEqual(s1.state, 'notready') if os.environ.get('FAST_TEST') != '1': from django.core.files import", "Decimal('17.77')) s0, s1 = t.stitches.all() self.assertEqual(s0.left, f0) self.assertEqual(s0.right, f1) self.assertEqual(s0.state,", "= MEDIA_TESTDATA_PATH.child('raw').child( 'NA-472-2012-12-23-Final-excerpt.mp3').absolute() class TranscriptsTestCase(TestCase): def test_transcript_starts_out_with_unknown_length(self): transcript = Transcript.objects.create(title='test')" ]
[ "any links detail = ''.join(detailsplit[::2]) #Finally, we join every other", "(passing over the link groups) into one string #print(detail) return", "list' featHolder['date'] = datetime.date.today().strftime(\"%B %d, %Y\") def get_details(link): res =", "#if t > 5: #break return feats listOfPages = codecs.open(\"ancestryFeats.csv\",", "grab the content from the meta tag detailsplit = re.split('<(.*?)>',", "def get_details(link): res = requests.get(link) res.raise_for_status() soup = BeautifulSoup(res.text, 'lxml')", "= \"https://2e.aonprd.com/\" +link feat['prereq'] = prereq feat['benefits'] = source details", "1 #print(row) #print(\"-----------------------------------\") feat = {} entries = row.find_all(lambda tag:", "< >, to pull out any links detail = ''.join(detailsplit[::2])", "listOfPages = codecs.open(\"ancestryFeats.csv\", encoding='utf-8') for line in listOfPages: featMD =", "source details = get_details(feat['link']) feat['text'] = details feats.append(feat) #if t", "res.raise_for_status() soup = BeautifulSoup(res.text, 'lxml') table = soup.find(lambda tag: tag.name=='table'", "row.find_all(lambda tag: tag.name=='td') if entries is not None: if len(entries)", "featMD[0],\"This url:\", featMD[2]) featHolder[featMD[1]] = get_feats(featMD[2].strip('\\n')) json_data = json.dumps(featHolder, indent=4)", "together (passing over the link groups) into one string #print(detail)", "2.0 Ancestry feat list' featHolder['date'] = datetime.date.today().strftime(\"%B %d, %Y\") def", "feats for :\", featMD[0],\"This url:\", featMD[2]) featHolder[featMD[1]] = get_feats(featMD[2].strip('\\n')) json_data", "#Now we split it into groups of strings seperated by", "in entries: # print(entry) # print(\"row---------------\") level = entries[1].text traits", "return feats listOfPages = codecs.open(\"ancestryFeats.csv\", encoding='utf-8') for line in listOfPages:", "#break return feats listOfPages = codecs.open(\"ancestryFeats.csv\", encoding='utf-8') for line in", "= 
entries[4].text feat['name'] = name feat['level'] = level feat['traits'] =", "#First we grab the content from the meta tag detailsplit", "+= 1 #print(row) #print(\"-----------------------------------\") feat = {} entries = row.find_all(lambda", "= soup.find(\"meta\", {'name':'description'})['content'] #First we grab the content from the", "listOfPages: featMD = line.split(\",\") print(\"Getting feats for :\", featMD[0],\"This url:\",", "print(entry) # print(\"row---------------\") level = entries[1].text traits = entries[2].text prereq", "= requests.get(link) res.raise_for_status() soup = BeautifulSoup(res.text, 'lxml') table = soup.find(lambda", "tag: tag.name=='tr') t = 0 for row in rows: t", "entries[1].text traits = entries[2].text prereq = entries[3].text source = entries[4].text", "tag.name=='table' and tag.has_attr('id') and tag['id']==\"ctl00_MainContent_TableElement\") rows = table.findAll(lambda tag: tag.name=='tr')", "detailraw) #Now we split it into groups of strings seperated", "json.dumps(featHolder, indent=4) #print(json_data) filename = \"ancestry-feats-pf2.json\" f = open(filename, \"w\")", "> 0: name = entries[0].find(\"a\").next_sibling.text #We do next_sibling here because", "details = get_details(feat['link']) feat['text'] = details feats.append(feat) #if t >", "= entries[2].text prereq = entries[3].text source = entries[4].text feat['name'] =", "featMD = line.split(\",\") print(\"Getting feats for :\", featMD[0],\"This url:\", featMD[2])", "import datetime import codecs import re featHolder = {} featHolder['name']", "%Y\") def get_details(link): res = requests.get(link) res.raise_for_status() soup = BeautifulSoup(res.text,", "requests.get(link) res.raise_for_status() soup = BeautifulSoup(res.text, 'lxml') feat = soup.find_all(\"div\", {'class':'main'})", "rows: t += 1 #print(row) #print(\"-----------------------------------\") feat = {} entries", "we grab the content from the meta tag detailsplit =", "#Finally, we join every other group 
together (passing over the", "= entries[0].find(\"a\").next_sibling.text #We do next_sibling here because the source puts", "level feat['traits'] = traits.split(\", \") feat['link'] = \"https://2e.aonprd.com/\" +link feat['prereq']", "from bs4 import BeautifulSoup import requests import json import datetime", "import codecs import re featHolder = {} featHolder['name'] = 'Pathfinder", "puts PFS links first, which we want to skip over.", "indent=4) #print(json_data) filename = \"ancestry-feats-pf2.json\" f = open(filename, \"w\") f.write(json_data)", "#print(\"-----------------------------------\") feat = {} entries = row.find_all(lambda tag: tag.name=='td') if", "and tag.has_attr('id') and tag['id']==\"ctl00_MainContent_TableElement\") rows = table.findAll(lambda tag: tag.name=='tr') t", "Ancestry feat list' featHolder['date'] = datetime.date.today().strftime(\"%B %d, %Y\") def get_details(link):", "featHolder['date'] = datetime.date.today().strftime(\"%B %d, %Y\") def get_details(link): res = requests.get(link)", "= 0 for row in rows: t += 1 #print(row)", "which we want to skip over. link = entries[0].find(\"a\").next_sibling.a['href'] #for", "requests import json import datetime import codecs import re featHolder", "= soup.find_all(\"div\", {'class':'main'}) detailraw = soup.find(\"meta\", {'name':'description'})['content'] #First we grab", "is not None: if len(entries) > 0: name = entries[0].find(\"a\").next_sibling.text", "PFS links first, which we want to skip over. 
link", "detailsplit = re.split('<(.*?)>', detailraw) #Now we split it into groups", "json import datetime import codecs import re featHolder = {}", "\") feat['link'] = \"https://2e.aonprd.com/\" +link feat['prereq'] = prereq feat['benefits'] =", "content from the meta tag detailsplit = re.split('<(.*?)>', detailraw) #Now", "''.join(detailsplit[::2]) #Finally, we join every other group together (passing over", "feat['traits'] = traits.split(\", \") feat['link'] = \"https://2e.aonprd.com/\" +link feat['prereq'] =", "import json import datetime import codecs import re featHolder =", "return detail def get_feats(link): feats = [] res = requests.get(link)", "from the meta tag detailsplit = re.split('<(.*?)>', detailraw) #Now we", "re featHolder = {} featHolder['name'] = 'Pathfinder 2.0 Ancestry feat", "json_data = json.dumps(featHolder, indent=4) #print(json_data) filename = \"ancestry-feats-pf2.json\" f =", "= entries[1].text traits = entries[2].text prereq = entries[3].text source =", "entries = row.find_all(lambda tag: tag.name=='td') if entries is not None:", "name feat['level'] = level feat['traits'] = traits.split(\", \") feat['link'] =", "entries[2].text prereq = entries[3].text source = entries[4].text feat['name'] = name", "t += 1 #print(row) #print(\"-----------------------------------\") feat = {} entries =", "not None: if len(entries) > 0: name = entries[0].find(\"a\").next_sibling.text #We", "strings seperated by < >, to pull out any links", "= entries[0].find(\"a\").next_sibling.a['href'] #for entry in entries: # print(entry) # print(\"row---------------\")", "here because the source puts PFS links first, which we", "# print(\"row---------------\") level = entries[1].text traits = entries[2].text prereq =", "it into groups of strings seperated by < >, to", "table.findAll(lambda tag: tag.name=='tr') t = 0 for row in rows:", "feat['name'] = name feat['level'] = level feat['traits'] = traits.split(\", \")", "feat['text'] = details feats.append(feat) #if t > 
5: #break return", ">, to pull out any links detail = ''.join(detailsplit[::2]) #Finally,", "entries[0].find(\"a\").next_sibling.a['href'] #for entry in entries: # print(entry) # print(\"row---------------\") level", ":\", featMD[0],\"This url:\", featMD[2]) featHolder[featMD[1]] = get_feats(featMD[2].strip('\\n')) json_data = json.dumps(featHolder,", "tag.has_attr('id') and tag['id']==\"ctl00_MainContent_TableElement\") rows = table.findAll(lambda tag: tag.name=='tr') t =", "link = entries[0].find(\"a\").next_sibling.a['href'] #for entry in entries: # print(entry) #", "if len(entries) > 0: name = entries[0].find(\"a\").next_sibling.text #We do next_sibling", "over the link groups) into one string #print(detail) return detail", "next_sibling here because the source puts PFS links first, which", "#for entry in entries: # print(entry) # print(\"row---------------\") level =", "[] res = requests.get(link) res.raise_for_status() soup = BeautifulSoup(res.text, 'lxml') table", "for row in rows: t += 1 #print(row) #print(\"-----------------------------------\") feat", "= ''.join(detailsplit[::2]) #Finally, we join every other group together (passing", "line in listOfPages: featMD = line.split(\",\") print(\"Getting feats for :\",", "= requests.get(link) res.raise_for_status() soup = BeautifulSoup(res.text, 'lxml') feat = soup.find_all(\"div\",", "0 for row in rows: t += 1 #print(row) #print(\"-----------------------------------\")", "import requests import json import datetime import codecs import re", "= datetime.date.today().strftime(\"%B %d, %Y\") def get_details(link): res = requests.get(link) res.raise_for_status()", "skip over. 
link = entries[0].find(\"a\").next_sibling.a['href'] #for entry in entries: #", "if entries is not None: if len(entries) > 0: name", "feats = [] res = requests.get(link) res.raise_for_status() soup = BeautifulSoup(res.text,", "= BeautifulSoup(res.text, 'lxml') feat = soup.find_all(\"div\", {'class':'main'}) detailraw = soup.find(\"meta\",", "links first, which we want to skip over. link =", "table = soup.find(lambda tag: tag.name=='table' and tag.has_attr('id') and tag['id']==\"ctl00_MainContent_TableElement\") rows", "the content from the meta tag detailsplit = re.split('<(.*?)>', detailraw)", "soup = BeautifulSoup(res.text, 'lxml') feat = soup.find_all(\"div\", {'class':'main'}) detailraw =", "import re featHolder = {} featHolder['name'] = 'Pathfinder 2.0 Ancestry", "len(entries) > 0: name = entries[0].find(\"a\").next_sibling.text #We do next_sibling here", "we join every other group together (passing over the link", "None: if len(entries) > 0: name = entries[0].find(\"a\").next_sibling.text #We do", "res.raise_for_status() soup = BeautifulSoup(res.text, 'lxml') feat = soup.find_all(\"div\", {'class':'main'}) detailraw", "= name feat['level'] = level feat['traits'] = traits.split(\", \") feat['link']", "= get_feats(featMD[2].strip('\\n')) json_data = json.dumps(featHolder, indent=4) #print(json_data) filename = \"ancestry-feats-pf2.json\"", "= 'Pathfinder 2.0 Ancestry feat list' featHolder['date'] = datetime.date.today().strftime(\"%B %d,", "BeautifulSoup(res.text, 'lxml') feat = soup.find_all(\"div\", {'class':'main'}) detailraw = soup.find(\"meta\", {'name':'description'})['content']", "= soup.find(lambda tag: tag.name=='table' and tag.has_attr('id') and tag['id']==\"ctl00_MainContent_TableElement\") rows =", "= prereq feat['benefits'] = source details = get_details(feat['link']) feat['text'] =", "feat list' featHolder['date'] = datetime.date.today().strftime(\"%B %d, %Y\") def get_details(link): res", "+link feat['prereq'] = prereq feat['benefits'] = source 
details = get_details(feat['link'])", "detailraw = soup.find(\"meta\", {'name':'description'})['content'] #First we grab the content from", "one string #print(detail) return detail def get_feats(link): feats = []", "line.split(\",\") print(\"Getting feats for :\", featMD[0],\"This url:\", featMD[2]) featHolder[featMD[1]] =", "groups) into one string #print(detail) return detail def get_feats(link): feats", "other group together (passing over the link groups) into one", "get_details(link): res = requests.get(link) res.raise_for_status() soup = BeautifulSoup(res.text, 'lxml') feat", "soup.find(lambda tag: tag.name=='table' and tag.has_attr('id') and tag['id']==\"ctl00_MainContent_TableElement\") rows = table.findAll(lambda", "for line in listOfPages: featMD = line.split(\",\") print(\"Getting feats for", "seperated by < >, to pull out any links detail", "into one string #print(detail) return detail def get_feats(link): feats =", "= {} entries = row.find_all(lambda tag: tag.name=='td') if entries is", "soup.find(\"meta\", {'name':'description'})['content'] #First we grab the content from the meta", "print(\"row---------------\") level = entries[1].text traits = entries[2].text prereq = entries[3].text", "BeautifulSoup(res.text, 'lxml') table = soup.find(lambda tag: tag.name=='table' and tag.has_attr('id') and", "to skip over. link = entries[0].find(\"a\").next_sibling.a['href'] #for entry in entries:", "entries[4].text feat['name'] = name feat['level'] = level feat['traits'] = traits.split(\",", "print(\"Getting feats for :\", featMD[0],\"This url:\", featMD[2]) featHolder[featMD[1]] = get_feats(featMD[2].strip('\\n'))", "{} entries = row.find_all(lambda tag: tag.name=='td') if entries is not", "def get_feats(link): feats = [] res = requests.get(link) res.raise_for_status() soup", "tag: tag.name=='table' and tag.has_attr('id') and tag['id']==\"ctl00_MainContent_TableElement\") rows = table.findAll(lambda tag:", "over. 
link = entries[0].find(\"a\").next_sibling.a['href'] #for entry in entries: # print(entry)", "= get_details(feat['link']) feat['text'] = details feats.append(feat) #if t > 5:", "because the source puts PFS links first, which we want", "feat['level'] = level feat['traits'] = traits.split(\", \") feat['link'] = \"https://2e.aonprd.com/\"", "5: #break return feats listOfPages = codecs.open(\"ancestryFeats.csv\", encoding='utf-8') for line", "groups of strings seperated by < >, to pull out", "pull out any links detail = ''.join(detailsplit[::2]) #Finally, we join", "into groups of strings seperated by < >, to pull", "= re.split('<(.*?)>', detailraw) #Now we split it into groups of", "out any links detail = ''.join(detailsplit[::2]) #Finally, we join every", "= row.find_all(lambda tag: tag.name=='td') if entries is not None: if", "import BeautifulSoup import requests import json import datetime import codecs", "want to skip over. link = entries[0].find(\"a\").next_sibling.a['href'] #for entry in", "datetime import codecs import re featHolder = {} featHolder['name'] =", "first, which we want to skip over. 
link = entries[0].find(\"a\").next_sibling.a['href']", "meta tag detailsplit = re.split('<(.*?)>', detailraw) #Now we split it", "'lxml') feat = soup.find_all(\"div\", {'class':'main'}) detailraw = soup.find(\"meta\", {'name':'description'})['content'] #First", "group together (passing over the link groups) into one string", "link groups) into one string #print(detail) return detail def get_feats(link):", "%d, %Y\") def get_details(link): res = requests.get(link) res.raise_for_status() soup =", "we split it into groups of strings seperated by <", "entries[0].find(\"a\").next_sibling.text #We do next_sibling here because the source puts PFS", "= source details = get_details(feat['link']) feat['text'] = details feats.append(feat) #if", "tag.name=='td') if entries is not None: if len(entries) > 0:", "0: name = entries[0].find(\"a\").next_sibling.text #We do next_sibling here because the", "links detail = ''.join(detailsplit[::2]) #Finally, we join every other group", "= json.dumps(featHolder, indent=4) #print(json_data) filename = \"ancestry-feats-pf2.json\" f = open(filename,", "t > 5: #break return feats listOfPages = codecs.open(\"ancestryFeats.csv\", encoding='utf-8')", "codecs import re featHolder = {} featHolder['name'] = 'Pathfinder 2.0", "level = entries[1].text traits = entries[2].text prereq = entries[3].text source", "'Pathfinder 2.0 Ancestry feat list' featHolder['date'] = datetime.date.today().strftime(\"%B %d, %Y\")", "= line.split(\",\") print(\"Getting feats for :\", featMD[0],\"This url:\", featMD[2]) featHolder[featMD[1]]", "BeautifulSoup import requests import json import datetime import codecs import", "feat = soup.find_all(\"div\", {'class':'main'}) detailraw = soup.find(\"meta\", {'name':'description'})['content'] #First we", "get_feats(featMD[2].strip('\\n')) json_data = json.dumps(featHolder, indent=4) #print(json_data) filename = \"ancestry-feats-pf2.json\" f", "tag detailsplit = re.split('<(.*?)>', detailraw) #Now we split it into", "source = 
entries[4].text feat['name'] = name feat['level'] = level feat['traits']", "# print(entry) # print(\"row---------------\") level = entries[1].text traits = entries[2].text", "encoding='utf-8') for line in listOfPages: featMD = line.split(\",\") print(\"Getting feats", "source puts PFS links first, which we want to skip", "featMD[2]) featHolder[featMD[1]] = get_feats(featMD[2].strip('\\n')) json_data = json.dumps(featHolder, indent=4) #print(json_data) filename", "prereq feat['benefits'] = source details = get_details(feat['link']) feat['text'] = details", "name = entries[0].find(\"a\").next_sibling.text #We do next_sibling here because the source", "featHolder[featMD[1]] = get_feats(featMD[2].strip('\\n')) json_data = json.dumps(featHolder, indent=4) #print(json_data) filename =", "codecs.open(\"ancestryFeats.csv\", encoding='utf-8') for line in listOfPages: featMD = line.split(\",\") print(\"Getting", "entry in entries: # print(entry) # print(\"row---------------\") level = entries[1].text", "#print(detail) return detail def get_feats(link): feats = [] res =", "#print(json_data) filename = \"ancestry-feats-pf2.json\" f = open(filename, \"w\") f.write(json_data) f.close", "entries: # print(entry) # print(\"row---------------\") level = entries[1].text traits =", "get_details(feat['link']) feat['text'] = details feats.append(feat) #if t > 5: #break", "= level feat['traits'] = traits.split(\", \") feat['link'] = \"https://2e.aonprd.com/\" +link", "bs4 import BeautifulSoup import requests import json import datetime import", "soup.find_all(\"div\", {'class':'main'}) detailraw = soup.find(\"meta\", {'name':'description'})['content'] #First we grab the", "res = requests.get(link) res.raise_for_status() soup = BeautifulSoup(res.text, 'lxml') table =", "tag.name=='tr') t = 0 for row in rows: t +=", "split it into groups of strings seperated by < >,", "{} featHolder['name'] = 'Pathfinder 2.0 Ancestry feat list' featHolder['date'] =", "= [] res = requests.get(link) 
res.raise_for_status() soup = BeautifulSoup(res.text, 'lxml')", "{'class':'main'}) detailraw = soup.find(\"meta\", {'name':'description'})['content'] #First we grab the content", "by < >, to pull out any links detail =", "detail = ''.join(detailsplit[::2]) #Finally, we join every other group together", "#print(row) #print(\"-----------------------------------\") feat = {} entries = row.find_all(lambda tag: tag.name=='td')", "details feats.append(feat) #if t > 5: #break return feats listOfPages", "url:\", featMD[2]) featHolder[featMD[1]] = get_feats(featMD[2].strip('\\n')) json_data = json.dumps(featHolder, indent=4) #print(json_data)", "the link groups) into one string #print(detail) return detail def", "get_feats(link): feats = [] res = requests.get(link) res.raise_for_status() soup =", "detail def get_feats(link): feats = [] res = requests.get(link) res.raise_for_status()", "prereq = entries[3].text source = entries[4].text feat['name'] = name feat['level']", "traits.split(\", \") feat['link'] = \"https://2e.aonprd.com/\" +link feat['prereq'] = prereq feat['benefits']", "res = requests.get(link) res.raise_for_status() soup = BeautifulSoup(res.text, 'lxml') feat =", "{'name':'description'})['content'] #First we grab the content from the meta tag", "feat['prereq'] = prereq feat['benefits'] = source details = get_details(feat['link']) feat['text']", "and tag['id']==\"ctl00_MainContent_TableElement\") rows = table.findAll(lambda tag: tag.name=='tr') t = 0", "= details feats.append(feat) #if t > 5: #break return feats", "= entries[3].text source = entries[4].text feat['name'] = name feat['level'] =", "> 5: #break return feats listOfPages = codecs.open(\"ancestryFeats.csv\", encoding='utf-8') for", "do next_sibling here because the source puts PFS links first,", "featHolder['name'] = 'Pathfinder 2.0 Ancestry feat list' featHolder['date'] = datetime.date.today().strftime(\"%B", "tag['id']==\"ctl00_MainContent_TableElement\") rows = table.findAll(lambda tag: 
tag.name=='tr') t = 0 for", "datetime.date.today().strftime(\"%B %d, %Y\") def get_details(link): res = requests.get(link) res.raise_for_status() soup", "we want to skip over. link = entries[0].find(\"a\").next_sibling.a['href'] #for entry", "traits = entries[2].text prereq = entries[3].text source = entries[4].text feat['name']", "rows = table.findAll(lambda tag: tag.name=='tr') t = 0 for row", "entries is not None: if len(entries) > 0: name =", "feat['benefits'] = source details = get_details(feat['link']) feat['text'] = details feats.append(feat)", "entries[3].text source = entries[4].text feat['name'] = name feat['level'] = level", "= codecs.open(\"ancestryFeats.csv\", encoding='utf-8') for line in listOfPages: featMD = line.split(\",\")", "= {} featHolder['name'] = 'Pathfinder 2.0 Ancestry feat list' featHolder['date']", "\"https://2e.aonprd.com/\" +link feat['prereq'] = prereq feat['benefits'] = source details =", "feat = {} entries = row.find_all(lambda tag: tag.name=='td') if entries", "in listOfPages: featMD = line.split(\",\") print(\"Getting feats for :\", featMD[0],\"This", "to pull out any links detail = ''.join(detailsplit[::2]) #Finally, we", "re.split('<(.*?)>', detailraw) #Now we split it into groups of strings", "of strings seperated by < >, to pull out any", "feats.append(feat) #if t > 5: #break return feats listOfPages =", "the source puts PFS links first, which we want to", "#We do next_sibling here because the source puts PFS links", "t = 0 for row in rows: t += 1", "the meta tag detailsplit = re.split('<(.*?)>', detailraw) #Now we split", "string #print(detail) return detail def get_feats(link): feats = [] res", "requests.get(link) res.raise_for_status() soup = BeautifulSoup(res.text, 'lxml') table = soup.find(lambda tag:", "tag: tag.name=='td') if entries is not None: if len(entries) >", "'lxml') table = soup.find(lambda tag: tag.name=='table' and tag.has_attr('id') and tag['id']==\"ctl00_MainContent_TableElement\")", "= traits.split(\", \") 
feat['link'] = \"https://2e.aonprd.com/\" +link feat['prereq'] = prereq", "= BeautifulSoup(res.text, 'lxml') table = soup.find(lambda tag: tag.name=='table' and tag.has_attr('id')", "= table.findAll(lambda tag: tag.name=='tr') t = 0 for row in", "join every other group together (passing over the link groups)", "in rows: t += 1 #print(row) #print(\"-----------------------------------\") feat = {}", "feats listOfPages = codecs.open(\"ancestryFeats.csv\", encoding='utf-8') for line in listOfPages: featMD", "row in rows: t += 1 #print(row) #print(\"-----------------------------------\") feat =", "feat['link'] = \"https://2e.aonprd.com/\" +link feat['prereq'] = prereq feat['benefits'] = source", "soup = BeautifulSoup(res.text, 'lxml') table = soup.find(lambda tag: tag.name=='table' and", "for :\", featMD[0],\"This url:\", featMD[2]) featHolder[featMD[1]] = get_feats(featMD[2].strip('\\n')) json_data =", "every other group together (passing over the link groups) into", "featHolder = {} featHolder['name'] = 'Pathfinder 2.0 Ancestry feat list'" ]
[ "I think this is where i need to add in", "random import secrets secure_random = secrets.SystemRandom()# creates a secure random", "list for each_question in range (0, num_qustion_t_select): # I think", "object. group_of_items = questions_lists num_qustion_t_select = num_question_to_display list_of_random_items = secure_random.sample(group_of_items,", "def Randomise(questions_lists): import random import secrets secure_random = secrets.SystemRandom()# creates", "num_question_to_display list_of_random_items = secure_random.sample(group_of_items, num_qustion_t_select) # randomly selecting from strings", "is where i need to add in some information but", "numbers of question and space to answer. print ((\"Q.\"),(each_question +", "to answer. print ((\"Q.\"),(each_question + 1),((list_of_random_items[each_question]))) print ((\"A.\"),(each_question + 1),(\"_______________________\"))", "some kind of structure with numbers of question and space", "a secure random object. group_of_items = questions_lists num_qustion_t_select = num_question_to_display", "think this is where i need to add in some", "add in some information but don't understand. #printing some kind", "group_of_items = questions_lists num_qustion_t_select = num_question_to_display list_of_random_items = secure_random.sample(group_of_items, num_qustion_t_select)", "this is where i need to add in some information", "with numbers of question and space to answer. print ((\"Q.\"),(each_question", "# randomly selecting from strings within each question list for", "each_question in range (0, num_qustion_t_select): # I think this is", "don't understand. #printing some kind of structure with numbers of", "space to answer. print ((\"Q.\"),(each_question + 1),((list_of_random_items[each_question]))) print ((\"A.\"),(each_question +", "answer. 
print ((\"Q.\"),(each_question + 1),((list_of_random_items[each_question]))) print ((\"A.\"),(each_question + 1),(\"_______________________\")) print", "secure_random.sample(group_of_items, num_qustion_t_select) # randomly selecting from strings within each question", "randomly selecting from strings within each question list for each_question", "each question list for each_question in range (0, num_qustion_t_select): #", "where i need to add in some information but don't", "within each question list for each_question in range (0, num_qustion_t_select):", "strings within each question list for each_question in range (0,", "selecting from strings within each question list for each_question in", "#printing some kind of structure with numbers of question and", "num_qustion_t_select = num_question_to_display list_of_random_items = secure_random.sample(group_of_items, num_qustion_t_select) # randomly selecting", "information but don't understand. #printing some kind of structure with", "random object. group_of_items = questions_lists num_qustion_t_select = num_question_to_display list_of_random_items =", "secure random object. group_of_items = questions_lists num_qustion_t_select = num_question_to_display list_of_random_items", "= questions_lists num_qustion_t_select = num_question_to_display list_of_random_items = secure_random.sample(group_of_items, num_qustion_t_select) #", "<reponame>Jahronimo/public_question_book_framework import random def Randomise(questions_lists): import random import secrets secure_random", "from strings within each question list for each_question in range", "i need to add in some information but don't understand.", "= secure_random.sample(group_of_items, num_qustion_t_select) # randomly selecting from strings within each", "secrets.SystemRandom()# creates a secure random object. 
group_of_items = questions_lists num_qustion_t_select", "range (0, num_qustion_t_select): # I think this is where i", "question list for each_question in range (0, num_qustion_t_select): # I", "structure with numbers of question and space to answer. print", "creates a secure random object. group_of_items = questions_lists num_qustion_t_select =", "num_qustion_t_select): # I think this is where i need to", "secure_random = secrets.SystemRandom()# creates a secure random object. group_of_items =", "some information but don't understand. #printing some kind of structure", "# I think this is where i need to add", "import random import secrets secure_random = secrets.SystemRandom()# creates a secure", "of structure with numbers of question and space to answer.", "to add in some information but don't understand. #printing some", "(0, num_qustion_t_select): # I think this is where i need", "= secrets.SystemRandom()# creates a secure random object. group_of_items = questions_lists", "random def Randomise(questions_lists): import random import secrets secure_random = secrets.SystemRandom()#", "but don't understand. #printing some kind of structure with numbers", "in range (0, num_qustion_t_select): # I think this is where", "understand. #printing some kind of structure with numbers of question", "questions_lists num_qustion_t_select = num_question_to_display list_of_random_items = secure_random.sample(group_of_items, num_qustion_t_select) # randomly", "secrets secure_random = secrets.SystemRandom()# creates a secure random object. group_of_items", "in some information but don't understand. #printing some kind of", "for each_question in range (0, num_qustion_t_select): # I think this", "of question and space to answer. print ((\"Q.\"),(each_question + 1),((list_of_random_items[each_question])))", "list_of_random_items = secure_random.sample(group_of_items, num_qustion_t_select) # randomly selecting from strings within", "question and space to answer. 
print ((\"Q.\"),(each_question + 1),((list_of_random_items[each_question]))) print", "import secrets secure_random = secrets.SystemRandom()# creates a secure random object.", "print ((\"Q.\"),(each_question + 1),((list_of_random_items[each_question]))) print ((\"A.\"),(each_question + 1),(\"_______________________\")) print (\"\\n\")", "import random def Randomise(questions_lists): import random import secrets secure_random =", "need to add in some information but don't understand. #printing", "= num_question_to_display list_of_random_items = secure_random.sample(group_of_items, num_qustion_t_select) # randomly selecting from", "and space to answer. print ((\"Q.\"),(each_question + 1),((list_of_random_items[each_question]))) print ((\"A.\"),(each_question", "kind of structure with numbers of question and space to", "num_qustion_t_select) # randomly selecting from strings within each question list", "Randomise(questions_lists): import random import secrets secure_random = secrets.SystemRandom()# creates a" ]
[ "job_data = fpl(\"indeed\", next_page=\"/jobs?q=Data+Scientist\" \"&l=Texas&start=10\") if __name__ == '__main__': unittest.main()", "scrape.build_url(\"indeed\", \"/jobs?q=Data+Scientist&l=Texas&start=10\", join_next=True) expected = (\"https://www.indeed.com/\" \"jobs?q=Data+Scientist&l=Texas&start=10\") url2 = scrape.build_url(\"indeed\",", "(\"https://www.indeed.com/\" \"jobs?q=Data+Scientist&l=Texas&start=10\") url2 = scrape.build_url(\"indeed\", job=\"Data Scientist\", state=\"Texas\") expected2 =", "self.assertNotEqual(len(job_data), 0) self.assertIsInstance(job_data, tuple) self.assertIsInstance(job_data[0][0], dict) self.assertIsInstance(job_data[1], str) job_data =", "self.assertIsInstance(job_data, tuple) self.assertIsInstance(job_data[0][0], dict) self.assertIsInstance(job_data[1], str) job_data = fpl(\"indeed\", next_page=\"/jobs?q=Data+Scientist\"", "= (\"https://www.indeed.com/\" \"jobs?q=Data%20Scientist&l=Texas&start=0\") self.assertEqual(url, expected) self.assertEqual(url2, expected2) def test_fetch_page(self): fpl", "fpl(\"indeed\", job=\"Data Scientist\", state=\"Texas\") self.assertNotEqual(len(job_data), 0) self.assertIsInstance(job_data, tuple) self.assertIsInstance(job_data[0][0], dict)", "self.assertEqual(url2, expected2) def test_fetch_page(self): fpl = scrape.fetch_page_listings job_data = fpl(\"indeed\",", "job=\"Data Scientist\", state=\"Texas\") self.assertNotEqual(len(job_data), 0) self.assertIsInstance(job_data, tuple) self.assertIsInstance(job_data[0][0], dict) self.assertIsInstance(job_data[1],", "state=\"Texas\") self.assertNotEqual(len(job_data), 0) self.assertIsInstance(job_data, tuple) self.assertIsInstance(job_data[0][0], dict) self.assertIsInstance(job_data[1], str) job_data", "job_data = fpl(\"indeed\", job=\"Data Scientist\", state=\"Texas\") self.assertNotEqual(len(job_data), 0) self.assertIsInstance(job_data, tuple)", "state=\"Texas\") expected2 = (\"https://www.indeed.com/\" 
\"jobs?q=Data%20Scientist&l=Texas&start=0\") self.assertEqual(url, expected) self.assertEqual(url2, expected2) def", "TestScrapeFunctions(unittest.TestCase): def test_build_url(self): url = scrape.build_url(\"indeed\", \"/jobs?q=Data+Scientist&l=Texas&start=10\", join_next=True) expected =", "import scrape class TestScrapeFunctions(unittest.TestCase): def test_build_url(self): url = scrape.build_url(\"indeed\", \"/jobs?q=Data+Scientist&l=Texas&start=10\",", "unittest import scrape class TestScrapeFunctions(unittest.TestCase): def test_build_url(self): url = scrape.build_url(\"indeed\",", "expected = (\"https://www.indeed.com/\" \"jobs?q=Data+Scientist&l=Texas&start=10\") url2 = scrape.build_url(\"indeed\", job=\"Data Scientist\", state=\"Texas\")", "test_build_url(self): url = scrape.build_url(\"indeed\", \"/jobs?q=Data+Scientist&l=Texas&start=10\", join_next=True) expected = (\"https://www.indeed.com/\" \"jobs?q=Data+Scientist&l=Texas&start=10\")", "= scrape.build_url(\"indeed\", \"/jobs?q=Data+Scientist&l=Texas&start=10\", join_next=True) expected = (\"https://www.indeed.com/\" \"jobs?q=Data+Scientist&l=Texas&start=10\") url2 =", "job=\"Data Scientist\", state=\"Texas\") expected2 = (\"https://www.indeed.com/\" \"jobs?q=Data%20Scientist&l=Texas&start=0\") self.assertEqual(url, expected) self.assertEqual(url2,", "expected2) def test_fetch_page(self): fpl = scrape.fetch_page_listings job_data = fpl(\"indeed\", job=\"Data", "dict) self.assertIsInstance(job_data[1], str) job_data = fpl(\"indeed\", next_page=\"/jobs?q=Data+Scientist\" \"&l=Texas&start=10\") if __name__", "str) job_data = fpl(\"indeed\", next_page=\"/jobs?q=Data+Scientist\" \"&l=Texas&start=10\") if __name__ == '__main__':", "Scientist\", state=\"Texas\") self.assertNotEqual(len(job_data), 0) self.assertIsInstance(job_data, tuple) self.assertIsInstance(job_data[0][0], dict) self.assertIsInstance(job_data[1], str)", "= (\"https://www.indeed.com/\" \"jobs?q=Data+Scientist&l=Texas&start=10\") url2 = 
scrape.build_url(\"indeed\", job=\"Data Scientist\", state=\"Texas\") expected2", "self.assertEqual(url, expected) self.assertEqual(url2, expected2) def test_fetch_page(self): fpl = scrape.fetch_page_listings job_data", "fpl = scrape.fetch_page_listings job_data = fpl(\"indeed\", job=\"Data Scientist\", state=\"Texas\") self.assertNotEqual(len(job_data),", "\"jobs?q=Data+Scientist&l=Texas&start=10\") url2 = scrape.build_url(\"indeed\", job=\"Data Scientist\", state=\"Texas\") expected2 = (\"https://www.indeed.com/\"", "expected) self.assertEqual(url2, expected2) def test_fetch_page(self): fpl = scrape.fetch_page_listings job_data =", "scrape class TestScrapeFunctions(unittest.TestCase): def test_build_url(self): url = scrape.build_url(\"indeed\", \"/jobs?q=Data+Scientist&l=Texas&start=10\", join_next=True)", "= scrape.fetch_page_listings job_data = fpl(\"indeed\", job=\"Data Scientist\", state=\"Texas\") self.assertNotEqual(len(job_data), 0)", "def test_fetch_page(self): fpl = scrape.fetch_page_listings job_data = fpl(\"indeed\", job=\"Data Scientist\",", "class TestScrapeFunctions(unittest.TestCase): def test_build_url(self): url = scrape.build_url(\"indeed\", \"/jobs?q=Data+Scientist&l=Texas&start=10\", join_next=True) expected", "url = scrape.build_url(\"indeed\", \"/jobs?q=Data+Scientist&l=Texas&start=10\", join_next=True) expected = (\"https://www.indeed.com/\" \"jobs?q=Data+Scientist&l=Texas&start=10\") url2", "Scientist\", state=\"Texas\") expected2 = (\"https://www.indeed.com/\" \"jobs?q=Data%20Scientist&l=Texas&start=0\") self.assertEqual(url, expected) self.assertEqual(url2, expected2)", "join_next=True) expected = (\"https://www.indeed.com/\" \"jobs?q=Data+Scientist&l=Texas&start=10\") url2 = scrape.build_url(\"indeed\", job=\"Data Scientist\",", "url2 = scrape.build_url(\"indeed\", job=\"Data Scientist\", state=\"Texas\") expected2 = (\"https://www.indeed.com/\" \"jobs?q=Data%20Scientist&l=Texas&start=0\")", "scrape.fetch_page_listings job_data = 
fpl(\"indeed\", job=\"Data Scientist\", state=\"Texas\") self.assertNotEqual(len(job_data), 0) self.assertIsInstance(job_data,", "self.assertIsInstance(job_data[0][0], dict) self.assertIsInstance(job_data[1], str) job_data = fpl(\"indeed\", next_page=\"/jobs?q=Data+Scientist\" \"&l=Texas&start=10\") if", "expected2 = (\"https://www.indeed.com/\" \"jobs?q=Data%20Scientist&l=Texas&start=0\") self.assertEqual(url, expected) self.assertEqual(url2, expected2) def test_fetch_page(self):", "\"/jobs?q=Data+Scientist&l=Texas&start=10\", join_next=True) expected = (\"https://www.indeed.com/\" \"jobs?q=Data+Scientist&l=Texas&start=10\") url2 = scrape.build_url(\"indeed\", job=\"Data", "0) self.assertIsInstance(job_data, tuple) self.assertIsInstance(job_data[0][0], dict) self.assertIsInstance(job_data[1], str) job_data = fpl(\"indeed\",", "self.assertIsInstance(job_data[1], str) job_data = fpl(\"indeed\", next_page=\"/jobs?q=Data+Scientist\" \"&l=Texas&start=10\") if __name__ ==", "scrape.build_url(\"indeed\", job=\"Data Scientist\", state=\"Texas\") expected2 = (\"https://www.indeed.com/\" \"jobs?q=Data%20Scientist&l=Texas&start=0\") self.assertEqual(url, expected)", "= scrape.build_url(\"indeed\", job=\"Data Scientist\", state=\"Texas\") expected2 = (\"https://www.indeed.com/\" \"jobs?q=Data%20Scientist&l=Texas&start=0\") self.assertEqual(url,", "import unittest import scrape class TestScrapeFunctions(unittest.TestCase): def test_build_url(self): url =", "def test_build_url(self): url = scrape.build_url(\"indeed\", \"/jobs?q=Data+Scientist&l=Texas&start=10\", join_next=True) expected = (\"https://www.indeed.com/\"", "test_fetch_page(self): fpl = scrape.fetch_page_listings job_data = fpl(\"indeed\", job=\"Data Scientist\", state=\"Texas\")", "(\"https://www.indeed.com/\" \"jobs?q=Data%20Scientist&l=Texas&start=0\") self.assertEqual(url, expected) self.assertEqual(url2, expected2) def test_fetch_page(self): fpl =", "= fpl(\"indeed\", job=\"Data Scientist\", state=\"Texas\") 
self.assertNotEqual(len(job_data), 0) self.assertIsInstance(job_data, tuple) self.assertIsInstance(job_data[0][0],", "\"jobs?q=Data%20Scientist&l=Texas&start=0\") self.assertEqual(url, expected) self.assertEqual(url2, expected2) def test_fetch_page(self): fpl = scrape.fetch_page_listings", "tuple) self.assertIsInstance(job_data[0][0], dict) self.assertIsInstance(job_data[1], str) job_data = fpl(\"indeed\", next_page=\"/jobs?q=Data+Scientist\" \"&l=Texas&start=10\")" ]
[ "p| /}\") print(\"( 0 )\\\"\\\"\\\"\\\\\") print(\"|\\\"^\\\"` |\") print(\"||_/=\\\\\\\\__|\") if __name__", "def main(): print(\"|\\_/|\") print(\"|q p| /}\") print(\"( 0 )\\\"\\\"\\\"\\\\\") print(\"|\\\"^\\\"`", "main(): print(\"|\\_/|\") print(\"|q p| /}\") print(\"( 0 )\\\"\\\"\\\"\\\\\") print(\"|\\\"^\\\"` |\")", "print(\"|q p| /}\") print(\"( 0 )\\\"\\\"\\\"\\\\\") print(\"|\\\"^\\\"` |\") print(\"||_/=\\\\\\\\__|\") if", "/}\") print(\"( 0 )\\\"\\\"\\\"\\\\\") print(\"|\\\"^\\\"` |\") print(\"||_/=\\\\\\\\__|\") if __name__ ==", "0 )\\\"\\\"\\\"\\\\\") print(\"|\\\"^\\\"` |\") print(\"||_/=\\\\\\\\__|\") if __name__ == \"__main__\": main()", "print(\"|\\_/|\") print(\"|q p| /}\") print(\"( 0 )\\\"\\\"\\\"\\\\\") print(\"|\\\"^\\\"` |\") print(\"||_/=\\\\\\\\__|\")", "print(\"( 0 )\\\"\\\"\\\"\\\\\") print(\"|\\\"^\\\"` |\") print(\"||_/=\\\\\\\\__|\") if __name__ == \"__main__\":" ]
[ "= train_labels[1:30000] test_data_tfidf = tfidf.transform(ingredient_list[1:30000]) test_data_hash = hashvec.transform(ingredient_list[1:30000]) test_data_bag =", "from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.feature_extraction.text import HashingVectorizer from sklearn.ensemble", "0 for recipe in jsonData: if \"cuisine\" in recipe: train_labels[c]", "in recipe: train_labels[c] = cuisine_numerical_map[recipe[\"cuisine\"]] c = c+1 end =", "rf_predict = randomforest.predict(test_data_bag) extree_predict = extratrees.predict(test_data_bag) bagging_predict = bagging.predict(test_data_bag) end", "3. Bagging 4. Random Forests @author: <NAME> \"\"\" import numpy", "sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import ExtraTreesClassifier from sklearn.ensemble import", ": \", metrics.accuracy_score(test_labels,ada_predict) print \"Accuracy of Random Forests : \",", "Random Forests @author: <NAME> \"\"\" import numpy as np import", "= bagging.predict(test_data_bag) end = time.time() print \"Time Taken to Test", "= ingredient_map[ingredient] + 1 else: ingredient_map[ingredient] = 1 ingredient_set.add(ingredient) s", "on Sat Dec 26 13:20:45 2015 Code for Kaggle What's", "classifiers ClassificationUtils.save_classifier(\"ada_bag_cook.pickle\",adaboost) ClassificationUtils.save_classifier(\"rf_bag_cook.pickle\",randomforest) ClassificationUtils.save_classifier(\"extree_bag_cook.pickle\",extratrees) ClassificationUtils.save_classifier(\"bagging_bag_cook.pickle\",bagging) ClassificationUtils.save_classifier(\"bag_of_words.pickle\",tfidf) def printIngredientDistribution(): print \"-----------", "\",end-start for cuisine in cuisine_set: cuisine_numerical_map[cuisine] = c c =", "s + \" \" + ingredient ingredient_list.append(s) end = time.time()", "printIngredientDistribution(): print \"----------- Distribution of the Recipe Ingredients ------------------\" for", "Kaggle What's Cooking Competition It uses the following classifiers 
with", "Features : \", end-start test_labels = train_labels[1:30000] test_data_tfidf = tfidf.transform(ingredient_list[1:30000])", "def printCuisineDistribution(): print \"----------- Distribution of the Cuisines ------------------\" for", "......\" start = time.time() train_labels = np.zeros(len(ingredient_list)) train_data_tfidf = tfidf.fit_transform(ingredient_list)", "= json.loads(content) cuisine_set = set([]) ingredient_set = set([]) cuisine_map =", "of Test Labels ....\" start = time.time() ada_predict = adaboost.predict(test_data_bag)", "CountVectorizer(stop_words='english') tfidf = TfidfVectorizer(stop_words='english') hashvec = HashingVectorizer(stop_words='english') # Create the", "jsonData = json.loads(content) cuisine_set = set([]) ingredient_set = set([]) cuisine_map", "utf-8 -*- \"\"\" Created on Sat Dec 26 13:20:45 2015", "train all Ensemble Models : \", end-start print \"Starting Prediction", ": \", metrics.accuracy_score(test_labels,extree_predict) print \"Accuracy of Bagging : \", metrics.accuracy_score(test_labels,bagging_predict)", "\"\" if recipe[\"cuisine\"] in cuisine_set: cuisine_map[recipe[\"cuisine\"]] = cuisine_map[recipe[\"cuisine\"]] + 1", "end-start test_labels = train_labels[1:30000] test_data_tfidf = tfidf.transform(ingredient_list[1:30000]) test_data_hash = hashvec.transform(ingredient_list[1:30000])", "It uses the following classifiers with tf-idf,hashvectors and bag_of_words approach", "metrics.accuracy_score(test_labels,ada_predict) print \"Accuracy of Random Forests : \", metrics.accuracy_score(test_labels,rf_predict) print", "Forests @author: <NAME> \"\"\" import numpy as np import time", "np import time import json import ClassificationUtils from sklearn.feature_extraction.text import", "= {} ingredient_list = list([]) c = 0 print \"Size", "c = 0 print \"Size of the data set :", "end = time.time() print \"Time Taken to Test the models", "train_labels[1:30000] test_data_tfidf = tfidf.transform(ingredient_list[1:30000]) 
test_data_hash = hashvec.transform(ingredient_list[1:30000]) test_data_bag = bag_of_words.transform(ingredient_list[1:30000])", "Extra Trees : \", metrics.accuracy_score(test_labels,extree_predict) print \"Accuracy of Bagging :", "recipe: s = \"\" if recipe[\"cuisine\"] in cuisine_set: cuisine_map[recipe[\"cuisine\"]] =", "and classifiers ClassificationUtils.save_classifier(\"ada_bag_cook.pickle\",adaboost) ClassificationUtils.save_classifier(\"rf_bag_cook.pickle\",randomforest) ClassificationUtils.save_classifier(\"extree_bag_cook.pickle\",extratrees) ClassificationUtils.save_classifier(\"bagging_bag_cook.pickle\",bagging) ClassificationUtils.save_classifier(\"bag_of_words.pickle\",tfidf) def printIngredientDistribution(): print", "\", end-start print \"Accuracy of AdaBoost Algorithm : \", metrics.accuracy_score(test_labels,ada_predict)", "\", end-start test_labels = train_labels[1:30000] test_data_tfidf = tfidf.transform(ingredient_list[1:30000]) test_data_hash =", "\"Accuracy of Random Forests : \", metrics.accuracy_score(test_labels,rf_predict) print \"Accuracy of", "train_data_tfidf = tfidf.fit_transform(ingredient_list) train_data_hash = hashvec.fit_transform(ingredient_list) train_data_bag = bag_of_words.fit_transform(ingredient_list) c", "numpy as np import time import json import ClassificationUtils from", "the data set : \", len(jsonData) print \"Starting Loading of", "= time.time() for recipe in jsonData: if \"cuisine\" in recipe:", "= cuisine_numerical_map[recipe[\"cuisine\"]] c = c+1 end = time.time() print \"Time", "ingredient_map.keys(): print key, \" : \" ,ingredient_map[key] def printCuisineDistribution(): print", "printCuisineDistribution(): print \"----------- Distribution of the Cuisines ------------------\" for key", "metrics.accuracy_score(test_labels,bagging_predict) # Saving the tf-idf model and classifiers ClassificationUtils.save_classifier(\"ada_bag_cook.pickle\",adaboost) 
ClassificationUtils.save_classifier(\"rf_bag_cook.pickle\",randomforest)", "ingredient_numerical_map = {} ingredient_map = {} ingredient_list = list([]) c", "\"Size of the data set : \", len(jsonData) print \"Starting", "ingredient_list.append(s) end = time.time() print \"Time Taken to Load the", "Feature Extracting ......\" start = time.time() train_labels = np.zeros(len(ingredient_list)) train_data_tfidf", "test_data_hash = hashvec.transform(ingredient_list[1:30000]) test_data_bag = bag_of_words.transform(ingredient_list[1:30000]) print \"Starting Training of", "c = 0 for ingredient in ingredient_set: ingredient_numerical_map[ingredient] = c", "bagging_predict = bagging.predict(test_data_bag) end = time.time() print \"Time Taken to", "# Create the feature extractors bag_of_words = CountVectorizer(stop_words='english') tfidf =", "Taken to Test the models : \", end-start print \"Accuracy", "in recipe[\"ingredients\"]: if ingredient in ingredient_set: ingredient_map[ingredient] = ingredient_map[ingredient] +", "if \"cuisine\" in recipe: train_labels[c] = cuisine_numerical_map[recipe[\"cuisine\"]] c = c+1", "Create the feature extractors bag_of_words = CountVectorizer(stop_words='english') tfidf = TfidfVectorizer(stop_words='english')", "What's Cooking Competition It uses the following classifiers with tf-idf,hashvectors", "ClassificationUtils.save_classifier(\"bagging_bag_cook.pickle\",bagging) ClassificationUtils.save_classifier(\"bag_of_words.pickle\",tfidf) def printIngredientDistribution(): print \"----------- Distribution of the Recipe", "of the data set : \", len(jsonData) print \"Starting Loading", "hashvec.transform(ingredient_list[1:30000]) test_data_bag = bag_of_words.transform(ingredient_list[1:30000]) print \"Starting Training of Models for", "key in ingredient_map.keys(): print key, \" : \" ,ingredient_map[key] def", "the feature extractors bag_of_words = CountVectorizer(stop_words='english') tfidf = TfidfVectorizer(stop_words='english') hashvec", 
"ExtraTreesClassifier() bagging = BaggingClassifier() filepath = \"train.json\" f = open(filepath,\"r\")", "+ 1 else: cuisine_map[recipe[\"cuisine\"]] = 1 cuisine_set.add(recipe[\"cuisine\"]) for ingredient in", "ingredient_set: ingredient_numerical_map[ingredient] = c c = c+1 print \"Starting Feature", "and bag_of_words approach 1. Adaboost 2. Extratrees 3. Bagging 4.", "c = c+1 c = 0 for ingredient in ingredient_set:", "+ 1 else: ingredient_map[ingredient] = 1 ingredient_set.add(ingredient) s = s", "to train all Ensemble Models : \", end-start print \"Starting", "time.time() train_labels = np.zeros(len(ingredient_list)) train_data_tfidf = tfidf.fit_transform(ingredient_list) train_data_hash = hashvec.fit_transform(ingredient_list)", "approach 1. Adaboost 2. Extratrees 3. Bagging 4. Random Forests", "\", metrics.accuracy_score(test_labels,bagging_predict) # Saving the tf-idf model and classifiers ClassificationUtils.save_classifier(\"ada_bag_cook.pickle\",adaboost)", "Dec 26 13:20:45 2015 Code for Kaggle What's Cooking Competition", "if ingredient in ingredient_set: ingredient_map[ingredient] = ingredient_map[ingredient] + 1 else:", "1 ingredient_set.add(ingredient) s = s + \" \" + ingredient", "extractors bag_of_words = CountVectorizer(stop_words='english') tfidf = TfidfVectorizer(stop_words='english') hashvec = HashingVectorizer(stop_words='english')", ": \", end-start test_labels = train_labels[1:30000] test_data_tfidf = tfidf.transform(ingredient_list[1:30000]) test_data_hash", "Taken to Train Extract Different Features : \", end-start test_labels", "= 0 print \"Size of the data set : \",", "= bag_of_words.transform(ingredient_list[1:30000]) print \"Starting Training of Models for Hash Vectorizer", "Create the Classifier objects adaboost = AdaBoostClassifier() randomforest = RandomForestClassifier()", "Train Extract Different Features : \", end-start test_labels = train_labels[1:30000]", "list([]) c = 0 print \"Size of the data set", "from 
sklearn.feature_extraction.text import HashingVectorizer from sklearn.ensemble import AdaBoostClassifier from sklearn.ensemble", "Created on Sat Dec 26 13:20:45 2015 Code for Kaggle", "= s + \" \" + ingredient ingredient_list.append(s) end =", ": \" ,ingredient_map[key] def printCuisineDistribution(): print \"----------- Distribution of the", "Loading of Data Set....\" start = time.time() for recipe in", "randomforest.fit(train_data_bag,train_labels) extratrees.fit(train_data_bag,train_labels) bagging.fit(train_data_bag,train_labels) end=time.time() print \"Time Taken to train all", "for cuisine in cuisine_set: cuisine_numerical_map[cuisine] = c c = c+1", "Different Features : \", end-start test_labels = train_labels[1:30000] test_data_tfidf =", "\"cuisine\" in recipe: train_labels[c] = cuisine_numerical_map[recipe[\"cuisine\"]] c = c+1 end", "with tf-idf,hashvectors and bag_of_words approach 1. Adaboost 2. Extratrees 3.", "= \"train.json\" f = open(filepath,\"r\") content = f.read() jsonData =", "= 0 for recipe in jsonData: if \"cuisine\" in recipe:", "the following classifiers with tf-idf,hashvectors and bag_of_words approach 1. 
Adaboost", "= 0 for ingredient in ingredient_set: ingredient_numerical_map[ingredient] = c c", "Hash Vectorizer Feature.....\" start = time.time() adaboost.fit(train_data_bag,train_labels) randomforest.fit(train_data_bag,train_labels) extratrees.fit(train_data_bag,train_labels) bagging.fit(train_data_bag,train_labels)", "c = c+1 print \"Starting Feature Extracting ......\" start =", "= HashingVectorizer(stop_words='english') # Create the Classifier objects adaboost = AdaBoostClassifier()", "print \"Time Taken to Train Extract Different Features : \",", "\"----------- Distribution of the Recipe Ingredients ------------------\" for key in", "Code for Kaggle What's Cooking Competition It uses the following", "s = \"\" if recipe[\"cuisine\"] in cuisine_set: cuisine_map[recipe[\"cuisine\"]] = cuisine_map[recipe[\"cuisine\"]]", "cuisine in cuisine_set: cuisine_numerical_map[cuisine] = c c = c+1 c", "metrics # Create the feature extractors bag_of_words = CountVectorizer(stop_words='english') tfidf", "train_data_bag = bag_of_words.fit_transform(ingredient_list) c = 0 for recipe in jsonData:", "for recipe in jsonData: if \"cuisine\" in recipe: s =", "-*- coding: utf-8 -*- \"\"\" Created on Sat Dec 26", "start = time.time() adaboost.fit(train_data_bag,train_labels) randomforest.fit(train_data_bag,train_labels) extratrees.fit(train_data_bag,train_labels) bagging.fit(train_data_bag,train_labels) end=time.time() print \"Time", "Prediction of Test Labels ....\" start = time.time() ada_predict =", "all Ensemble Models : \", end-start print \"Starting Prediction of", "in cuisine_set: cuisine_map[recipe[\"cuisine\"]] = cuisine_map[recipe[\"cuisine\"]] + 1 else: cuisine_map[recipe[\"cuisine\"]] =", "\", metrics.accuracy_score(test_labels,rf_predict) print \"Accuracy of Extra Trees : \", metrics.accuracy_score(test_labels,extree_predict)", "Ensemble Models : \", end-start print \"Starting Prediction of Test", "tfidf.fit_transform(ingredient_list) train_data_hash = 
hashvec.fit_transform(ingredient_list) train_data_bag = bag_of_words.fit_transform(ingredient_list) c = 0", "Test Labels ....\" start = time.time() ada_predict = adaboost.predict(test_data_bag) rf_predict", "from sklearn.ensemble import ExtraTreesClassifier from sklearn.ensemble import BaggingClassifier from sklearn", "train_data_hash = hashvec.fit_transform(ingredient_list) train_data_bag = bag_of_words.fit_transform(ingredient_list) c = 0 for", "Extract Different Features : \", end-start test_labels = train_labels[1:30000] test_data_tfidf", "\"Accuracy of Bagging : \", metrics.accuracy_score(test_labels,bagging_predict) # Saving the tf-idf", "time.time() for recipe in jsonData: if \"cuisine\" in recipe: s", "\"Starting Feature Extracting ......\" start = time.time() train_labels = np.zeros(len(ingredient_list))", "c+1 c = 0 for ingredient in ingredient_set: ingredient_numerical_map[ingredient] =", "Taken to train all Ensemble Models : \", end-start print", "import CountVectorizer from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.feature_extraction.text import HashingVectorizer", "<reponame>rupakc/Kaggle-Compendium # -*- coding: utf-8 -*- \"\"\" Created on Sat", "{} ingredient_map = {} ingredient_list = list([]) c = 0", "\"\"\" import numpy as np import time import json import", "test_labels = train_labels[1:30000] test_data_tfidf = tfidf.transform(ingredient_list[1:30000]) test_data_hash = hashvec.transform(ingredient_list[1:30000]) test_data_bag", "print key, \" : \" ,ingredient_map[key] def printCuisineDistribution(): print \"-----------", "ingredient_set.add(ingredient) s = s + \" \" + ingredient ingredient_list.append(s)", "end-start print \"Starting Prediction of Test Labels ....\" start =", "# Create the Classifier objects adaboost = AdaBoostClassifier() randomforest =", "cuisine_map[recipe[\"cuisine\"]] = cuisine_map[recipe[\"cuisine\"]] + 1 else: cuisine_map[recipe[\"cuisine\"]] = 1 cuisine_set.add(recipe[\"cuisine\"])", "ingredient 
in recipe[\"ingredients\"]: if ingredient in ingredient_set: ingredient_map[ingredient] = ingredient_map[ingredient]", "ingredient in ingredient_set: ingredient_map[ingredient] = ingredient_map[ingredient] + 1 else: ingredient_map[ingredient]", "time.time() adaboost.fit(train_data_bag,train_labels) randomforest.fit(train_data_bag,train_labels) extratrees.fit(train_data_bag,train_labels) bagging.fit(train_data_bag,train_labels) end=time.time() print \"Time Taken to", "print \"Accuracy of Bagging : \", metrics.accuracy_score(test_labels,bagging_predict) # Saving the", "= 1 cuisine_set.add(recipe[\"cuisine\"]) for ingredient in recipe[\"ingredients\"]: if ingredient in", "recipe in jsonData: if \"cuisine\" in recipe: train_labels[c] = cuisine_numerical_map[recipe[\"cuisine\"]]", "extratrees.fit(train_data_bag,train_labels) bagging.fit(train_data_bag,train_labels) end=time.time() print \"Time Taken to train all Ensemble", "f.read() jsonData = json.loads(content) cuisine_set = set([]) ingredient_set = set([])", "import json import ClassificationUtils from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text", "\", len(jsonData) print \"Starting Loading of Data Set....\" start =", "BaggingClassifier from sklearn import metrics # Create the feature extractors", "data set : \", len(jsonData) print \"Starting Loading of Data", "Random Forests : \", metrics.accuracy_score(test_labels,rf_predict) print \"Accuracy of Extra Trees", "coding: utf-8 -*- \"\"\" Created on Sat Dec 26 13:20:45", "ingredient_map[ingredient] = 1 ingredient_set.add(ingredient) s = s + \" \"", "the Classifier objects adaboost = AdaBoostClassifier() randomforest = RandomForestClassifier() extratrees", "time import json import ClassificationUtils from sklearn.feature_extraction.text import CountVectorizer from", "Set....\" start = time.time() for recipe in jsonData: if \"cuisine\"", "= tfidf.fit_transform(ingredient_list) train_data_hash = 
hashvec.fit_transform(ingredient_list) train_data_bag = bag_of_words.fit_transform(ingredient_list) c =", "0 print \"Size of the data set : \", len(jsonData)", "= RandomForestClassifier() extratrees = ExtraTreesClassifier() bagging = BaggingClassifier() filepath =", "ClassificationUtils.save_classifier(\"rf_bag_cook.pickle\",randomforest) ClassificationUtils.save_classifier(\"extree_bag_cook.pickle\",extratrees) ClassificationUtils.save_classifier(\"bagging_bag_cook.pickle\",bagging) ClassificationUtils.save_classifier(\"bag_of_words.pickle\",tfidf) def printIngredientDistribution(): print \"----------- Distribution of", "RandomForestClassifier() extratrees = ExtraTreesClassifier() bagging = BaggingClassifier() filepath = \"train.json\"", "= {} ingredient_map = {} ingredient_list = list([]) c =", "print \"Time Taken to Test the models : \", end-start", "= f.read() jsonData = json.loads(content) cuisine_set = set([]) ingredient_set =", ": \", end-start print \"Accuracy of AdaBoost Algorithm : \",", "= c+1 print \"Starting Feature Extracting ......\" start = time.time()", "json import ClassificationUtils from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text import", "content = f.read() jsonData = json.loads(content) cuisine_set = set([]) ingredient_set", "of Data Set....\" start = time.time() for recipe in jsonData:", "\" \" + ingredient ingredient_list.append(s) end = time.time() print \"Time", "Extracting ......\" start = time.time() train_labels = np.zeros(len(ingredient_list)) train_data_tfidf =", "of Extra Trees : \", metrics.accuracy_score(test_labels,extree_predict) print \"Accuracy of Bagging", "from sklearn import metrics # Create the feature extractors bag_of_words", "1 else: ingredient_map[ingredient] = 1 ingredient_set.add(ingredient) s = s +", "from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import ExtraTreesClassifier from sklearn.ensemble", "end = time.time() print \"Time Taken to 
Load the Dataset", "print \"Time Taken to train all Ensemble Models : \",", "print \"Accuracy of Random Forests : \", metrics.accuracy_score(test_labels,rf_predict) print \"Accuracy", "Bagging : \", metrics.accuracy_score(test_labels,bagging_predict) # Saving the tf-idf model and", "test_data_tfidf = tfidf.transform(ingredient_list[1:30000]) test_data_hash = hashvec.transform(ingredient_list[1:30000]) test_data_bag = bag_of_words.transform(ingredient_list[1:30000]) print", "= CountVectorizer(stop_words='english') tfidf = TfidfVectorizer(stop_words='english') hashvec = HashingVectorizer(stop_words='english') # Create", "= bag_of_words.fit_transform(ingredient_list) c = 0 for recipe in jsonData: if", "print \"Size of the data set : \", len(jsonData) print", "bag_of_words = CountVectorizer(stop_words='english') tfidf = TfidfVectorizer(stop_words='english') hashvec = HashingVectorizer(stop_words='english') #", "sklearn.feature_extraction.text import TfidfVectorizer from sklearn.feature_extraction.text import HashingVectorizer from sklearn.ensemble import", "json.loads(content) cuisine_set = set([]) ingredient_set = set([]) cuisine_map = {}", "import numpy as np import time import json import ClassificationUtils", "set([]) ingredient_set = set([]) cuisine_map = {} cuisine_numerical_map = {}", "import metrics # Create the feature extractors bag_of_words = CountVectorizer(stop_words='english')", "= time.time() print \"Time Taken to Load the Dataset :", "time.time() print \"Time Taken to Load the Dataset : \",end-start", "\", metrics.accuracy_score(test_labels,extree_predict) print \"Accuracy of Bagging : \", metrics.accuracy_score(test_labels,bagging_predict) #", "------------------\" for key in ingredient_map.keys(): print key, \" : \"", "model and classifiers ClassificationUtils.save_classifier(\"ada_bag_cook.pickle\",adaboost) ClassificationUtils.save_classifier(\"rf_bag_cook.pickle\",randomforest) 
ClassificationUtils.save_classifier(\"extree_bag_cook.pickle\",extratrees) ClassificationUtils.save_classifier(\"bagging_bag_cook.pickle\",bagging) ClassificationUtils.save_classifier(\"bag_of_words.pickle\",tfidf) def printIngredientDistribution():", "in recipe: s = \"\" if recipe[\"cuisine\"] in cuisine_set: cuisine_map[recipe[\"cuisine\"]]", "Adaboost 2. Extratrees 3. Bagging 4. Random Forests @author: <NAME>", "= extratrees.predict(test_data_bag) bagging_predict = bagging.predict(test_data_bag) end = time.time() print \"Time", ": \", len(jsonData) print \"Starting Loading of Data Set....\" start", "BaggingClassifier() filepath = \"train.json\" f = open(filepath,\"r\") content = f.read()", "ClassificationUtils.save_classifier(\"ada_bag_cook.pickle\",adaboost) ClassificationUtils.save_classifier(\"rf_bag_cook.pickle\",randomforest) ClassificationUtils.save_classifier(\"extree_bag_cook.pickle\",extratrees) ClassificationUtils.save_classifier(\"bagging_bag_cook.pickle\",bagging) ClassificationUtils.save_classifier(\"bag_of_words.pickle\",tfidf) def printIngredientDistribution(): print \"----------- Distribution", "Dataset : \",end-start for cuisine in cuisine_set: cuisine_numerical_map[cuisine] = c", "Trees : \", metrics.accuracy_score(test_labels,extree_predict) print \"Accuracy of Bagging : \",", "cuisine_numerical_map[cuisine] = c c = c+1 c = 0 for", "CountVectorizer from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.feature_extraction.text import HashingVectorizer from", "\"Time Taken to Load the Dataset : \",end-start for cuisine", "metrics.accuracy_score(test_labels,extree_predict) print \"Accuracy of Bagging : \", metrics.accuracy_score(test_labels,bagging_predict) # Saving", "to Test the models : \", end-start print \"Accuracy of", "cuisine_set.add(recipe[\"cuisine\"]) for ingredient in recipe[\"ingredients\"]: if ingredient in ingredient_set: ingredient_map[ingredient]", "models : \", end-start print \"Accuracy of AdaBoost Algorithm 
:", "Classifier objects adaboost = AdaBoostClassifier() randomforest = RandomForestClassifier() extratrees =", "Sat Dec 26 13:20:45 2015 Code for Kaggle What's Cooking", "in cuisine_set: cuisine_numerical_map[cuisine] = c c = c+1 c =", "time.time() print \"Time Taken to Train Extract Different Features :", "set : \", len(jsonData) print \"Starting Loading of Data Set....\"", "c+1 print \"Starting Feature Extracting ......\" start = time.time() train_labels", "Algorithm : \", metrics.accuracy_score(test_labels,ada_predict) print \"Accuracy of Random Forests :", "-*- \"\"\" Created on Sat Dec 26 13:20:45 2015 Code", "in ingredient_set: ingredient_map[ingredient] = ingredient_map[ingredient] + 1 else: ingredient_map[ingredient] =", "in ingredient_map.keys(): print key, \" : \" ,ingredient_map[key] def printCuisineDistribution():", "= time.time() print \"Time Taken to Test the models :", "sklearn.ensemble import AdaBoostClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import", "+ \" \" + ingredient ingredient_list.append(s) end = time.time() print", ": \", metrics.accuracy_score(test_labels,rf_predict) print \"Accuracy of Extra Trees : \",", "classifiers with tf-idf,hashvectors and bag_of_words approach 1. Adaboost 2. 
Extratrees", "Cooking Competition It uses the following classifiers with tf-idf,hashvectors and", "ingredient_list = list([]) c = 0 print \"Size of the", "c c = c+1 print \"Starting Feature Extracting ......\" start", "\" : \" ,ingredient_map[key] def printCuisineDistribution(): print \"----------- Distribution of", "bagging = BaggingClassifier() filepath = \"train.json\" f = open(filepath,\"r\") content", "time.time() print \"Time Taken to Test the models : \",", "for ingredient in ingredient_set: ingredient_numerical_map[ingredient] = c c = c+1", "randomforest.predict(test_data_bag) extree_predict = extratrees.predict(test_data_bag) bagging_predict = bagging.predict(test_data_bag) end = time.time()", "else: cuisine_map[recipe[\"cuisine\"]] = 1 cuisine_set.add(recipe[\"cuisine\"]) for ingredient in recipe[\"ingredients\"]: if", "ingredient_numerical_map[ingredient] = c c = c+1 print \"Starting Feature Extracting", "= time.time() ada_predict = adaboost.predict(test_data_bag) rf_predict = randomforest.predict(test_data_bag) extree_predict =", "c = c+1 end = time.time() print \"Time Taken to", "Extratrees 3. Bagging 4. 
Random Forests @author: <NAME> \"\"\" import", ": \", end-start print \"Starting Prediction of Test Labels ....\"", "Cuisines ------------------\" for key in cuisine_map.keys(): print key, \" :", "\"Time Taken to train all Ensemble Models : \", end-start", "0 for ingredient in ingredient_set: ingredient_numerical_map[ingredient] = c c =", "+ ingredient ingredient_list.append(s) end = time.time() print \"Time Taken to", "\"Starting Prediction of Test Labels ....\" start = time.time() ada_predict", "f = open(filepath,\"r\") content = f.read() jsonData = json.loads(content) cuisine_set", "Taken to Load the Dataset : \",end-start for cuisine in", "adaboost.fit(train_data_bag,train_labels) randomforest.fit(train_data_bag,train_labels) extratrees.fit(train_data_bag,train_labels) bagging.fit(train_data_bag,train_labels) end=time.time() print \"Time Taken to train", "= ExtraTreesClassifier() bagging = BaggingClassifier() filepath = \"train.json\" f =", "of the Cuisines ------------------\" for key in cuisine_map.keys(): print key,", "if recipe[\"cuisine\"] in cuisine_set: cuisine_map[recipe[\"cuisine\"]] = cuisine_map[recipe[\"cuisine\"]] + 1 else:", "c = 0 for recipe in jsonData: if \"cuisine\" in", "time.time() ada_predict = adaboost.predict(test_data_bag) rf_predict = randomforest.predict(test_data_bag) extree_predict = extratrees.predict(test_data_bag)", "c c = c+1 c = 0 for ingredient in", "{} cuisine_numerical_map = {} ingredient_numerical_map = {} ingredient_map = {}", "1 cuisine_set.add(recipe[\"cuisine\"]) for ingredient in recipe[\"ingredients\"]: if ingredient in ingredient_set:", "Saving the tf-idf model and classifiers ClassificationUtils.save_classifier(\"ada_bag_cook.pickle\",adaboost) ClassificationUtils.save_classifier(\"rf_bag_cook.pickle\",randomforest) ClassificationUtils.save_classifier(\"extree_bag_cook.pickle\",extratrees) ClassificationUtils.save_classifier(\"bagging_bag_cook.pickle\",bagging)", "s = s + \" \" + ingredient ingredient_list.append(s) 
end", "sklearn.ensemble import BaggingClassifier from sklearn import metrics # Create the", "the models : \", end-start print \"Accuracy of AdaBoost Algorithm", "AdaBoostClassifier() randomforest = RandomForestClassifier() extratrees = ExtraTreesClassifier() bagging = BaggingClassifier()", "tf-idf model and classifiers ClassificationUtils.save_classifier(\"ada_bag_cook.pickle\",adaboost) ClassificationUtils.save_classifier(\"rf_bag_cook.pickle\",randomforest) ClassificationUtils.save_classifier(\"extree_bag_cook.pickle\",extratrees) ClassificationUtils.save_classifier(\"bagging_bag_cook.pickle\",bagging) ClassificationUtils.save_classifier(\"bag_of_words.pickle\",tfidf) def", "for recipe in jsonData: if \"cuisine\" in recipe: train_labels[c] =", "end=time.time() print \"Time Taken to train all Ensemble Models :", "jsonData: if \"cuisine\" in recipe: s = \"\" if recipe[\"cuisine\"]", "jsonData: if \"cuisine\" in recipe: train_labels[c] = cuisine_numerical_map[recipe[\"cuisine\"]] c =", "feature extractors bag_of_words = CountVectorizer(stop_words='english') tfidf = TfidfVectorizer(stop_words='english') hashvec =", "cuisine_map[recipe[\"cuisine\"]] + 1 else: cuisine_map[recipe[\"cuisine\"]] = 1 cuisine_set.add(recipe[\"cuisine\"]) for ingredient", "recipe: train_labels[c] = cuisine_numerical_map[recipe[\"cuisine\"]] c = c+1 end = time.time()", "bag_of_words.transform(ingredient_list[1:30000]) print \"Starting Training of Models for Hash Vectorizer Feature.....\"", "1. Adaboost 2. Extratrees 3. Bagging 4. 
Random Forests @author:", "ingredient_map[ingredient] + 1 else: ingredient_map[ingredient] = 1 ingredient_set.add(ingredient) s =", "Models : \", end-start print \"Starting Prediction of Test Labels", "sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.feature_extraction.text import", "# Saving the tf-idf model and classifiers ClassificationUtils.save_classifier(\"ada_bag_cook.pickle\",adaboost) ClassificationUtils.save_classifier(\"rf_bag_cook.pickle\",randomforest) ClassificationUtils.save_classifier(\"extree_bag_cook.pickle\",extratrees)", "uses the following classifiers with tf-idf,hashvectors and bag_of_words approach 1.", "= open(filepath,\"r\") content = f.read() jsonData = json.loads(content) cuisine_set =", "ExtraTreesClassifier from sklearn.ensemble import BaggingClassifier from sklearn import metrics #", "test_data_bag = bag_of_words.transform(ingredient_list[1:30000]) print \"Starting Training of Models for Hash", "end-start print \"Accuracy of AdaBoost Algorithm : \", metrics.accuracy_score(test_labels,ada_predict) print", "following classifiers with tf-idf,hashvectors and bag_of_words approach 1. Adaboost 2.", "import RandomForestClassifier from sklearn.ensemble import ExtraTreesClassifier from sklearn.ensemble import BaggingClassifier", "4. 
Random Forests @author: <NAME> \"\"\" import numpy as np", "\"Accuracy of AdaBoost Algorithm : \", metrics.accuracy_score(test_labels,ada_predict) print \"Accuracy of", "\"\"\" Created on Sat Dec 26 13:20:45 2015 Code for", "cuisine_numerical_map[recipe[\"cuisine\"]] c = c+1 end = time.time() print \"Time Taken", "the Dataset : \",end-start for cuisine in cuisine_set: cuisine_numerical_map[cuisine] =", "the Recipe Ingredients ------------------\" for key in ingredient_map.keys(): print key,", "of Bagging : \", metrics.accuracy_score(test_labels,bagging_predict) # Saving the tf-idf model", "of AdaBoost Algorithm : \", metrics.accuracy_score(test_labels,ada_predict) print \"Accuracy of Random", "print \"Accuracy of Extra Trees : \", metrics.accuracy_score(test_labels,extree_predict) print \"Accuracy", "= hashvec.fit_transform(ingredient_list) train_data_bag = bag_of_words.fit_transform(ingredient_list) c = 0 for recipe", "2015 Code for Kaggle What's Cooking Competition It uses the", ": \", metrics.accuracy_score(test_labels,bagging_predict) # Saving the tf-idf model and classifiers", "adaboost.predict(test_data_bag) rf_predict = randomforest.predict(test_data_bag) extree_predict = extratrees.predict(test_data_bag) bagging_predict = bagging.predict(test_data_bag)", "= AdaBoostClassifier() randomforest = RandomForestClassifier() extratrees = ExtraTreesClassifier() bagging =", "Bagging 4. 
Random Forests @author: <NAME> \"\"\" import numpy as", "cuisine_set: cuisine_numerical_map[cuisine] = c c = c+1 c = 0", "26 13:20:45 2015 Code for Kaggle What's Cooking Competition It", "ingredient_map = {} ingredient_list = list([]) c = 0 print", "HashingVectorizer(stop_words='english') # Create the Classifier objects adaboost = AdaBoostClassifier() randomforest", "ingredient_map[ingredient] = ingredient_map[ingredient] + 1 else: ingredient_map[ingredient] = 1 ingredient_set.add(ingredient)", "import time import json import ClassificationUtils from sklearn.feature_extraction.text import CountVectorizer", "the Cuisines ------------------\" for key in cuisine_map.keys(): print key, \"", "open(filepath,\"r\") content = f.read() jsonData = json.loads(content) cuisine_set = set([])", "bag_of_words.fit_transform(ingredient_list) c = 0 for recipe in jsonData: if \"cuisine\"", "metrics.accuracy_score(test_labels,rf_predict) print \"Accuracy of Extra Trees : \", metrics.accuracy_score(test_labels,extree_predict) print", "recipe[\"ingredients\"]: if ingredient in ingredient_set: ingredient_map[ingredient] = ingredient_map[ingredient] + 1", "tf-idf,hashvectors and bag_of_words approach 1. Adaboost 2. Extratrees 3. 
Bagging", "ClassificationUtils from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text import TfidfVectorizer from", "extree_predict = extratrees.predict(test_data_bag) bagging_predict = bagging.predict(test_data_bag) end = time.time() print", "AdaBoostClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import ExtraTreesClassifier from", "cuisine_map[recipe[\"cuisine\"]] = 1 cuisine_set.add(recipe[\"cuisine\"]) for ingredient in recipe[\"ingredients\"]: if ingredient", "= np.zeros(len(ingredient_list)) train_data_tfidf = tfidf.fit_transform(ingredient_list) train_data_hash = hashvec.fit_transform(ingredient_list) train_data_bag =", "= hashvec.transform(ingredient_list[1:30000]) test_data_bag = bag_of_words.transform(ingredient_list[1:30000]) print \"Starting Training of Models", "1 else: cuisine_map[recipe[\"cuisine\"]] = 1 cuisine_set.add(recipe[\"cuisine\"]) for ingredient in recipe[\"ingredients\"]:", "13:20:45 2015 Code for Kaggle What's Cooking Competition It uses", "start = time.time() for recipe in jsonData: if \"cuisine\" in", "c+1 end = time.time() print \"Time Taken to Train Extract", "= set([]) cuisine_map = {} cuisine_numerical_map = {} ingredient_numerical_map =", "for key in ingredient_map.keys(): print key, \" : \" ,ingredient_map[key]", "key, \" : \" ,ingredient_map[key] def printCuisineDistribution(): print \"----------- Distribution", "def printIngredientDistribution(): print \"----------- Distribution of the Recipe Ingredients ------------------\"", "if \"cuisine\" in recipe: s = \"\" if recipe[\"cuisine\"] in", "print \"Time Taken to Load the Dataset : \",end-start for", "# -*- coding: utf-8 -*- \"\"\" Created on Sat Dec", "\"Time Taken to Train Extract Different Features : \", end-start", "Forests : \", metrics.accuracy_score(test_labels,rf_predict) print \"Accuracy of Extra Trees :", "train_labels = np.zeros(len(ingredient_list)) train_data_tfidf = 
tfidf.fit_transform(ingredient_list) train_data_hash = hashvec.fit_transform(ingredient_list) train_data_bag", "from sklearn.ensemble import BaggingClassifier from sklearn import metrics # Create", "Ingredients ------------------\" for key in ingredient_map.keys(): print key, \" :", "= \"\" if recipe[\"cuisine\"] in cuisine_set: cuisine_map[recipe[\"cuisine\"]] = cuisine_map[recipe[\"cuisine\"]] +", "tfidf.transform(ingredient_list[1:30000]) test_data_hash = hashvec.transform(ingredient_list[1:30000]) test_data_bag = bag_of_words.transform(ingredient_list[1:30000]) print \"Starting Training", "print \"Starting Training of Models for Hash Vectorizer Feature.....\" start", "for ingredient in recipe[\"ingredients\"]: if ingredient in ingredient_set: ingredient_map[ingredient] =", "the tf-idf model and classifiers ClassificationUtils.save_classifier(\"ada_bag_cook.pickle\",adaboost) ClassificationUtils.save_classifier(\"rf_bag_cook.pickle\",randomforest) ClassificationUtils.save_classifier(\"extree_bag_cook.pickle\",extratrees) ClassificationUtils.save_classifier(\"bagging_bag_cook.pickle\",bagging) ClassificationUtils.save_classifier(\"bag_of_words.pickle\",tfidf)", "\" + ingredient ingredient_list.append(s) end = time.time() print \"Time Taken", "Competition It uses the following classifiers with tf-idf,hashvectors and bag_of_words", "\"Time Taken to Test the models : \", end-start print", "tfidf = TfidfVectorizer(stop_words='english') hashvec = HashingVectorizer(stop_words='english') # Create the Classifier", "recipe[\"cuisine\"] in cuisine_set: cuisine_map[recipe[\"cuisine\"]] = cuisine_map[recipe[\"cuisine\"]] + 1 else: cuisine_map[recipe[\"cuisine\"]]", "import TfidfVectorizer from sklearn.feature_extraction.text import HashingVectorizer from sklearn.ensemble import AdaBoostClassifier", "{} ingredient_list = list([]) c = 0 print \"Size of", "= randomforest.predict(test_data_bag) extree_predict = extratrees.predict(test_data_bag) bagging_predict = 
bagging.predict(test_data_bag) end =", "\" ,ingredient_map[key] def printCuisineDistribution(): print \"----------- Distribution of the Cuisines", "Load the Dataset : \",end-start for cuisine in cuisine_set: cuisine_numerical_map[cuisine]", "RandomForestClassifier from sklearn.ensemble import ExtraTreesClassifier from sklearn.ensemble import BaggingClassifier from", "bagging.fit(train_data_bag,train_labels) end=time.time() print \"Time Taken to train all Ensemble Models", "of the Recipe Ingredients ------------------\" for key in ingredient_map.keys(): print", "print \"----------- Distribution of the Cuisines ------------------\" for key in", "= list([]) c = 0 print \"Size of the data", "Vectorizer Feature.....\" start = time.time() adaboost.fit(train_data_bag,train_labels) randomforest.fit(train_data_bag,train_labels) extratrees.fit(train_data_bag,train_labels) bagging.fit(train_data_bag,train_labels) end=time.time()", "set([]) cuisine_map = {} cuisine_numerical_map = {} ingredient_numerical_map = {}", "ada_predict = adaboost.predict(test_data_bag) rf_predict = randomforest.predict(test_data_bag) extree_predict = extratrees.predict(test_data_bag) bagging_predict", "= 1 ingredient_set.add(ingredient) s = s + \" \" +", "= time.time() adaboost.fit(train_data_bag,train_labels) randomforest.fit(train_data_bag,train_labels) extratrees.fit(train_data_bag,train_labels) bagging.fit(train_data_bag,train_labels) end=time.time() print \"Time Taken", "print \"Accuracy of AdaBoost Algorithm : \", metrics.accuracy_score(test_labels,ada_predict) print \"Accuracy", "recipe in jsonData: if \"cuisine\" in recipe: s = \"\"", "\"Accuracy of Extra Trees : \", metrics.accuracy_score(test_labels,extree_predict) print \"Accuracy of", "of Models for Hash Vectorizer Feature.....\" start = time.time() adaboost.fit(train_data_bag,train_labels)", "adaboost = AdaBoostClassifier() randomforest = RandomForestClassifier() extratrees = ExtraTreesClassifier() bagging", ": \",end-start for cuisine in 
cuisine_set: cuisine_numerical_map[cuisine] = c c", "for key in cuisine_map.keys(): print key, \" : \" ,cuisine_map[key]", "TfidfVectorizer(stop_words='english') hashvec = HashingVectorizer(stop_words='english') # Create the Classifier objects adaboost", "start = time.time() train_labels = np.zeros(len(ingredient_list)) train_data_tfidf = tfidf.fit_transform(ingredient_list) train_data_hash", "hashvec = HashingVectorizer(stop_words='english') # Create the Classifier objects adaboost =", "import ClassificationUtils from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text import TfidfVectorizer", "= c+1 end = time.time() print \"Time Taken to Train", "= cuisine_map[recipe[\"cuisine\"]] + 1 else: cuisine_map[recipe[\"cuisine\"]] = 1 cuisine_set.add(recipe[\"cuisine\"]) for", "= tfidf.transform(ingredient_list[1:30000]) test_data_hash = hashvec.transform(ingredient_list[1:30000]) test_data_bag = bag_of_words.transform(ingredient_list[1:30000]) print \"Starting", "\"train.json\" f = open(filepath,\"r\") content = f.read() jsonData = json.loads(content)", "Recipe Ingredients ------------------\" for key in ingredient_map.keys(): print key, \"", "Data Set....\" start = time.time() for recipe in jsonData: if", "filepath = \"train.json\" f = open(filepath,\"r\") content = f.read() jsonData", "for Kaggle What's Cooking Competition It uses the following classifiers", "extratrees = ExtraTreesClassifier() bagging = BaggingClassifier() filepath = \"train.json\" f", "= BaggingClassifier() filepath = \"train.json\" f = open(filepath,\"r\") content =", "\", metrics.accuracy_score(test_labels,ada_predict) print \"Accuracy of Random Forests : \", metrics.accuracy_score(test_labels,rf_predict)", "= {} ingredient_numerical_map = {} ingredient_map = {} ingredient_list =", "= {} cuisine_numerical_map = {} ingredient_numerical_map = {} ingredient_map =", "= c c = c+1 print \"Starting Feature Extracting ......\"", "as np import time import json 
import ClassificationUtils from sklearn.feature_extraction.text", "len(jsonData) print \"Starting Loading of Data Set....\" start = time.time()", "{} ingredient_numerical_map = {} ingredient_map = {} ingredient_list = list([])", "= set([]) ingredient_set = set([]) cuisine_map = {} cuisine_numerical_map =", "import HashingVectorizer from sklearn.ensemble import AdaBoostClassifier from sklearn.ensemble import RandomForestClassifier", "ClassificationUtils.save_classifier(\"bag_of_words.pickle\",tfidf) def printIngredientDistribution(): print \"----------- Distribution of the Recipe Ingredients", "bag_of_words approach 1. Adaboost 2. Extratrees 3. Bagging 4. Random", "print \"Starting Feature Extracting ......\" start = time.time() train_labels =", "Labels ....\" start = time.time() ada_predict = adaboost.predict(test_data_bag) rf_predict =", "from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.feature_extraction.text", "ingredient ingredient_list.append(s) end = time.time() print \"Time Taken to Load", "= time.time() print \"Time Taken to Train Extract Different Features", "Training of Models for Hash Vectorizer Feature.....\" start = time.time()", "else: ingredient_map[ingredient] = 1 ingredient_set.add(ingredient) s = s + \"", "sklearn.feature_extraction.text import HashingVectorizer from sklearn.ensemble import AdaBoostClassifier from sklearn.ensemble import", "hashvec.fit_transform(ingredient_list) train_data_bag = bag_of_words.fit_transform(ingredient_list) c = 0 for recipe in", "cuisine_set: cuisine_map[recipe[\"cuisine\"]] = cuisine_map[recipe[\"cuisine\"]] + 1 else: cuisine_map[recipe[\"cuisine\"]] = 1", "@author: <NAME> \"\"\" import numpy as np import time import", "<NAME> \"\"\" import numpy as np import time import json", "Distribution of the Cuisines ------------------\" for key in cuisine_map.keys(): print", "ingredient_set = set([]) cuisine_map = {} cuisine_numerical_map = 
{} ingredient_numerical_map", "cuisine_numerical_map = {} ingredient_numerical_map = {} ingredient_map = {} ingredient_list", "------------------\" for key in cuisine_map.keys(): print key, \" : \"", "\"cuisine\" in recipe: s = \"\" if recipe[\"cuisine\"] in cuisine_set:", "ClassificationUtils.save_classifier(\"extree_bag_cook.pickle\",extratrees) ClassificationUtils.save_classifier(\"bagging_bag_cook.pickle\",bagging) ClassificationUtils.save_classifier(\"bag_of_words.pickle\",tfidf) def printIngredientDistribution(): print \"----------- Distribution of the", "extratrees.predict(test_data_bag) bagging_predict = bagging.predict(test_data_bag) end = time.time() print \"Time Taken", "= c+1 c = 0 for ingredient in ingredient_set: ingredient_numerical_map[ingredient]", "print \"Starting Prediction of Test Labels ....\" start = time.time()", "in ingredient_set: ingredient_numerical_map[ingredient] = c c = c+1 print \"Starting", "Feature.....\" start = time.time() adaboost.fit(train_data_bag,train_labels) randomforest.fit(train_data_bag,train_labels) extratrees.fit(train_data_bag,train_labels) bagging.fit(train_data_bag,train_labels) end=time.time() print", "sklearn.ensemble import ExtraTreesClassifier from sklearn.ensemble import BaggingClassifier from sklearn import", "randomforest = RandomForestClassifier() extratrees = ExtraTreesClassifier() bagging = BaggingClassifier() filepath", "in jsonData: if \"cuisine\" in recipe: s = \"\" if", "\"----------- Distribution of the Cuisines ------------------\" for key in cuisine_map.keys():", ",ingredient_map[key] def printCuisineDistribution(): print \"----------- Distribution of the Cuisines ------------------\"", "= adaboost.predict(test_data_bag) rf_predict = randomforest.predict(test_data_bag) extree_predict = extratrees.predict(test_data_bag) bagging_predict =", "from sklearn.ensemble import AdaBoostClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble", "print \"----------- Distribution of 
the Recipe Ingredients ------------------\" for key", "sklearn import metrics # Create the feature extractors bag_of_words =", "TfidfVectorizer from sklearn.feature_extraction.text import HashingVectorizer from sklearn.ensemble import AdaBoostClassifier from", "to Load the Dataset : \",end-start for cuisine in cuisine_set:", "....\" start = time.time() ada_predict = adaboost.predict(test_data_bag) rf_predict = randomforest.predict(test_data_bag)", "import ExtraTreesClassifier from sklearn.ensemble import BaggingClassifier from sklearn import metrics", "Models for Hash Vectorizer Feature.....\" start = time.time() adaboost.fit(train_data_bag,train_labels) randomforest.fit(train_data_bag,train_labels)", "Distribution of the Recipe Ingredients ------------------\" for key in ingredient_map.keys():", "= time.time() train_labels = np.zeros(len(ingredient_list)) train_data_tfidf = tfidf.fit_transform(ingredient_list) train_data_hash =", "import AdaBoostClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import ExtraTreesClassifier", "import BaggingClassifier from sklearn import metrics # Create the feature", "2. Extratrees 3. Bagging 4. 
Random Forests @author: <NAME> \"\"\"", "Test the models : \", end-start print \"Accuracy of AdaBoost", "of Random Forests : \", metrics.accuracy_score(test_labels,rf_predict) print \"Accuracy of Extra", "AdaBoost Algorithm : \", metrics.accuracy_score(test_labels,ada_predict) print \"Accuracy of Random Forests", "end = time.time() print \"Time Taken to Train Extract Different", "= c c = c+1 c = 0 for ingredient", "cuisine_set = set([]) ingredient_set = set([]) cuisine_map = {} cuisine_numerical_map", "= TfidfVectorizer(stop_words='english') hashvec = HashingVectorizer(stop_words='english') # Create the Classifier objects", "objects adaboost = AdaBoostClassifier() randomforest = RandomForestClassifier() extratrees = ExtraTreesClassifier()", "print \"Starting Loading of Data Set....\" start = time.time() for", "\"Starting Training of Models for Hash Vectorizer Feature.....\" start =", "bagging.predict(test_data_bag) end = time.time() print \"Time Taken to Test the", "\", end-start print \"Starting Prediction of Test Labels ....\" start", "\"Starting Loading of Data Set....\" start = time.time() for recipe", "np.zeros(len(ingredient_list)) train_data_tfidf = tfidf.fit_transform(ingredient_list) train_data_hash = hashvec.fit_transform(ingredient_list) train_data_bag = bag_of_words.fit_transform(ingredient_list)", "ingredient_set: ingredient_map[ingredient] = ingredient_map[ingredient] + 1 else: ingredient_map[ingredient] = 1", "cuisine_map = {} cuisine_numerical_map = {} ingredient_numerical_map = {} ingredient_map", "ingredient in ingredient_set: ingredient_numerical_map[ingredient] = c c = c+1 print", "start = time.time() ada_predict = adaboost.predict(test_data_bag) rf_predict = randomforest.predict(test_data_bag) extree_predict", "to Train Extract Different Features : \", end-start test_labels =", "HashingVectorizer from sklearn.ensemble import AdaBoostClassifier from sklearn.ensemble import RandomForestClassifier from", "for Hash Vectorizer Feature.....\" start = 
time.time() adaboost.fit(train_data_bag,train_labels) randomforest.fit(train_data_bag,train_labels) extratrees.fit(train_data_bag,train_labels)", "train_labels[c] = cuisine_numerical_map[recipe[\"cuisine\"]] c = c+1 end = time.time() print", "in jsonData: if \"cuisine\" in recipe: train_labels[c] = cuisine_numerical_map[recipe[\"cuisine\"]] c" ]
[ "13] rank = ranks[deck[i] % 13] print(\"Card number\", deck[i], \"is", "Display the first four cards for i in range(4): suit", "= [\"Spades\", \"Hearts\", \"Diamonds\", \"Clubs\"] ranks = [\"Ace\", \"2\", \"3\",", "in range(4): suit = suits[deck[i] // 13] rank = ranks[deck[i]", "first four cards for i in range(4): suit = suits[deck[i]", "= [x for x in range(52)] # Create suits and", "% 13] print(\"Card number\", deck[i], \"is the\", rank, \"of\", suit)", "\"10\", \"Jack\", \"Queen\", \"King\"] # Shuffle the cards random.shuffle(deck) #", "random.shuffle(deck) # Display the first four cards for i in", "\"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\", \"Jack\",", "\"Hearts\", \"Diamonds\", \"Clubs\"] ranks = [\"Ace\", \"2\", \"3\", \"4\", \"5\",", "suits and ranks lists suits = [\"Spades\", \"Hearts\", \"Diamonds\", \"Clubs\"]", "of cards deck = [x for x in range(52)] #", "# Create a deck of cards deck = [x for", "cards deck = [x for x in range(52)] # Create", "x in range(52)] # Create suits and ranks lists suits", "deck = [x for x in range(52)] # Create suits", "range(52)] # Create suits and ranks lists suits = [\"Spades\",", "Create a deck of cards deck = [x for x", "\"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\", \"Jack\", \"Queen\", \"King\"]", "# Create suits and ranks lists suits = [\"Spades\", \"Hearts\",", "ranks lists suits = [\"Spades\", \"Hearts\", \"Diamonds\", \"Clubs\"] ranks =", "lists suits = [\"Spades\", \"Hearts\", \"Diamonds\", \"Clubs\"] ranks = [\"Ace\",", "[\"Ace\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\",", "rank = ranks[deck[i] % 13] print(\"Card number\", deck[i], \"is the\",", "\"6\", \"7\", \"8\", \"9\", \"10\", \"Jack\", \"Queen\", \"King\"] # Shuffle", "i in range(4): suit = suits[deck[i] // 13] rank =", "ranks = [\"Ace\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\",", "[\"Spades\", \"Hearts\", \"Diamonds\", \"Clubs\"] ranks = [\"Ace\", \"2\", \"3\", \"4\",", "\"Clubs\"] ranks = [\"Ace\", 
\"2\", \"3\", \"4\", \"5\", \"6\", \"7\",", "suits = [\"Spades\", \"Hearts\", \"Diamonds\", \"Clubs\"] ranks = [\"Ace\", \"2\",", "\"8\", \"9\", \"10\", \"Jack\", \"Queen\", \"King\"] # Shuffle the cards", "in range(52)] # Create suits and ranks lists suits =", "\"Diamonds\", \"Clubs\"] ranks = [\"Ace\", \"2\", \"3\", \"4\", \"5\", \"6\",", "the first four cards for i in range(4): suit =", "// 13] rank = ranks[deck[i] % 13] print(\"Card number\", deck[i],", "range(4): suit = suits[deck[i] // 13] rank = ranks[deck[i] %", "= ranks[deck[i] % 13] print(\"Card number\", deck[i], \"is the\", rank,", "cards for i in range(4): suit = suits[deck[i] // 13]", "\"King\"] # Shuffle the cards random.shuffle(deck) # Display the first", "Shuffle the cards random.shuffle(deck) # Display the first four cards", "suit = suits[deck[i] // 13] rank = ranks[deck[i] % 13]", "deck of cards deck = [x for x in range(52)]", "import random # Create a deck of cards deck =", "ranks[deck[i] % 13] print(\"Card number\", deck[i], \"is the\", rank, \"of\",", "= [\"Ace\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\",", "a deck of cards deck = [x for x in", "\"9\", \"10\", \"Jack\", \"Queen\", \"King\"] # Shuffle the cards random.shuffle(deck)", "for i in range(4): suit = suits[deck[i] // 13] rank", "# Shuffle the cards random.shuffle(deck) # Display the first four", "# Display the first four cards for i in range(4):", "four cards for i in range(4): suit = suits[deck[i] //", "[x for x in range(52)] # Create suits and ranks", "\"Queen\", \"King\"] # Shuffle the cards random.shuffle(deck) # Display the", "= suits[deck[i] // 13] rank = ranks[deck[i] % 13] print(\"Card", "cards random.shuffle(deck) # Display the first four cards for i", "\"Jack\", \"Queen\", \"King\"] # Shuffle the cards random.shuffle(deck) # Display", "\"5\", \"6\", \"7\", \"8\", \"9\", \"10\", \"Jack\", \"Queen\", \"King\"] #", "suits[deck[i] // 13] rank = ranks[deck[i] % 13] print(\"Card number\",", "random # Create a deck of 
cards deck = [x", "\"7\", \"8\", \"9\", \"10\", \"Jack\", \"Queen\", \"King\"] # Shuffle the", "\"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\", \"Jack\", \"Queen\",", "Create suits and ranks lists suits = [\"Spades\", \"Hearts\", \"Diamonds\",", "the cards random.shuffle(deck) # Display the first four cards for", "for x in range(52)] # Create suits and ranks lists", "and ranks lists suits = [\"Spades\", \"Hearts\", \"Diamonds\", \"Clubs\"] ranks" ]
[ "use_bias self.normalize = normalize self.out_features = out_features self.epsion = epsion", "self.normalize = normalize self.out_features = out_features self.epsion = epsion def", "nlpgnn.gnn.messagepassing import MessagePassing class RGraphConvolution(MessagePassing): def __init__(self, out_features, epsion=1e-7, aggr=\"sum\",", "initializer=self.kernel_initializer, name='wt_{}'.format(i), ) self._edge_type_weights.append(weight) if self.use_bias: self.bias = self.add_weight( shape=(self.out_features),", "node_embedding_shapes[-1] self._edge_type_weights = [] self._edge_type_bias = [] for i in", "+= tf.linalg.matmul(inputs.node_embeddings, self.weight_o) if self.bias is not None: aggr_out +=", "RGraphConvolution(out_features=12) x = layer(GNNInput(node_embeddings, adjacency_lists)) \"\"\" import tensorflow as tf", "self.bias_initializer = tf.keras.initializers.get(bias_initializer) self.use_bias = use_bias self.normalize = normalize self.out_features", ") self.built = True def message_function(self, edge_source_states, edge_target_states, num_incoming_to_node_per_message, num_outing_to_node_per_message,", "build(self, input_shapes): node_embedding_shapes = input_shapes.node_embeddings adjacency_list_shapes = input_shapes.adjacency_lists num_edge_type =", "input_shapes): node_embedding_shapes = input_shapes.node_embeddings adjacency_list_shapes = input_shapes.adjacency_lists num_edge_type = len(adjacency_list_shapes)", "self.use_bias = use_bias self.normalize = normalize self.out_features = out_features self.epsion", "message_function(self, edge_source_states, edge_target_states, num_incoming_to_node_per_message, num_outing_to_node_per_message, edge_type_idx): \"\"\" :param edge_source_states: [M,H]", ") return messages def call(self, inputs): aggr_out = self.propagate(inputs) #", "[M,H] :param edge_target_states: [M,H] :param num_incoming_to_node_per_message:[M] :param edge_type_idx: :param training:", "weight_r) if self.normalize: messages = ( 
tf.expand_dims(1.0 / (tf.cast(num_incoming_to_node_per_message, tf.float32)", "if self.use_bias: self.bias = self.add_weight( shape=(self.out_features), initializer=self.bias_initializer, name='b', ) else:", "__init__(self, out_features, epsion=1e-7, aggr=\"sum\", normalize=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', use_bias=True, **kwargs): super(RGraphConvolution,", "initializer=self.bias_initializer, name='b', ) else: self.bias = None self.weight_o = self.add_weight(", "tf.linalg.matmul(edge_source_states, weight_r) if self.normalize: messages = ( tf.expand_dims(1.0 / (tf.cast(num_incoming_to_node_per_message,", "inputs): aggr_out = self.propagate(inputs) # message_passing + update aggr_out +=", ") else: self.bias = None self.weight_o = self.add_weight( shape=(in_features, self.out_features),", "-*- coding:utf-8 -*- \"\"\" @Author:<NAME> Usage: node_embeddings = tf.random.normal(shape=(5, 3))", "dtype=tf.int32) ] layer = RGraphConvolution(out_features=12) x = layer(GNNInput(node_embeddings, adjacency_lists)) \"\"\"", "self._edge_type_weights[edge_type_idx] messages = tf.linalg.matmul(edge_source_states, weight_r) if self.normalize: messages = (", "name='wo', ) self.built = True def message_function(self, edge_source_states, edge_target_states, num_incoming_to_node_per_message,", "= out_features self.epsion = epsion def build(self, input_shapes): node_embedding_shapes =", ":return: \"\"\" weight_r = self._edge_type_weights[edge_type_idx] messages = tf.linalg.matmul(edge_source_states, weight_r) if", "* messages ) return messages def call(self, inputs): aggr_out =", "import tensorflow as tf from nlpgnn.gnn.messagepassing import MessagePassing class RGraphConvolution(MessagePassing):", "def __init__(self, out_features, epsion=1e-7, aggr=\"sum\", normalize=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', use_bias=True, **kwargs):", "adjacency_list_shapes = input_shapes.adjacency_lists num_edge_type = 
len(adjacency_list_shapes) in_features = node_embedding_shapes[-1] self._edge_type_weights", "[2, 4]], dtype=tf.int32) ] layer = RGraphConvolution(out_features=12) x = layer(GNNInput(node_embeddings,", "aggr=\"sum\", normalize=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', use_bias=True, **kwargs): super(RGraphConvolution, self).__init__(aggr, **kwargs) self.kernel_initializer", "edge_target_states, num_incoming_to_node_per_message, num_outing_to_node_per_message, edge_type_idx): \"\"\" :param edge_source_states: [M,H] :param edge_target_states:", "self.propagate(inputs) # message_passing + update aggr_out += tf.linalg.matmul(inputs.node_embeddings, self.weight_o) if", "self.bias = None self.weight_o = self.add_weight( shape=(in_features, self.out_features), initializer=self.kernel_initializer, name='wo',", "edge_target_states: [M,H] :param num_incoming_to_node_per_message:[M] :param edge_type_idx: :param training: :return: \"\"\"", "node_embeddings = tf.random.normal(shape=(5, 3)) adjacency_lists = [ tf.constant([[0, 1], [2,", "tf.constant([[0, 1], [2, 4], [2, 4]], dtype=tf.int32) ] layer =", "return messages def call(self, inputs): aggr_out = self.propagate(inputs) # message_passing", "tf.keras.initializers.get(kernel_initializer) self.bias_initializer = tf.keras.initializers.get(bias_initializer) self.use_bias = use_bias self.normalize = normalize", "self.epsion), axis=-1) * messages ) return messages def call(self, inputs):", "self.bias = self.add_weight( shape=(self.out_features), initializer=self.bias_initializer, name='b', ) else: self.bias =", "num_incoming_to_node_per_message, num_outing_to_node_per_message, edge_type_idx): \"\"\" :param edge_source_states: [M,H] :param edge_target_states: [M,H]", "1], [2, 4], [2, 4]], dtype=tf.int32), tf.constant([[0, 1], [2, 4],", "normalize self.out_features = out_features self.epsion = epsion def build(self, input_shapes):", "= len(adjacency_list_shapes) in_features = node_embedding_shapes[-1] 
self._edge_type_weights = [] self._edge_type_bias =", "message_passing + update aggr_out += tf.linalg.matmul(inputs.node_embeddings, self.weight_o) if self.bias is", "= self.propagate(inputs) # message_passing + update aggr_out += tf.linalg.matmul(inputs.node_embeddings, self.weight_o)", "4], [2, 4]], dtype=tf.int32), tf.constant([[0, 1], [2, 4], [2, 4]],", "node_embedding_shapes = input_shapes.node_embeddings adjacency_list_shapes = input_shapes.adjacency_lists num_edge_type = len(adjacency_list_shapes) in_features", "weight = self.add_weight( shape=(in_features, self.out_features), initializer=self.kernel_initializer, name='wt_{}'.format(i), ) self._edge_type_weights.append(weight) if", "self.out_features), initializer=self.kernel_initializer, name='wo', ) self.built = True def message_function(self, edge_source_states,", "else: self.bias = None self.weight_o = self.add_weight( shape=(in_features, self.out_features), initializer=self.kernel_initializer,", "range(num_edge_type): weight = self.add_weight( shape=(in_features, self.out_features), initializer=self.kernel_initializer, name='wt_{}'.format(i), ) self._edge_type_weights.append(weight)", "self.add_weight( shape=(in_features, self.out_features), initializer=self.kernel_initializer, name='wt_{}'.format(i), ) self._edge_type_weights.append(weight) if self.use_bias: self.bias", "1], [2, 4], [2, 4]], dtype=tf.int32) ] layer = RGraphConvolution(out_features=12)", "tf.linalg.matmul(inputs.node_embeddings, self.weight_o) if self.bias is not None: aggr_out += self.bias", "#! 
usr/bin/env python3 # -*- coding:utf-8 -*- \"\"\" @Author:<NAME> Usage:", "self.epsion = epsion def build(self, input_shapes): node_embedding_shapes = input_shapes.node_embeddings adjacency_list_shapes", "shape=(self.out_features), initializer=self.bias_initializer, name='b', ) else: self.bias = None self.weight_o =", "if self.bias is not None: aggr_out += self.bias return aggr_out", "bias_initializer='zeros', use_bias=True, **kwargs): super(RGraphConvolution, self).__init__(aggr, **kwargs) self.kernel_initializer = tf.keras.initializers.get(kernel_initializer) self.bias_initializer", "self.add_weight( shape=(self.out_features), initializer=self.bias_initializer, name='b', ) else: self.bias = None self.weight_o", ":param edge_type_idx: :param training: :return: \"\"\" weight_r = self._edge_type_weights[edge_type_idx] messages", "= True def message_function(self, edge_source_states, edge_target_states, num_incoming_to_node_per_message, num_outing_to_node_per_message, edge_type_idx): \"\"\"", "RGraphConvolution(MessagePassing): def __init__(self, out_features, epsion=1e-7, aggr=\"sum\", normalize=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', use_bias=True,", "python3 # -*- coding:utf-8 -*- \"\"\" @Author:<NAME> Usage: node_embeddings =", "messages = tf.linalg.matmul(edge_source_states, weight_r) if self.normalize: messages = ( tf.expand_dims(1.0", "# message_passing + update aggr_out += tf.linalg.matmul(inputs.node_embeddings, self.weight_o) if self.bias", "messages ) return messages def call(self, inputs): aggr_out = self.propagate(inputs)", "update aggr_out += tf.linalg.matmul(inputs.node_embeddings, self.weight_o) if self.bias is not None:", "MessagePassing class RGraphConvolution(MessagePassing): def __init__(self, out_features, epsion=1e-7, aggr=\"sum\", normalize=True, kernel_initializer='glorot_uniform',", "tf.keras.initializers.get(bias_initializer) self.use_bias = use_bias self.normalize = normalize self.out_features = out_features", 
"kernel_initializer='glorot_uniform', bias_initializer='zeros', use_bias=True, **kwargs): super(RGraphConvolution, self).__init__(aggr, **kwargs) self.kernel_initializer = tf.keras.initializers.get(kernel_initializer)", "= input_shapes.node_embeddings adjacency_list_shapes = input_shapes.adjacency_lists num_edge_type = len(adjacency_list_shapes) in_features =", "edge_source_states, edge_target_states, num_incoming_to_node_per_message, num_outing_to_node_per_message, edge_type_idx): \"\"\" :param edge_source_states: [M,H] :param", "adjacency_lists)) \"\"\" import tensorflow as tf from nlpgnn.gnn.messagepassing import MessagePassing", "= RGraphConvolution(out_features=12) x = layer(GNNInput(node_embeddings, adjacency_lists)) \"\"\" import tensorflow as", "call(self, inputs): aggr_out = self.propagate(inputs) # message_passing + update aggr_out", "self.out_features), initializer=self.kernel_initializer, name='wt_{}'.format(i), ) self._edge_type_weights.append(weight) if self.use_bias: self.bias = self.add_weight(", "= self.add_weight( shape=(in_features, self.out_features), initializer=self.kernel_initializer, name='wo', ) self.built = True", "layer(GNNInput(node_embeddings, adjacency_lists)) \"\"\" import tensorflow as tf from nlpgnn.gnn.messagepassing import", "def message_function(self, edge_source_states, edge_target_states, num_incoming_to_node_per_message, num_outing_to_node_per_message, edge_type_idx): \"\"\" :param edge_source_states:", "= tf.linalg.matmul(edge_source_states, weight_r) if self.normalize: messages = ( tf.expand_dims(1.0 /", "\"\"\" weight_r = self._edge_type_weights[edge_type_idx] messages = tf.linalg.matmul(edge_source_states, weight_r) if self.normalize:", "= tf.keras.initializers.get(kernel_initializer) self.bias_initializer = tf.keras.initializers.get(bias_initializer) self.use_bias = use_bias self.normalize =", "tf.expand_dims(1.0 / (tf.cast(num_incoming_to_node_per_message, tf.float32) + self.epsion), axis=-1) * messages )", "+ 
self.epsion), axis=-1) * messages ) return messages def call(self,", "coding:utf-8 -*- \"\"\" @Author:<NAME> Usage: node_embeddings = tf.random.normal(shape=(5, 3)) adjacency_lists", ") self._edge_type_weights.append(weight) if self.use_bias: self.bias = self.add_weight( shape=(self.out_features), initializer=self.bias_initializer, name='b',", "@Author:<NAME> Usage: node_embeddings = tf.random.normal(shape=(5, 3)) adjacency_lists = [ tf.constant([[0,", "use_bias=True, **kwargs): super(RGraphConvolution, self).__init__(aggr, **kwargs) self.kernel_initializer = tf.keras.initializers.get(kernel_initializer) self.bias_initializer =", ":param edge_target_states: [M,H] :param num_incoming_to_node_per_message:[M] :param edge_type_idx: :param training: :return:", "\"\"\" import tensorflow as tf from nlpgnn.gnn.messagepassing import MessagePassing class", "tf.constant([[0, 1], [2, 4], [2, 4]], dtype=tf.int32), tf.constant([[0, 1], [2,", "shape=(in_features, self.out_features), initializer=self.kernel_initializer, name='wt_{}'.format(i), ) self._edge_type_weights.append(weight) if self.use_bias: self.bias =", "normalize=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', use_bias=True, **kwargs): super(RGraphConvolution, self).__init__(aggr, **kwargs) self.kernel_initializer =", "def build(self, input_shapes): node_embedding_shapes = input_shapes.node_embeddings adjacency_list_shapes = input_shapes.adjacency_lists num_edge_type", "[2, 4], [2, 4]], dtype=tf.int32), tf.constant([[0, 1], [2, 4], [2,", "[] self._edge_type_bias = [] for i in range(num_edge_type): weight =", "[M,H] :param num_incoming_to_node_per_message:[M] :param edge_type_idx: :param training: :return: \"\"\" weight_r", "num_incoming_to_node_per_message:[M] :param edge_type_idx: :param training: :return: \"\"\" weight_r = self._edge_type_weights[edge_type_idx]", "aggr_out += tf.linalg.matmul(inputs.node_embeddings, self.weight_o) if self.bias is not None: aggr_out", "i in range(num_edge_type): 
weight = self.add_weight( shape=(in_features, self.out_features), initializer=self.kernel_initializer, name='wt_{}'.format(i),", "dtype=tf.int32), tf.constant([[0, 1], [2, 4], [2, 4]], dtype=tf.int32) ] layer", "len(adjacency_list_shapes) in_features = node_embedding_shapes[-1] self._edge_type_weights = [] self._edge_type_bias = []", "= None self.weight_o = self.add_weight( shape=(in_features, self.out_features), initializer=self.kernel_initializer, name='wo', )", "self.weight_o) if self.bias is not None: aggr_out += self.bias return", "= self._edge_type_weights[edge_type_idx] messages = tf.linalg.matmul(edge_source_states, weight_r) if self.normalize: messages =", "= input_shapes.adjacency_lists num_edge_type = len(adjacency_list_shapes) in_features = node_embedding_shapes[-1] self._edge_type_weights =", "self.add_weight( shape=(in_features, self.out_features), initializer=self.kernel_initializer, name='wo', ) self.built = True def", "shape=(in_features, self.out_features), initializer=self.kernel_initializer, name='wo', ) self.built = True def message_function(self,", "4]], dtype=tf.int32) ] layer = RGraphConvolution(out_features=12) x = layer(GNNInput(node_embeddings, adjacency_lists))", "x = layer(GNNInput(node_embeddings, adjacency_lists)) \"\"\" import tensorflow as tf from", "4]], dtype=tf.int32), tf.constant([[0, 1], [2, 4], [2, 4]], dtype=tf.int32) ]", "name='b', ) else: self.bias = None self.weight_o = self.add_weight( shape=(in_features,", "Usage: node_embeddings = tf.random.normal(shape=(5, 3)) adjacency_lists = [ tf.constant([[0, 1],", "def call(self, inputs): aggr_out = self.propagate(inputs) # message_passing + update", "num_edge_type = len(adjacency_list_shapes) in_features = node_embedding_shapes[-1] self._edge_type_weights = [] self._edge_type_bias", "True def message_function(self, edge_source_states, edge_target_states, num_incoming_to_node_per_message, num_outing_to_node_per_message, edge_type_idx): \"\"\" :param", "self.kernel_initializer = 
tf.keras.initializers.get(kernel_initializer) self.bias_initializer = tf.keras.initializers.get(bias_initializer) self.use_bias = use_bias self.normalize", "= self.add_weight( shape=(in_features, self.out_features), initializer=self.kernel_initializer, name='wt_{}'.format(i), ) self._edge_type_weights.append(weight) if self.use_bias:", "if self.normalize: messages = ( tf.expand_dims(1.0 / (tf.cast(num_incoming_to_node_per_message, tf.float32) +", "= [] self._edge_type_bias = [] for i in range(num_edge_type): weight", "adjacency_lists = [ tf.constant([[0, 1], [2, 4], [2, 4]], dtype=tf.int32),", "self).__init__(aggr, **kwargs) self.kernel_initializer = tf.keras.initializers.get(kernel_initializer) self.bias_initializer = tf.keras.initializers.get(bias_initializer) self.use_bias =", "super(RGraphConvolution, self).__init__(aggr, **kwargs) self.kernel_initializer = tf.keras.initializers.get(kernel_initializer) self.bias_initializer = tf.keras.initializers.get(bias_initializer) self.use_bias", "/ (tf.cast(num_incoming_to_node_per_message, tf.float32) + self.epsion), axis=-1) * messages ) return", "4], [2, 4]], dtype=tf.int32) ] layer = RGraphConvolution(out_features=12) x =", "class RGraphConvolution(MessagePassing): def __init__(self, out_features, epsion=1e-7, aggr=\"sum\", normalize=True, kernel_initializer='glorot_uniform', bias_initializer='zeros',", "( tf.expand_dims(1.0 / (tf.cast(num_incoming_to_node_per_message, tf.float32) + self.epsion), axis=-1) * messages", "+ update aggr_out += tf.linalg.matmul(inputs.node_embeddings, self.weight_o) if self.bias is not", "= ( tf.expand_dims(1.0 / (tf.cast(num_incoming_to_node_per_message, tf.float32) + self.epsion), axis=-1) *", ":param training: :return: \"\"\" weight_r = self._edge_type_weights[edge_type_idx] messages = tf.linalg.matmul(edge_source_states,", "from nlpgnn.gnn.messagepassing import MessagePassing class RGraphConvolution(MessagePassing): def __init__(self, out_features, epsion=1e-7,", ":param 
edge_source_states: [M,H] :param edge_target_states: [M,H] :param num_incoming_to_node_per_message:[M] :param edge_type_idx:", "= layer(GNNInput(node_embeddings, adjacency_lists)) \"\"\" import tensorflow as tf from nlpgnn.gnn.messagepassing", "[2, 4]], dtype=tf.int32), tf.constant([[0, 1], [2, 4], [2, 4]], dtype=tf.int32)", "(tf.cast(num_incoming_to_node_per_message, tf.float32) + self.epsion), axis=-1) * messages ) return messages", "tf.random.normal(shape=(5, 3)) adjacency_lists = [ tf.constant([[0, 1], [2, 4], [2,", "= use_bias self.normalize = normalize self.out_features = out_features self.epsion =", "self._edge_type_weights = [] self._edge_type_bias = [] for i in range(num_edge_type):", "epsion=1e-7, aggr=\"sum\", normalize=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', use_bias=True, **kwargs): super(RGraphConvolution, self).__init__(aggr, **kwargs)", "= tf.random.normal(shape=(5, 3)) adjacency_lists = [ tf.constant([[0, 1], [2, 4],", "self.out_features = out_features self.epsion = epsion def build(self, input_shapes): node_embedding_shapes", "= node_embedding_shapes[-1] self._edge_type_weights = [] self._edge_type_bias = [] for i", "name='wt_{}'.format(i), ) self._edge_type_weights.append(weight) if self.use_bias: self.bias = self.add_weight( shape=(self.out_features), initializer=self.bias_initializer,", "layer = RGraphConvolution(out_features=12) x = layer(GNNInput(node_embeddings, adjacency_lists)) \"\"\" import tensorflow", "training: :return: \"\"\" weight_r = self._edge_type_weights[edge_type_idx] messages = tf.linalg.matmul(edge_source_states, weight_r)", "[2, 4], [2, 4]], dtype=tf.int32) ] layer = RGraphConvolution(out_features=12) x", "[ tf.constant([[0, 1], [2, 4], [2, 4]], dtype=tf.int32), tf.constant([[0, 1],", "input_shapes.adjacency_lists num_edge_type = len(adjacency_list_shapes) in_features = node_embedding_shapes[-1] self._edge_type_weights = []", "# -*- coding:utf-8 -*- \"\"\" @Author:<NAME> Usage: node_embeddings = 
tf.random.normal(shape=(5,", "= self.add_weight( shape=(self.out_features), initializer=self.bias_initializer, name='b', ) else: self.bias = None", "tf from nlpgnn.gnn.messagepassing import MessagePassing class RGraphConvolution(MessagePassing): def __init__(self, out_features,", "**kwargs): super(RGraphConvolution, self).__init__(aggr, **kwargs) self.kernel_initializer = tf.keras.initializers.get(kernel_initializer) self.bias_initializer = tf.keras.initializers.get(bias_initializer)", "import MessagePassing class RGraphConvolution(MessagePassing): def __init__(self, out_features, epsion=1e-7, aggr=\"sum\", normalize=True,", "**kwargs) self.kernel_initializer = tf.keras.initializers.get(kernel_initializer) self.bias_initializer = tf.keras.initializers.get(bias_initializer) self.use_bias = use_bias", "= normalize self.out_features = out_features self.epsion = epsion def build(self,", "self.weight_o = self.add_weight( shape=(in_features, self.out_features), initializer=self.kernel_initializer, name='wo', ) self.built =", "-*- \"\"\" @Author:<NAME> Usage: node_embeddings = tf.random.normal(shape=(5, 3)) adjacency_lists =", ":param num_incoming_to_node_per_message:[M] :param edge_type_idx: :param training: :return: \"\"\" weight_r =", "for i in range(num_edge_type): weight = self.add_weight( shape=(in_features, self.out_features), initializer=self.kernel_initializer,", "self._edge_type_bias = [] for i in range(num_edge_type): weight = self.add_weight(", "3)) adjacency_lists = [ tf.constant([[0, 1], [2, 4], [2, 4]],", "<gh_stars>100-1000 #! 
usr/bin/env python3 # -*- coding:utf-8 -*- \"\"\" @Author:<NAME>", "None self.weight_o = self.add_weight( shape=(in_features, self.out_features), initializer=self.kernel_initializer, name='wo', ) self.built", "] layer = RGraphConvolution(out_features=12) x = layer(GNNInput(node_embeddings, adjacency_lists)) \"\"\" import", "edge_source_states: [M,H] :param edge_target_states: [M,H] :param num_incoming_to_node_per_message:[M] :param edge_type_idx: :param", "edge_type_idx): \"\"\" :param edge_source_states: [M,H] :param edge_target_states: [M,H] :param num_incoming_to_node_per_message:[M]", "epsion def build(self, input_shapes): node_embedding_shapes = input_shapes.node_embeddings adjacency_list_shapes = input_shapes.adjacency_lists", "aggr_out = self.propagate(inputs) # message_passing + update aggr_out += tf.linalg.matmul(inputs.node_embeddings,", "messages = ( tf.expand_dims(1.0 / (tf.cast(num_incoming_to_node_per_message, tf.float32) + self.epsion), axis=-1)", "\"\"\" @Author:<NAME> Usage: node_embeddings = tf.random.normal(shape=(5, 3)) adjacency_lists = [", "edge_type_idx: :param training: :return: \"\"\" weight_r = self._edge_type_weights[edge_type_idx] messages =", "messages def call(self, inputs): aggr_out = self.propagate(inputs) # message_passing +", "self.normalize: messages = ( tf.expand_dims(1.0 / (tf.cast(num_incoming_to_node_per_message, tf.float32) + self.epsion),", "in range(num_edge_type): weight = self.add_weight( shape=(in_features, self.out_features), initializer=self.kernel_initializer, name='wt_{}'.format(i), )", "num_outing_to_node_per_message, edge_type_idx): \"\"\" :param edge_source_states: [M,H] :param edge_target_states: [M,H] :param", "= tf.keras.initializers.get(bias_initializer) self.use_bias = use_bias self.normalize = normalize self.out_features =", "\"\"\" :param edge_source_states: [M,H] :param edge_target_states: [M,H] :param num_incoming_to_node_per_message:[M] :param", "tensorflow as tf from nlpgnn.gnn.messagepassing import 
MessagePassing class RGraphConvolution(MessagePassing): def", "out_features self.epsion = epsion def build(self, input_shapes): node_embedding_shapes = input_shapes.node_embeddings", "[] for i in range(num_edge_type): weight = self.add_weight( shape=(in_features, self.out_features),", "self._edge_type_weights.append(weight) if self.use_bias: self.bias = self.add_weight( shape=(self.out_features), initializer=self.bias_initializer, name='b', )", "axis=-1) * messages ) return messages def call(self, inputs): aggr_out", "= [ tf.constant([[0, 1], [2, 4], [2, 4]], dtype=tf.int32), tf.constant([[0,", "input_shapes.node_embeddings adjacency_list_shapes = input_shapes.adjacency_lists num_edge_type = len(adjacency_list_shapes) in_features = node_embedding_shapes[-1]", "out_features, epsion=1e-7, aggr=\"sum\", normalize=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', use_bias=True, **kwargs): super(RGraphConvolution, self).__init__(aggr,", "initializer=self.kernel_initializer, name='wo', ) self.built = True def message_function(self, edge_source_states, edge_target_states,", "self.use_bias: self.bias = self.add_weight( shape=(self.out_features), initializer=self.bias_initializer, name='b', ) else: self.bias", "self.built = True def message_function(self, edge_source_states, edge_target_states, num_incoming_to_node_per_message, num_outing_to_node_per_message, edge_type_idx):", "= epsion def build(self, input_shapes): node_embedding_shapes = input_shapes.node_embeddings adjacency_list_shapes =", "usr/bin/env python3 # -*- coding:utf-8 -*- \"\"\" @Author:<NAME> Usage: node_embeddings", "= [] for i in range(num_edge_type): weight = self.add_weight( shape=(in_features,", "in_features = node_embedding_shapes[-1] self._edge_type_weights = [] self._edge_type_bias = [] for", "weight_r = self._edge_type_weights[edge_type_idx] messages = tf.linalg.matmul(edge_source_states, weight_r) if self.normalize: messages", "tf.float32) + self.epsion), axis=-1) * messages ) 
return messages def", "as tf from nlpgnn.gnn.messagepassing import MessagePassing class RGraphConvolution(MessagePassing): def __init__(self," ]
[ "call options for each method. If not specified, the default", "(str): The resource name of the dataset to retrieve. retry", "like. self._inner_api_calls = {} # Service calls def create_dataset( self,", "same form as the protobuf message :class:`~google.cloud.automl_v1beta1.types.Dataset` retry (Optional[google.api_core.retry.Retry]): A", "response.metadata() Args: name (str): The resource name of the dataset", "# transport methods, wrapped with `wrap_method` to add retry, #", "instance. By default, this is an iterable of :class:`~google.cloud.automl_v1beta1.types.Model` instances.", "the parameters are invalid. \"\"\" # Wrap the transport method", "An expression for filtering the results of the request. -", "2.0 (the \"License\"); # you may not use this file", "from google.oauth2 import service_account import google.api_core.gapic_v1.client_info import google.api_core.gapic_v1.config import google.api_core.gapic_v1.method", "\"\"\"Constructor. Args: transport (Union[~.AutoMlGrpcTransport, Callable[[~.Credentials, type], ~.AutoMlGrpcTransport]): A transport instance,", "actual callables which invoke the proper # transport methods, wrapped", "RPC # from the client configuration. # (Ordinarily, these are", "exist. All imported annotations and examples will be added. input_config", "output location. If a dict is provided, it must be", "export_data( self, name, output_config, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Exports", "``dataset_id=5`` --> The model was created from a sibling dataset", "configuration. # (Ordinarily, these are the defaults specified in the", "Required. Dataset name. Dataset must already exist. 
All imported annotations", "# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "return google.api_core.path_template.expand( \"projects/{project}/locations/{location}\", project=project, location=location, ) @classmethod def dataset_path(cls, project,", "ValueError: If the parameters are invalid. \"\"\" # Wrap the", "the models. filter_ (str): An expression for filtering the results", "client.get_model_evaluation(name) Args: name (str): Resource name for the model evaluation.", "AutoMlClient: The constructed client. \"\"\" credentials = service_account.Credentials.from_service_account_file(filename) kwargs[\"credentials\"] =", "operation = self._inner_api_calls[\"export_data\"]( request, retry=retry, timeout=timeout, metadata=metadata ) return google.api_core.operation.from_gapic(", "items_field=\"datasets\", request_token_field=\"page_token\", response_token_field=\"next_page_token\", ) return iterator def delete_dataset( self, name,", "= response.metadata() Args: name (str): Required. Dataset name. Dataset must", "for the model evaluation. retry (Optional[google.api_core.retry.Retry]): A retry object used", "google.cloud.automl_v1beta1.proto import io_pb2 from google.cloud.automl_v1beta1.proto import model_evaluation_pb2 from google.cloud.automl_v1beta1.proto import", "server. The server never reuses names that it has created", "The dataset has translation\\_dataset\\_metadata. page_size (int): The maximum number of", "Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError:", "default_timeout=self._method_configs[\"CreateDataset\"].timeout, client_info=self._client_info, ) request = service_pb2.CreateDatasetRequest(parent=parent, dataset=dataset) return self._inner_api_calls[\"create_dataset\"]( request,", "per-page, this determines the maximum number of resources in a", "retry and timeout logic. 
if \"create_model\" not in self._inner_api_calls: self._inner_api_calls[", "for = or !=. Some examples of using the filter", "a user-agent string along with API requests. If ``None``, then", "the client will attempt to ascertain the credentials from the", "dataset=dataset) return self._inner_api_calls[\"create_dataset\"]( request, retry=retry, timeout=timeout, metadata=metadata ) def get_dataset(", "def get_model_evaluation( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Gets", "operation, self.transport._operations_client, empty_pb2.Empty, metadata_type=proto_operations_pb2.OperationMetadata, ) def create_model( self, parent, model,", "# TODO: Initialize `dataset`: >>> dataset = {} >>> >>>", "deprecated; use \" \"`transport` instead.\", PendingDeprecationWarning, stacklevel=2, ) # Instantiate", "string.\"\"\" return google.api_core.path_template.expand( \"projects/{project}/locations/{location}/models/{model}\", project=project, location=location, model=model, ) @classmethod def", "both will raise an exception. credentials (google.auth.credentials.Credentials): The authorization credentials", "to go away. if client_config is not None: warnings.warn( \"The", "\"\"\"Return a fully-qualified model string.\"\"\" return google.api_core.path_template.expand( \"projects/{project}/locations/{location}/models/{model}\", project=project, location=location,", "protocol. This argument may also be a callable which returns", "pass >>> >>> >>> # Alternatively: >>> >>> # Iterate", "specified, the default configuration is used. 
client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client", "retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Deletes a dataset and all", "metadata = response.metadata() Args: name (str): Resource name of the", ">>> >>> response = client.delete_model(name) >>> >>> def callback(operation_future): ...", ") request = service_pb2.GetModelRequest(name=name) return self._inner_api_calls[\"get_model\"]( request, retry=retry, timeout=timeout, metadata=metadata", ">>> client = automl_v1beta1.AutoMlClient() >>> >>> name = client.model_evaluation_path('[PROJECT]', '[LOCATION]',", "google.api_core.page_iterator import google.api_core.path_template import grpc from google.cloud.automl_v1beta1.gapic import auto_ml_client_config from", "google.cloud.automl_v1beta1.proto import model_evaluation_pb2 from google.cloud.automl_v1beta1.proto import model_pb2 from google.cloud.automl_v1beta1.proto import", "evaluation was done for aggregate of all annotation specs. page_size", "google.api_core.operation import google.api_core.operations_v1 import google.api_core.page_iterator import google.api_core.path_template import grpc from", "failed for any reason. google.api_core.exceptions.RetryError: If the request failed due", "not in self._inner_api_calls: self._inner_api_calls[ \"create_dataset\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.create_dataset, default_retry=self._method_configs[\"CreateDataset\"].retry,", "... pass >>> >>> >>> # Alternatively: >>> >>> #", "client_info = google.api_core.gapic_v1.client_info.ClientInfo( gapic_version=_GAPIC_LIBRARY_VERSION ) else: client_info.gapic_version = _GAPIC_LIBRARY_VERSION self._client_info", "of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless", "library. 
\"\"\" # Raise deprecation warnings for things we want", "Example: >>> from google.cloud import automl_v1beta1 >>> >>> client =", "info used to send a user-agent string along with API", "def model_evaluation_path(cls, project, location, model, model_evaluation): \"\"\"Return a fully-qualified model_evaluation", "provided to the method. Returns: A :class:`~google.cloud.automl_v1beta1.types.Dataset` instance. Raises: google.api_core.exceptions.GoogleAPICallError:", "dictionary. _INTERFACE_NAME = \"google.cloud.automl.v1beta1.AutoMl\" @classmethod def from_service_account_file(cls, filename, *args, **kwargs):", "argument is mutually exclusive with ``credentials``; providing both will raise", "retry=retry, timeout=timeout, metadata=metadata ) return google.api_core.operation.from_gapic( operation, self.transport._operations_client, empty_pb2.Empty, metadata_type=proto_operations_pb2.OperationMetadata,", "The resource name of the dataset to retrieve. retry (Optional[google.api_core.retry.Retry]):", "this client using the provided credentials file. Args: filename (str):", "desired input location. If a dict is provided, it must", "License for the specific language governing permissions and # limitations", "only need to set this if you're developing your own", "model being deleted. retry (Optional[google.api_core.retry.Retry]): A retry object used to", "# deserialization and actually sending data to the service. if", "client.create_dataset(parent, dataset) Args: parent (str): The resource name of the", "of the deployed model in the production environment. Returns ``google.protobuf.Empty``", "a dataset and all of its contents. Returns empty response", "operation, self.transport._operations_client, empty_pb2.Empty, metadata_type=proto_operations_pb2.OperationMetadata, ) def import_data( self, name, input_config,", "timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Gets a model. 
Example: >>> from", "iterator def delete_model( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\"", "name = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') >>> >>> response = client.delete_dataset(name)", "as the protobuf message :class:`~google.cloud.automl_v1beta1.types.OutputConfig` retry (Optional[google.api_core.retry.Retry]): A retry object", "a model is already deployed, this only deletes the model", "account private key json file. args: Additional arguments to pass", "per- resource, this parameter does not affect the return value.", "out the default settings for retry and timeout for each", "method=functools.partial( self._inner_api_calls[\"list_models\"], retry=retry, timeout=timeout, metadata=metadata, ), request=request, items_field=\"model\", request_token_field=\"page_token\", response_token_field=\"next_page_token\",", "the dataset. output_config (Union[dict, ~google.cloud.automl_v1beta1.types.OutputConfig]): Required. The desired output location.", "for things we want to go away. if client_config is", "to add retry and timeout logic. if \"get_model\" not in", "def deploy_model( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Deploys", "model to deploy. retry (Optional[google.api_core.retry.Retry]): A retry object used to", "transport. # The transport is responsible for handling serialization and", "SERVICE_ADDRESS = \"automl.googleapis.com:443\" \"\"\"The default address of the service.\"\"\" #", "Parse out the default settings for retry and timeout for", "Deletes a dataset and all of its contents. Returns empty", "and # limitations under the License. \"\"\"Accesses the google.cloud.automl.v1beta1 AutoMl", "is provided to the method. Returns: A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. Raises:", "transport method to add retry and timeout logic. 
if \"export_data\"", "= service_pb2.ListModelEvaluationsRequest( parent=parent, filter=filter_, page_size=page_size ) iterator = google.api_core.page_iterator.GRPCIterator( client=None,", "it completes, and ``delete_details`` in the ``metadata`` field. Example: >>>", "examples will be added. input_config (Union[dict, ~google.cloud.automl_v1beta1.types.InputConfig]): Required. The desired", ">>> response = client.deploy_model(name) Args: name (str): Resource name of", "service_account.Credentials.from_service_account_file(filename) kwargs[\"credentials\"] = credentials return cls(*args, **kwargs) from_service_account_json = from_service_account_file", "that is provided to the method. Returns: A :class:`~google.cloud.automl_v1beta1.types.Dataset` instance.", "transport method to add retry and timeout logic. if \"list_model_evaluations\"", "'[LOCATION]', '[MODEL]') >>> >>> # Iterate over all results >>>", "client_info=self._client_info, ) request = service_pb2.GetDatasetRequest(name=name) return self._inner_api_calls[\"get_dataset\"]( request, retry=retry, timeout=timeout,", "name of the project from which to list datasets. filter_", "if \"list_model_evaluations\" not in self._inner_api_calls: self._inner_api_calls[ \"list_model_evaluations\" ] = google.api_core.gapic_v1.method.wrap_method(", ">>> response.add_done_callback(callback) >>> >>> # Handle metadata. >>> metadata =", "Returns a Model in the ``response`` field when it completes.", "requests. If ``None`` is specified, requests will not be retried.", ">>> parent = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') >>> >>> # Iterate", "be of the same form as the protobuf message :class:`~google.cloud.automl_v1beta1.types.Dataset`", "added. input_config (Union[dict, ~google.cloud.automl_v1beta1.types.InputConfig]): Required. The desired input location. 
If", "in self._inner_api_calls: self._inner_api_calls[ \"import_data\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.import_data, default_retry=self._method_configs[\"ImportData\"].retry, default_timeout=self._method_configs[\"ImportData\"].timeout,", ") iterator = google.api_core.page_iterator.GRPCIterator( client=None, method=functools.partial( self._inner_api_calls[\"list_datasets\"], retry=retry, timeout=timeout, metadata=metadata,", "self._inner_api_calls[\"import_data\"]( request, retry=retry, timeout=timeout, metadata=metadata ) return google.api_core.operation.from_gapic( operation, self.transport._operations_client,", ">>> model = {} >>> >>> response = client.create_model(parent, model)", "filter=filter_, page_size=page_size ) iterator = google.api_core.page_iterator.GRPCIterator( client=None, method=functools.partial( self._inner_api_calls[\"list_models\"], retry=retry,", "must be of the same form as the protobuf message", "If page streaming is performed per- resource, this parameter does", "\"delete_model\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.delete_model, default_retry=self._method_configs[\"DeleteModel\"].retry, default_timeout=self._method_configs[\"DeleteModel\"].timeout, client_info=self._client_info, ) request", "(google.auth.credentials.Credentials): The authorization credentials to attach to requests. These credentials", "# TODO: Initialize `output_config`: >>> output_config = {} >>> >>>", "\"\"\"Accesses the google.cloud.automl.v1beta1 AutoMl API.\"\"\" import functools import pkg_resources import", "filename, *args, **kwargs): \"\"\"Creates an instance of this client using", "and the like. 
self._inner_api_calls = {} # Service calls def", "= google.api_core.gapic_v1.method.wrap_method( self.transport.get_dataset, default_retry=self._method_configs[\"GetDataset\"].retry, default_timeout=self._method_configs[\"GetDataset\"].timeout, client_info=self._client_info, ) request = service_pb2.GetDatasetRequest(name=name)", "self._inner_api_calls: self._inner_api_calls[ \"list_datasets\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.list_datasets, default_retry=self._method_configs[\"ListDatasets\"].retry, default_timeout=self._method_configs[\"ListDatasets\"].timeout, client_info=self._client_info,", "these are the defaults specified in the `*_config.py` # file", "invalid. \"\"\" # Wrap the transport method to add retry", "and timeout logic. if \"create_dataset\" not in self._inner_api_calls: self._inner_api_calls[ \"create_dataset\"", "OF ANY KIND, either express or implied. # See the", "TODO: Initialize `dataset`: >>> dataset = {} >>> >>> response", "): \"\"\" Exports dataset's data to a Google Cloud Storage", "See the License for the specific language governing permissions and", "= client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') >>> >>> response = client.delete_model(name) >>>", "metadata=None, ): \"\"\" Undeploys model. 
Returns an ``UndeployModelResponse`` in the", "automl_v1beta1.AutoMlClient() >>> >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') >>> >>>", "default_retry=self._method_configs[\"DeployModel\"].retry, default_timeout=self._method_configs[\"DeployModel\"].timeout, client_info=self._client_info, ) request = service_pb2.DeployModelRequest(name=name) return self._inner_api_calls[\"deploy_model\"]( request,", "client_info=self._client_info, ) request = service_pb2.ListModelEvaluationsRequest( parent=parent, filter=filter_, page_size=page_size ) iterator", "These are the actual callables which invoke the proper #", "self._inner_api_calls[ \"list_model_evaluations\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.list_model_evaluations, default_retry=self._method_configs[\"ListModelEvaluations\"].retry, default_timeout=self._method_configs[\"ListModelEvaluations\"].timeout, client_info=self._client_info, )", "from google.cloud.automl_v1beta1.proto import model_evaluation_pb2 from google.cloud.automl_v1beta1.proto import model_pb2 from google.cloud.automl_v1beta1.proto", "provided to the method. Returns: A :class:`~google.gax.PageIterator` instance. By default,", "timeout=timeout, metadata=metadata, ), request=request, items_field=\"model\", request_token_field=\"page_token\", response_token_field=\"next_page_token\", ) return iterator", "to in writing, software # distributed under the License is", "for actually making the API calls. The default transport uses", "... for element in page: ... # process element ...", "model in the production environment. Returns ``google.protobuf.Empty`` in the ``response``", ") if client_info is None: client_info = google.api_core.gapic_v1.client_info.ClientInfo( gapic_version=_GAPIC_LIBRARY_VERSION )", "id for the item is ``{dataset_id}``. \"\"\" SERVICE_ADDRESS = \"automl.googleapis.com:443\"", "(str): The resource name of the dataset to delete. 
retry", "or agreed to in writing, software # distributed under the", "of the model to undeploy. retry (Optional[google.api_core.retry.Retry]): A retry object", "google.api_core.path_template import grpc from google.cloud.automl_v1beta1.gapic import auto_ml_client_config from google.cloud.automl_v1beta1.gapic import", "not in self._inner_api_calls: self._inner_api_calls[ \"deploy_model\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.deploy_model, default_retry=self._method_configs[\"DeployModel\"].retry,", "timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Gets a dataset. Example: >>> from", "dataset has translation\\_dataset\\_metadata. page_size (int): The maximum number of resources", "the case. - ``dataset_id`` - for = or !=. Some", "request. - ``dataset_metadata`` - for existence of the case. An", "{} >>> >>> response = client.create_model(parent, model) >>> >>> def", "return iterator def delete_dataset( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ):", "obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0", "you create a model, several model evaluations are created for", "a dataset. Example: >>> from google.cloud import automl_v1beta1 >>> >>>", "default transport uses the gRPC protocol. This argument may also", ">>> # TODO: Initialize `model`: >>> model = {} >>>", "client_info=self._client_info, ) request = service_pb2.CreateModelRequest(parent=parent, model=model) operation = self._inner_api_calls[\"create_model\"]( request,", "compliance with the License. # You may obtain a copy", "undeploy. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests.", "This is the key used to # find the method", "DEPRECATED. A dictionary of call options for each method. If", "Resource name of the model to undeploy. 
retry (Optional[google.api_core.retry.Retry]): A", "metadata = response.metadata() Args: name (str): The resource name of", "it has created after the resources with those names are", "Args: name (str): Resource name for the model evaluation. retry", "proper # transport methods, wrapped with `wrap_method` to add retry,", "): \"\"\" Gets a model evaluation. Example: >>> from google.cloud", "timeout=timeout, metadata=metadata ) def list_models( self, parent, filter_=None, page_size=None, retry=google.api_core.gapic_v1.method.DEFAULT,", ") # Save a dictionary of cached API call functions.", "= google.api_core.gapic_v1.method.wrap_method( self.transport.undeploy_model, default_retry=self._method_configs[\"UndeployModel\"].retry, default_timeout=self._method_configs[\"UndeployModel\"].timeout, client_info=self._client_info, ) request = service_pb2.UndeployModelRequest(name=name)", "\"Received both a transport instance and \" \"credentials; these are", ">>> >>> # TODO: Initialize `model`: >>> model = {}", "a Model in the ``response`` field when it completes. When", "a fully-qualified dataset string.\"\"\" return google.api_core.path_template.expand( \"projects/{project}/locations/{location}/datasets/{dataset}\", project=project, location=location, dataset=dataset,", "through which to make calls. This argument is mutually exclusive", "is ``{dataset_id}``. \"\"\" SERVICE_ADDRESS = \"automl.googleapis.com:443\" \"\"\"The default address of", "The desired output location. If a dict is provided, it", "returns a transport instance. 
Callables will be sent the credentials", ">>> >>> response = client.export_data(name, output_config) >>> >>> def callback(operation_future):", "return google.api_core.operation.from_gapic( operation, self.transport._operations_client, empty_pb2.Empty, metadata_type=proto_operations_pb2.OperationMetadata, ) def export_data( self,", "] = google.api_core.gapic_v1.method.wrap_method( self.transport.get_model, default_retry=self._method_configs[\"GetModel\"].retry, default_timeout=self._method_configs[\"GetModel\"].timeout, client_info=self._client_info, ) request =", "timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Deploys model. Returns a ``DeployModelResponse`` in", "settings for retry and timeout for each RPC # from", "model=model, ) @classmethod def model_evaluation_path(cls, project, location, model, model_evaluation): \"\"\"Return", "- ``translation_dataset_metadata:*`` --> The dataset has translation\\_dataset\\_metadata. page_size (int): The", "not use this file except in compliance with the License.", ">>> for element in client.list_model_evaluations(parent): ... # process element ...", "# Instantiate the transport. # The transport is responsible for", "logic. 
if \"export_data\" not in self._inner_api_calls: self._inner_api_calls[ \"export_data\" ] =", "specified in the `*_config.py` # file next to this one.)", "request = service_pb2.ListModelsRequest( parent=parent, filter=filter_, page_size=page_size ) iterator = google.api_core.page_iterator.GRPCIterator(", "form as the protobuf message :class:`~google.cloud.automl_v1beta1.types.Dataset` retry (Optional[google.api_core.retry.Retry]): A retry", "default_retry=self._method_configs[\"CreateModel\"].retry, default_timeout=self._method_configs[\"CreateModel\"].timeout, client_info=self._client_info, ) request = service_pb2.CreateModelRequest(parent=parent, model=model) operation =", "'[MODEL]') >>> >>> response = client.get_model(name) Args: name (str): Resource", "field when it completes. When you create a model, several", "you may not use this file except in compliance with", "def delete_dataset( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Deletes", "object used to retry requests. If ``None`` is specified, requests", "parent location. filter_ (str): An expression for filtering the results", "Returns an empty response in the ``response`` field when it", "(str): Required. Dataset name. Dataset must already exist. All imported", "metadata. >>> metadata = response.metadata() Args: parent (str): Resource name", "~google.cloud.automl_v1beta1.types.InputConfig]): Required. The desired input location. If a dict is", "name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') >>> >>> response = client.get_model(name)", "in self._inner_api_calls: self._inner_api_calls[ \"create_dataset\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.create_dataset, default_retry=self._method_configs[\"CreateDataset\"].retry, default_timeout=self._method_configs[\"CreateDataset\"].timeout,", "be sent the credentials as the first argument and the", "and timeout logic. 
if \"undeploy_model\" not in self._inner_api_calls: self._inner_api_calls[ \"undeploy_model\"", "is used. client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send", "raise an exception. credentials (google.auth.credentials.Credentials): The authorization credentials to attach", "metadata=None, ): \"\"\" Lists model evaluations. Example: >>> from google.cloud", "this if you're developing your own client library. \"\"\" #", "list datasets. filter_ (str): An expression for filtering the results", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "over all results >>> for element in client.list_models(parent): ... #", "are: - ``image_classification_model_metadata:*`` --> The model has image\\_classification\\_model\\_metadata. - ``dataset_id=5``", "self._inner_api_calls: self._inner_api_calls[ \"undeploy_model\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.undeploy_model, default_retry=self._method_configs[\"UndeployModel\"].retry, default_timeout=self._method_configs[\"UndeployModel\"].timeout, client_info=self._client_info,", "filtering the results of the request. - ``model_metadata`` - for", "add retry, # timeout, and the like. self._inner_api_calls = {}", "import google.api_core.operations_v1 import google.api_core.page_iterator import google.api_core.path_template import grpc from google.cloud.automl_v1beta1.gapic", "credentials file. Args: filename (str): The path to the service", "in the ``response`` field when it completes. Example: >>> from", "location=location, model=model, model_evaluation=model_evaluation, ) def __init__( self, transport=None, channel=None, credentials=None,", "metadata=metadata ) def get_model_evaluation( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ):", "client_config (dict): DEPRECATED. 
A dictionary of call options for each", "self, name, output_config, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Exports dataset's", "= client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') >>> >>> response = client.undeploy_model(name) Args:", "from_service_account_json = from_service_account_file @classmethod def location_path(cls, project, location): \"\"\"Return a", "serialization and # deserialization and actually sending data to the", "self.transport.get_model, default_retry=self._method_configs[\"GetModel\"].retry, default_timeout=self._method_configs[\"GetModel\"].timeout, client_info=self._client_info, ) request = service_pb2.GetModelRequest(name=name) return self._inner_api_calls[\"get_model\"](", "determines the maximum number of resources in a page. retry", "API call functions. # These are the actual callables which", "proto_operations_pb2 from google.cloud.automl_v1beta1.proto import prediction_service_pb2 from google.cloud.automl_v1beta1.proto import prediction_service_pb2_grpc from", "\"create_model\" not in self._inner_api_calls: self._inner_api_calls[ \"create_model\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.create_model,", "= service_pb2.UndeployModelRequest(name=name) return self._inner_api_calls[\"undeploy_model\"]( request, retry=retry, timeout=timeout, metadata=metadata ) def", "of the model. retry (Optional[google.api_core.retry.Retry]): A retry object used to", "if \"list_models\" not in self._inner_api_calls: self._inner_api_calls[ \"list_models\" ] = google.api_core.gapic_v1.method.wrap_method(", "a fully-qualified model_evaluation string.\"\"\" return google.api_core.path_template.expand( \"projects/{project}/locations/{location}/models/{model}/modelEvaluations/{model_evaluation}\", project=project, location=location, model=model,", "element in client.list_models(parent): ... # process element ... pass >>>", "item is ``{dataset_id}``. 
\"\"\" SERVICE_ADDRESS = \"automl.googleapis.com:443\" \"\"\"The default address", "): \"\"\" Lists model evaluations. Example: >>> from google.cloud import", "this parameter does not affect the return value. If page", "default, this is an iterable of :class:`~google.cloud.automl_v1beta1.types.ModelEvaluation` instances. This object", "are the defaults specified in the `*_config.py` # file next", "a dataset. Returns an empty response in the ``response`` field", ">>> metadata = response.metadata() Args: name (str): Required. The resource", "models. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client", "client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') >>> >>> response = client.undeploy_model(name) Args: name", "the dataset to retrieve. retry (Optional[google.api_core.retry.Retry]): A retry object used", "retry=retry, timeout=timeout, metadata=metadata ) def list_model_evaluations( self, parent, filter_=None, page_size=None,", "client. This is the key used to # find the", "for element in client.list_datasets(parent): ... # process element ... pass", "create_dataset( self, parent, dataset, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Creates", "that is provided to the method. Returns: A :class:`~google.cloud.automl_v1beta1.types.Operation` instance.", "self, transport=None, channel=None, credentials=None, client_config=None, client_info=None, ): \"\"\"Constructor. Args: transport", "\" \"credentials; these are mutually exclusive.\" ) self.transport = transport", "import prediction_service_pb2 from google.cloud.automl_v1beta1.proto import prediction_service_pb2_grpc from google.cloud.automl_v1beta1.proto import service_pb2", "default_timeout=self._method_configs[\"ListDatasets\"].timeout, client_info=self._client_info, ) request = service_pb2.ListDatasetsRequest( parent=parent, filter=filter_, page_size=page_size )", "being deleted. 
retry (Optional[google.api_core.retry.Retry]): A retry object used to retry", "in self._inner_api_calls: self._inner_api_calls[ \"list_models\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.list_models, default_retry=self._method_configs[\"ListModels\"].retry, default_timeout=self._method_configs[\"ListModels\"].timeout,", "= google.api_core.gapic_v1.method.wrap_method( self.transport.create_model, default_retry=self._method_configs[\"CreateModel\"].retry, default_timeout=self._method_configs[\"CreateModel\"].timeout, client_info=self._client_info, ) request = service_pb2.CreateModelRequest(parent=parent,", "self._inner_api_calls[\"list_model_evaluations\"], retry=retry, timeout=timeout, metadata=metadata, ), request=request, items_field=\"model_evaluation\", request_token_field=\"page_token\", response_token_field=\"next_page_token\", )", "client.model_evaluation_path('[PROJECT]', '[LOCATION]', '[MODEL]', '[MODEL_EVALUATION]') >>> >>> response = client.get_model_evaluation(name) Args:", "server never reuses names that it has created after the", "\"\"\" Creates a model. Returns a Model in the ``response``", "not None: warnings.warn( \"The `client_config` argument is deprecated.\", PendingDeprecationWarning, stacklevel=2,", "Args: transport (Union[~.AutoMlGrpcTransport, Callable[[~.Credentials, type], ~.AutoMlGrpcTransport]): A transport instance, responsible", "the provided credentials file. Args: filename (str): The path to", ") return iterator def delete_model( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None,", "and timeout logic. if \"delete_dataset\" not in self._inner_api_calls: self._inner_api_calls[ \"delete_dataset\"", "timeout logic. if \"create_dataset\" not in self._inner_api_calls: self._inner_api_calls[ \"create_dataset\" ]", "when it completes. 
When you create a model, several model", "Args: parent (str): Resource name of the parent project where", "`dataset`: >>> dataset = {} >>> >>> response = client.create_dataset(parent,", "parent = client.location_path('[PROJECT]', '[LOCATION]') >>> >>> # Iterate over all", "in client.list_models(parent).pages: ... for element in page: ... # process", "list_datasets( self, parent, filter_=None, page_size=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\"", "one page at a time >>> for page in client.list_model_evaluations(parent).pages:", "return self._inner_api_calls[\"get_model\"]( request, retry=retry, timeout=timeout, metadata=metadata ) def list_models( self,", "time >>> for page in client.list_datasets(parent).pages: ... for element in", "client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') >>> >>> response = client.deploy_model(name) Args: name", "... pass Args: parent (str): Resource name of the model", "kwargs: Additional arguments to pass to the constructor. Returns: AutoMlClient:", "type], ~.AutoMlGrpcTransport]): A transport instance, responsible for actually making the", "google.api_core.gapic_v1.client_info.ClientInfo( gapic_version=_GAPIC_LIBRARY_VERSION ) else: client_info.gapic_version = _GAPIC_LIBRARY_VERSION self._client_info = client_info", "also be configured to iterate over the pages of the", "and one evaluation for each annotation spec. Example: >>> from", "to add retry and timeout logic. 
if \"delete_model\" not in", "client.undeploy_model(name) Args: name (str): Resource name of the model to", "using the filter are: - ``annotation_spec_id!=4`` --> The model evaluation", "# These are the actual callables which invoke the proper", "google.api_core.operation.from_gapic( operation, self.transport._operations_client, empty_pb2.Empty, metadata_type=proto_operations_pb2.OperationMetadata, ) def deploy_model( self, name,", "google.api_core.path_template.expand( \"projects/{project}/locations/{location}\", project=project, location=location, ) @classmethod def dataset_path(cls, project, location,", "transport method to add retry and timeout logic. if \"deploy_model\"", "may also be a callable which returns a transport instance.", "specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str,", "client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') >>> >>> response = client.delete_dataset(name) >>> >>>", "is an iterable of :class:`~google.cloud.automl_v1beta1.types.Model` instances. This object can also", "\"\"\"Return a fully-qualified dataset string.\"\"\" return google.api_core.path_template.expand( \"projects/{project}/locations/{location}/datasets/{dataset}\", project=project, location=location,", "over the pages of the response through the `options` parameter.", "client_info=self._client_info, ) request = service_pb2.DeleteModelRequest(name=name) operation = self._inner_api_calls[\"delete_model\"]( request, retry=retry,", "at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "has image\\_classification\\_model\\_metadata. - ``dataset_id=5`` --> The model was created from", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "... # Handle result. ... 
result = operation_future.result() >>> >>>", "project, location, model): \"\"\"Return a fully-qualified model string.\"\"\" return google.api_core.path_template.expand(", "= {} >>> >>> response = client.create_model(parent, model) >>> >>>", ") def deploy_model( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\"", "into a dataset. Returns an empty response in the ``response``", "input_config) >>> >>> def callback(operation_future): ... # Handle result. ...", "method to add retry and timeout logic. if \"create_model\" not", "the model evaluations for. If modelId is set as \"-\",", ">>> >>> name = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') >>> >>> #", "to this one.) self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( client_config[\"interfaces\"][self._INTERFACE_NAME] ) # Save", "response = client.get_dataset(name) Args: name (str): The resource name of", "- ``dataset_id=5`` --> The model was created from a sibling", "of the parent project where the model is being created.", "element in page: ... # process element ... pass Args:", "\"\"\"Return a fully-qualified location string.\"\"\" return google.api_core.path_template.expand( \"projects/{project}/locations/{location}\", project=project, location=location,", "~.AutoMlGrpcTransport]): A transport instance, responsible for actually making the API", "metadata that is provided to the method. Returns: A :class:`~google.gax.PageIterator`", "that if ``retry`` is specified, the timeout applies to each", "return google.api_core.operation.from_gapic( operation, self.transport._operations_client, model_pb2.Model, metadata_type=proto_operations_pb2.OperationMetadata, ) def get_model( self,", "retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Creates a dataset. 
Example: >>>", "https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "transport instance to ``transport``; doing so will raise an exception.", "transport=None, channel=None, credentials=None, client_config=None, client_info=None, ): \"\"\"Constructor. Args: transport (Union[~.AutoMlGrpcTransport,", "input_config, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Imports data into a", "google.api_core.gapic_v1.method.wrap_method( self.transport.list_model_evaluations, default_retry=self._method_configs[\"ListModelEvaluations\"].retry, default_timeout=self._method_configs[\"ListModelEvaluations\"].timeout, client_info=self._client_info, ) request = service_pb2.ListModelEvaluationsRequest( parent=parent,", "file except in compliance with the License. # You may", ">>> >>> # TODO: Initialize `input_config`: >>> input_config = {}", "and \" \"credentials; these are mutually exclusive.\" ) self.transport =", "def export_data( self, name, output_config, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\"", ") @classmethod def model_path(cls, project, location, model): \"\"\"Return a fully-qualified", "the client_config dictionary. _INTERFACE_NAME = \"google.cloud.automl.v1beta1.AutoMl\" @classmethod def from_service_account_file(cls, filename,", "): \"\"\" Deploys model. Returns a ``DeployModelResponse`` in the ``response``", "will not be retried. timeout (Optional[float]): The amount of time,", "retry and timeout logic. if \"list_models\" not in self._inner_api_calls: self._inner_api_calls[", "import auto_ml_grpc_transport from google.cloud.automl_v1beta1.proto import data_items_pb2 from google.cloud.automl_v1beta1.proto import dataset_pb2", "with ``credentials``; providing both will raise an exception. credentials (google.auth.credentials.Credentials):", "resources with those names are deleted. 
An ID of a", "# Wrap the transport method to add retry and timeout", "The default transport uses the gRPC protocol. This argument may", "channel (grpc.Channel): DEPRECATED. A ``Channel`` instance through which to make", "- ``image_classification_model_metadata:*`` --> The model has image\\_classification\\_model\\_metadata. - ``dataset_id=5`` -->", "parent = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') >>> >>> # Iterate over", "to the service account private key json file. args: Additional", "the model in AutoML BE, and does not change the", "from across all models of the parent location. filter_ (str):", "as the first argument and the default transport class as", "delete_model( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Deletes a", "project=project, location=location, model=model, model_evaluation=model_evaluation, ) def __init__( self, transport=None, channel=None,", "name for the model evaluation. retry (Optional[google.api_core.retry.Retry]): A retry object", "over all results >>> for element in client.list_datasets(parent): ... #", "be of the same form as the protobuf message :class:`~google.cloud.automl_v1beta1.types.InputConfig`", "self._inner_api_calls: self._inner_api_calls[ \"create_dataset\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.create_dataset, default_retry=self._method_configs[\"CreateDataset\"].retry, default_timeout=self._method_configs[\"CreateDataset\"].timeout, client_info=self._client_info,", "governing permissions and # limitations under the License. \"\"\"Accesses the", "`client_config` argument is deprecated.\", PendingDeprecationWarning, stacklevel=2, ) else: client_config =", "client.list_models(parent): ... # process element ... 
pass >>> >>> >>>", "import google.api_core.gapic_v1.client_info import google.api_core.gapic_v1.config import google.api_core.gapic_v1.method import google.api_core.grpc_helpers import google.api_core.operation", ":class:`~google.cloud.automl_v1beta1.types.Dataset` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests.", "default_timeout=self._method_configs[\"GetModelEvaluation\"].timeout, client_info=self._client_info, ) request = service_pb2.GetModelEvaluationRequest(name=name) return self._inner_api_calls[\"get_model_evaluation\"]( request, retry=retry,", "mutually exclusive with providing a transport instance to ``transport``; doing", "the protobuf message :class:`~google.cloud.automl_v1beta1.types.OutputConfig` retry (Optional[google.api_core.retry.Retry]): A retry object used", "all of its contents. Returns empty response in the ``response``", "of using the filter are: - ``annotation_spec_id!=4`` --> The model", "operation = self._inner_api_calls[\"create_model\"]( request, retry=retry, timeout=timeout, metadata=metadata ) return google.api_core.operation.from_gapic(", "be retried. timeout (Optional[float]): The amount of time, in seconds,", "``NOT annotation_spec_id:*`` --> The model evaluation was done for aggregate", "Initialize `output_config`: >>> output_config = {} >>> >>> response =", "application to the service. If none are specified, the client", "model. Returns a Model in the ``response`` field when it", "the proper # transport methods, wrapped with `wrap_method` to add", "if \"list_datasets\" not in self._inner_api_calls: self._inner_api_calls[ \"list_datasets\" ] = google.api_core.gapic_v1.method.wrap_method(", "request, retry=retry, timeout=timeout, metadata=metadata ) def list_model_evaluations( self, parent, filter_=None,", ":class:`~google.cloud.automl_v1beta1.types.ModelEvaluation` instances. This object can also be configured to iterate", "Additional arguments to pass to the constructor. 
kwargs: Additional arguments", "from google.cloud.automl_v1beta1.proto import operations_pb2 as proto_operations_pb2 from google.cloud.automl_v1beta1.proto import prediction_service_pb2", "channel=channel, credentials=credentials ) if client_info is None: client_info = google.api_core.gapic_v1.client_info.ClientInfo(", "if \"delete_model\" not in self._inner_api_calls: self._inner_api_calls[ \"delete_model\" ] = google.api_core.gapic_v1.method.wrap_method(", ") def undeploy_model( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\"", "] = google.api_core.gapic_v1.method.wrap_method( self.transport.list_models, default_retry=self._method_configs[\"ListModels\"].retry, default_timeout=self._method_configs[\"ListModels\"].timeout, client_info=self._client_info, ) request =", ":class:`~google.cloud.automl_v1beta1.types.InputConfig` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests.", "self._inner_api_calls[ \"delete_model\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.delete_model, default_retry=self._method_configs[\"DeleteModel\"].retry, default_timeout=self._method_configs[\"DeleteModel\"].timeout, client_info=self._client_info, )", "KIND, either express or implied. # See the License for", "client_info is None: client_info = google.api_core.gapic_v1.client_info.ClientInfo( gapic_version=_GAPIC_LIBRARY_VERSION ) else: client_info.gapic_version", "due to a retryable error and retry attempts failed. ValueError:", "`wrap_method` to add retry, # timeout, and the like. self._inner_api_calls", "= auto_ml_client_config.config if channel: warnings.warn( \"The `channel` argument is deprecated;", "constructor. kwargs: Additional arguments to pass to the constructor. 
Returns:", "self._inner_api_calls[ \"list_datasets\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.list_datasets, default_retry=self._method_configs[\"ListDatasets\"].retry, default_timeout=self._method_configs[\"ListDatasets\"].timeout, client_info=self._client_info, )", ") def get_model( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\"", "parent = client.location_path('[PROJECT]', '[LOCATION]') >>> >>> # TODO: Initialize `dataset`:", "= pkg_resources.get_distribution(\"google-cloud-automl\").version class AutoMlClient(object): \"\"\" AutoML Server API. The resource", "add retry and timeout logic. if \"delete_model\" not in self._inner_api_calls:", "- ``annotation_spec_id`` - for =, != or existence. See example", "to iterate over the pages of the response through the", "- for existence of the case. - ``dataset_id`` - for", "all annotation specs. page_size (int): The maximum number of resources", "operation = self._inner_api_calls[\"delete_dataset\"]( request, retry=retry, timeout=timeout, metadata=metadata ) return google.api_core.operation.from_gapic(", "this client. This is the key used to # find", "= client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') >>> >>> response = client.delete_dataset(name) >>>", "# find the method configuration in the client_config dictionary. _INTERFACE_NAME", "exclusive with providing a transport instance to ``transport``; doing so", "was done for aggregate of all annotation specs. 
page_size (int):", "(the \"License\"); # you may not use this file except", "default_timeout=self._method_configs[\"DeleteDataset\"].timeout, client_info=self._client_info, ) request = service_pb2.DeleteDatasetRequest(name=name) operation = self._inner_api_calls[\"delete_dataset\"]( request,", "the protobuf message :class:`~google.cloud.automl_v1beta1.types.Model` retry (Optional[google.api_core.retry.Retry]): A retry object used", "None: warnings.warn( \"The `client_config` argument is deprecated.\", PendingDeprecationWarning, stacklevel=2, )", "an exception. client_config (dict): DEPRECATED. A dictionary of call options", "complete. Note that if ``retry`` is specified, the timeout applies", "each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is", "client.delete_dataset(name) >>> >>> def callback(operation_future): ... # Handle result. ...", "retry requests. If ``None`` is specified, requests will not be", "ID of a resource is the last element of the", "and timeout logic. if \"list_models\" not in self._inner_api_calls: self._inner_api_calls[ \"list_models\"", "self._inner_api_calls[\"create_model\"]( request, retry=retry, timeout=timeout, metadata=metadata ) return google.api_core.operation.from_gapic( operation, self.transport._operations_client,", "): \"\"\" Gets a dataset. Example: >>> from google.cloud import", "specified, requests will not be retried. timeout (Optional[float]): The amount", "# Save a dictionary of cached API call functions. #", "arguments to pass to the constructor. kwargs: Additional arguments to", "iterable of :class:`~google.cloud.automl_v1beta1.types.ModelEvaluation` instances. This object can also be configured", "responsible for handling serialization and # deserialization and actually sending", "Deploys model. 
Returns a ``DeployModelResponse`` in the ``response`` field when", "else: client_info.gapic_version = _GAPIC_LIBRARY_VERSION self._client_info = client_info # Parse out", "# # Unless required by applicable law or agreed to", "An example of using the filter is: - ``translation_dataset_metadata:*`` -->", "google.cloud.automl_v1beta1.proto import data_items_pb2 from google.cloud.automl_v1beta1.proto import dataset_pb2 from google.cloud.automl_v1beta1.proto import", "google.api_core.grpc_helpers import google.api_core.operation import google.api_core.operations_v1 import google.api_core.page_iterator import google.api_core.path_template import", "# process element ... pass >>> >>> >>> # Alternatively:", "= google.api_core.page_iterator.GRPCIterator( client=None, method=functools.partial( self._inner_api_calls[\"list_models\"], retry=retry, timeout=timeout, metadata=metadata, ), request=request,", "Storage bucket. Returns an empty response in the ``response`` field", "self._inner_api_calls: self._inner_api_calls[ \"delete_model\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.delete_model, default_retry=self._method_configs[\"DeleteModel\"].retry, default_timeout=self._method_configs[\"DeleteModel\"].timeout, client_info=self._client_info,", ">>> metadata = response.metadata() Args: name (str): Required. Dataset name.", "cls(*args, **kwargs) from_service_account_json = from_service_account_file @classmethod def location_path(cls, project, location):", "metadata that is provided to the method. Returns: A :class:`~google.cloud.automl_v1beta1.types._OperationFuture`", "a model evaluation. Example: >>> from google.cloud import automl_v1beta1 >>>", "and timeout logic. if \"list_datasets\" not in self._inner_api_calls: self._inner_api_calls[ \"list_datasets\"", "path to the service account private key json file. 
args:", "if \"get_model\" not in self._inner_api_calls: self._inner_api_calls[ \"get_model\" ] = google.api_core.gapic_v1.method.wrap_method(", "timeout logic. if \"get_model_evaluation\" not in self._inner_api_calls: self._inner_api_calls[ \"get_model_evaluation\" ]", "to list the model evaluations for. If modelId is set", "provided to the method. Returns: A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. Raises: google.api_core.exceptions.GoogleAPICallError:", "results one page at a time >>> for page in", "model. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client", "\"\"\" Deletes a model. If a model is already deployed,", "] = google.api_core.gapic_v1.method.wrap_method( self.transport.create_model, default_retry=self._method_configs[\"CreateModel\"].retry, default_timeout=self._method_configs[\"CreateModel\"].timeout, client_info=self._client_info, ) request =", "implied. # See the License for the specific language governing", "retry and timeout logic. if \"delete_model\" not in self._inner_api_calls: self._inner_api_calls[", "model evaluations are created for it: a global evaluation, and", "page streaming is performed per-page, this determines the maximum number", "project, location, dataset): \"\"\"Return a fully-qualified dataset string.\"\"\" return google.api_core.path_template.expand(", "credentials: raise ValueError( \"Received both a transport instance and \"", "this is an iterable of :class:`~google.cloud.automl_v1beta1.types.Dataset` instances. This object can", "must already exist. All imported annotations and examples will be", "# Iterate over results one page at a time >>>", "dataset. Returns an empty response in the ``response`` field when", "transport uses the gRPC protocol. 
This argument may also be", "\"\"\" # Raise deprecation warnings for things we want to", "metadata=metadata, ), request=request, items_field=\"datasets\", request_token_field=\"page_token\", response_token_field=\"next_page_token\", ) return iterator def", "= client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') >>> >>> response = client.get_dataset(name) Args:", "The model evaluation was done for annotation spec with ID", "self.transport._operations_client, empty_pb2.Empty, metadata_type=proto_operations_pb2.OperationMetadata, ) def export_data( self, name, output_config, retry=google.api_core.gapic_v1.method.DEFAULT,", "empty_pb2 _GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution(\"google-cloud-automl\").version class AutoMlClient(object): \"\"\" AutoML Server API.", "it completes. When you create a model, several model evaluations", "``model_metadata`` - for existence of the case. - ``dataset_id`` -", "only deletes the model in AutoML BE, and does not", "google.cloud.automl.v1beta1 AutoMl API.\"\"\" import functools import pkg_resources import warnings from", "client_config = auto_ml_client_config.config if channel: warnings.warn( \"The `channel` argument is", "individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided", "then the id for the item is ``{dataset_id}``. \"\"\" SERVICE_ADDRESS", "page_size (int): The maximum number of resources contained in the", "the ``response`` field when it completes. When you create a", "project=project, location=location, model=model, ) @classmethod def model_evaluation_path(cls, project, location, model,", "= transport( credentials=credentials, default_class=auto_ml_grpc_transport.AutoMlGrpcTransport, ) else: if credentials: raise ValueError(", "completes, and ``delete_details`` in the ``metadata`` field. 
Example: >>> from", "automl_v1beta1.AutoMlClient() >>> >>> name = client.model_evaluation_path('[PROJECT]', '[LOCATION]', '[MODEL]', '[MODEL_EVALUATION]') >>>", "= response.metadata() Args: name (str): Required. The resource name of", "actually making the API calls. The default transport uses the", "request_token_field=\"page_token\", response_token_field=\"next_page_token\", ) return iterator def delete_model( self, name, retry=google.api_core.gapic_v1.method.DEFAULT,", "\"undeploy_model\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.undeploy_model, default_retry=self._method_configs[\"UndeployModel\"].retry, default_timeout=self._method_configs[\"UndeployModel\"].timeout, client_info=self._client_info, ) request", "this is an iterable of :class:`~google.cloud.automl_v1beta1.types.Model` instances. This object can", "self._inner_api_calls[\"list_datasets\"], retry=retry, timeout=timeout, metadata=metadata, ), request=request, items_field=\"datasets\", request_token_field=\"page_token\", response_token_field=\"next_page_token\", )", "Resource name of the model being deleted. retry (Optional[google.api_core.retry.Retry]): A", "retrieve. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests.", ">>> >>> response = client.import_data(name, input_config) >>> >>> def callback(operation_future):", "of all annotation specs. page_size (int): The maximum number of", "request, retry=retry, timeout=timeout, metadata=metadata ) return google.api_core.operation.from_gapic( operation, self.transport._operations_client, model_pb2.Model,", "client = automl_v1beta1.AutoMlClient() >>> >>> parent = client.location_path('[PROJECT]', '[LOCATION]') >>>", "Service calls def create_dataset( self, parent, dataset, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None,", "Unless required by applicable law or agreed to in writing,", "results of the request. 
- ``model_metadata`` - for existence of", "if transport: if callable(transport): self.transport = transport( credentials=credentials, default_class=auto_ml_grpc_transport.AutoMlGrpcTransport, )", "\"\"\" Gets a model. Example: >>> from google.cloud import automl_v1beta1", "transport( credentials=credentials, default_class=auto_ml_grpc_transport.AutoMlGrpcTransport, ) else: if credentials: raise ValueError( \"Received", "the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "AutoMl API.\"\"\" import functools import pkg_resources import warnings from google.oauth2", "# TODO: Initialize `model`: >>> model = {} >>> >>>", "'[LOCATION]', '[DATASET]') >>> >>> # TODO: Initialize `input_config`: >>> input_config", "a sibling dataset with ID 5. page_size (int): The maximum", "`channel` argument is deprecated; use \" \"`transport` instead.\", PendingDeprecationWarning, stacklevel=2,", "the specific language governing permissions and # limitations under the", "When you create a model, several model evaluations are created", "operation, self.transport._operations_client, model_pb2.Model, metadata_type=proto_operations_pb2.OperationMetadata, ) def get_model( self, name, retry=google.api_core.gapic_v1.method.DEFAULT,", "warnings for things we want to go away. if client_config", "# The transport is responsible for handling serialization and #", "the results of the request. - ``dataset_metadata`` - for existence", "Model in the ``response`` field when it completes. When you", "name of the dataset to retrieve. 
retry (Optional[google.api_core.retry.Retry]): A retry", "if \"create_model\" not in self._inner_api_calls: self._inner_api_calls[ \"create_model\" ] = google.api_core.gapic_v1.method.wrap_method(", "google.api_core.gapic_v1.method.wrap_method( self.transport.get_dataset, default_retry=self._method_configs[\"GetDataset\"].retry, default_timeout=self._method_configs[\"GetDataset\"].timeout, client_info=self._client_info, ) request = service_pb2.GetDatasetRequest(name=name) return", "The resource names are assigned by the server. The server", "\"get_dataset\" not in self._inner_api_calls: self._inner_api_calls[ \"get_dataset\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.get_dataset,", ") def list_models( self, parent, filter_=None, page_size=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None,", "client.create_model(parent, model) >>> >>> def callback(operation_future): ... # Handle result.", "change the status of the deployed model in the production", "client.list_model_evaluations(parent): ... # process element ... pass >>> >>> >>>", "retry and timeout logic. if \"undeploy_model\" not in self._inner_api_calls: self._inner_api_calls[", "with providing a transport instance to ``transport``; doing so will", "input_config = {} >>> >>> response = client.import_data(name, input_config) >>>", "its contents. 
Returns empty response in the ``response`` field when", "def create_model( self, parent, model, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\"", "BE, and does not change the status of the deployed", "\"automl.googleapis.com:443\" \"\"\"The default address of the service.\"\"\" # The name", "= google.api_core.gapic_v1.method.wrap_method( self.transport.get_model_evaluation, default_retry=self._method_configs[\"GetModelEvaluation\"].retry, default_timeout=self._method_configs[\"GetModelEvaluation\"].timeout, client_info=self._client_info, ) request = service_pb2.GetModelEvaluationRequest(name=name)", "the request failed for any reason. google.api_core.exceptions.RetryError: If the request", "desired output location. If a dict is provided, it must", "instance. By default, this is an iterable of :class:`~google.cloud.automl_v1beta1.types.Dataset` instances.", "): \"\"\" Deletes a model. If a model is already", "service_pb2.DeleteDatasetRequest(name=name) operation = self._inner_api_calls[\"delete_dataset\"]( request, retry=retry, timeout=timeout, metadata=metadata ) return", "is provided to the method. Returns: A :class:`~google.cloud.automl_v1beta1.types.Dataset` instance. Raises:", "model in AutoML BE, and does not change the status", "service_pb2.GetDatasetRequest(name=name) return self._inner_api_calls[\"get_dataset\"]( request, retry=retry, timeout=timeout, metadata=metadata ) def list_datasets(", "the model. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry", "request failed due to a retryable error and retry attempts", "retry=retry, timeout=timeout, metadata=metadata ) def get_dataset( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT,", "dataset with ID 5. 
page_size (int): The maximum number of", "`output_config`: >>> output_config = {} >>> >>> response = client.export_data(name,", "location=location, ) @classmethod def dataset_path(cls, project, location, dataset): \"\"\"Return a", "import pkg_resources import warnings from google.oauth2 import service_account import google.api_core.gapic_v1.client_info", "of the same form as the protobuf message :class:`~google.cloud.automl_v1beta1.types.InputConfig` retry", "self._inner_api_calls[ \"create_dataset\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.create_dataset, default_retry=self._method_configs[\"CreateDataset\"].retry, default_timeout=self._method_configs[\"CreateDataset\"].timeout, client_info=self._client_info, )", "applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata", "timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Lists model evaluations. Example: >>> from", "--> The model has image\\_classification\\_model\\_metadata. - ``dataset_id=5`` --> The model", "default_retry=self._method_configs[\"ListDatasets\"].retry, default_timeout=self._method_configs[\"ListDatasets\"].timeout, client_info=self._client_info, ) request = service_pb2.ListDatasetsRequest( parent=parent, filter=filter_, page_size=page_size", "model=model, model_evaluation=model_evaluation, ) def __init__( self, transport=None, channel=None, credentials=None, client_config=None,", "All imported annotations and examples will be added. input_config (Union[dict,", "the credentials from the environment. This argument is mutually exclusive", "filtering the results of the request. 
- ``dataset_metadata`` - for", "client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') >>> >>> # TODO: Initialize `output_config`: >>>", "model_pb2.Model, metadata_type=proto_operations_pb2.OperationMetadata, ) def get_model( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None,", "def list_datasets( self, parent, filter_=None, page_size=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ):", "The amount of time, in seconds, to wait for the", "Initialize `dataset`: >>> dataset = {} >>> >>> response =", "google.api_core.operation.from_gapic( operation, self.transport._operations_client, empty_pb2.Empty, metadata_type=proto_operations_pb2.OperationMetadata, ) def create_model( self, parent,", "Lists models. Example: >>> from google.cloud import automl_v1beta1 >>> >>>", "away. if client_config is not None: warnings.warn( \"The `client_config` argument", "in self._inner_api_calls: self._inner_api_calls[ \"delete_dataset\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.delete_dataset, default_retry=self._method_configs[\"DeleteDataset\"].retry, default_timeout=self._method_configs[\"DeleteDataset\"].timeout,", "for element in client.list_models(parent): ... # process element ... pass", "an iterable of :class:`~google.cloud.automl_v1beta1.types.Dataset` instances. This object can also be", "that is provided to the method. Returns: A :class:`~google.cloud.automl_v1beta1.types.Model` instance.", "method to add retry and timeout logic. if \"delete_model\" not", "{} # Service calls def create_dataset( self, parent, dataset, retry=google.api_core.gapic_v1.method.DEFAULT,", "to a Google Cloud Storage bucket. Returns an empty response", ">>> for page in client.list_models(parent).pages: ... 
for element in page:", "This argument may also be a callable which returns a", "default_retry=self._method_configs[\"UndeployModel\"].retry, default_timeout=self._method_configs[\"UndeployModel\"].timeout, client_info=self._client_info, ) request = service_pb2.UndeployModelRequest(name=name) return self._inner_api_calls[\"undeploy_model\"]( request,", "filter are: - ``annotation_spec_id!=4`` --> The model evaluation was done", "dictionary of call options for each method. If not specified,", "if \"get_model_evaluation\" not in self._inner_api_calls: self._inner_api_calls[ \"get_model_evaluation\" ] = google.api_core.gapic_v1.method.wrap_method(", "dataset. output_config (Union[dict, ~google.cloud.automl_v1beta1.types.OutputConfig]): Required. The desired output location. If", "from google.cloud.automl_v1beta1.proto import data_items_pb2 from google.cloud.automl_v1beta1.proto import dataset_pb2 from google.cloud.automl_v1beta1.proto", ">>> for element in client.list_datasets(parent): ... # process element ...", "Args: name (str): Required. Dataset name. Dataset must already exist.", "model evaluation was done for annotation spec with ID different", "credentials return cls(*args, **kwargs) from_service_account_json = from_service_account_file @classmethod def location_path(cls,", "with ID 5. page_size (int): The maximum number of resources", "each RPC # from the client configuration. 
# (Ordinarily, these", "= credentials return cls(*args, **kwargs) from_service_account_json = from_service_account_file @classmethod def", "self._inner_api_calls: self._inner_api_calls[ \"get_model\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.get_model, default_retry=self._method_configs[\"GetModel\"].retry, default_timeout=self._method_configs[\"GetModel\"].timeout, client_info=self._client_info,", ">>> >>> response = client.create_dataset(parent, dataset) Args: parent (str): The", "iterator = google.api_core.page_iterator.GRPCIterator( client=None, method=functools.partial( self._inner_api_calls[\"list_model_evaluations\"], retry=retry, timeout=timeout, metadata=metadata, ),", ") else: client_config = auto_ml_client_config.config if channel: warnings.warn( \"The `channel`", "create. If a dict is provided, it must be of", "client.deploy_model(name) Args: name (str): Resource name of the model to", "is: - ``translation_dataset_metadata:*`` --> The dataset has translation\\_dataset\\_metadata. page_size (int):", "timeout=timeout, metadata=metadata ) def undeploy_model( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None,", "iterator = google.api_core.page_iterator.GRPCIterator( client=None, method=functools.partial( self._inner_api_calls[\"list_models\"], retry=retry, timeout=timeout, metadata=metadata, ),", ">>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient()", "not be retried. timeout (Optional[float]): The amount of time, in", "response = client.get_model_evaluation(name) Args: name (str): Resource name for the", "models of the parent location. 
filter_ (str): An expression for", "metadata=metadata ) def undeploy_model( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ):", "# Copyright 2018 Google LLC # # Licensed under the", "): \"\"\" Lists datasets in a project. Example: >>> from", "the item's resource name. For ``projects/{project_id}/locations/{location_id}/datasets/{dataset_id}``, then the id for", "import google.api_core.path_template import grpc from google.cloud.automl_v1beta1.gapic import auto_ml_client_config from google.cloud.automl_v1beta1.gapic", "name (str): Resource name of the model being deleted. retry", "\"\"\" Lists datasets in a project. Example: >>> from google.cloud", "Returns an ``UndeployModelResponse`` in the ``response`` field when it completes.", "metadata. >>> metadata = response.metadata() Args: name (str): Resource name", "if \"create_dataset\" not in self._inner_api_calls: self._inner_api_calls[ \"create_dataset\" ] = google.api_core.gapic_v1.method.wrap_method(", "model_path(cls, project, location, model): \"\"\"Return a fully-qualified model string.\"\"\" return", "from google.cloud.automl_v1beta1.gapic.transports import auto_ml_grpc_transport from google.cloud.automl_v1beta1.proto import data_items_pb2 from google.cloud.automl_v1beta1.proto", "aggregate of all annotation specs. page_size (int): The maximum number", "= transport else: self.transport = auto_ml_grpc_transport.AutoMlGrpcTransport( address=self.SERVICE_ADDRESS, channel=channel, credentials=credentials )", "page: ... # process element ... pass Args: parent (str):", "``UndeployModelResponse`` in the ``response`` field when it completes. 
Example: >>>", "model_evaluation_pb2 from google.cloud.automl_v1beta1.proto import model_pb2 from google.cloud.automl_v1beta1.proto import operations_pb2 as", ">>> >>> client = automl_v1beta1.AutoMlClient() >>> >>> parent = client.model_path('[PROJECT]',", "status of the deployed model in the production environment. Returns", "callable(transport): self.transport = transport( credentials=credentials, default_class=auto_ml_grpc_transport.AutoMlGrpcTransport, ) else: if credentials:", "retry=retry, timeout=timeout, metadata=metadata, ), request=request, items_field=\"model_evaluation\", request_token_field=\"page_token\", response_token_field=\"next_page_token\", ) return", "for each RPC # from the client configuration. # (Ordinarily,", "one evaluation for each annotation spec. Example: >>> from google.cloud", "element in client.list_model_evaluations(parent): ... # process element ... pass >>>", "the credentials as the first argument and the default transport", "the parent location. filter_ (str): An expression for filtering the", "in the ``metadata`` field. Example: >>> from google.cloud import automl_v1beta1", "is performed per-page, this determines the maximum number of resources", "service.\"\"\" # The name of the interface for this client.", "never reuses names that it has created after the resources", "model to list the model evaluations for. If modelId is", "return self._inner_api_calls[\"deploy_model\"]( request, retry=retry, timeout=timeout, metadata=metadata ) def undeploy_model( self,", "Instantiate the transport. # The transport is responsible for handling", "be used. Generally, you only need to set this if", "key used to # find the method configuration in the", "timeout logic. 
if \"create_model\" not in self._inner_api_calls: self._inner_api_calls[ \"create_model\" ]", "You may obtain a copy of the License at #", "location=location, dataset=dataset, ) @classmethod def model_path(cls, project, location, model): \"\"\"Return", "transport method to add retry and timeout logic. if \"list_datasets\"", "import prediction_service_pb2_grpc from google.cloud.automl_v1beta1.proto import service_pb2 from google.cloud.automl_v1beta1.proto import service_pb2_grpc", ">>> response = client.get_model(name) Args: name (str): Resource name of", "this application to the service. If none are specified, the", "parent, filter_=None, page_size=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Lists datasets", "``transport``; doing so will raise an exception. client_config (dict): DEPRECATED.", "\"\"\"Return a fully-qualified model_evaluation string.\"\"\" return google.api_core.path_template.expand( \"projects/{project}/locations/{location}/models/{model}/modelEvaluations/{model_evaluation}\", project=project, location=location,", "``metadata`` field. Example: >>> from google.cloud import automl_v1beta1 >>> >>>", "page at a time >>> for page in client.list_model_evaluations(parent).pages: ...", "callables which invoke the proper # transport methods, wrapped with", "model is being created. model (Union[dict, ~google.cloud.automl_v1beta1.types.Model]): The model to", "name of the interface for this client. This is the", "file. args: Additional arguments to pass to the constructor. kwargs:", "specs. page_size (int): The maximum number of resources contained in", "): \"\"\" Deletes a dataset and all of its contents.", "Handle result. ... 
result = operation_future.result() >>> >>> response.add_done_callback(callback) >>>", "from google.cloud.automl_v1beta1.proto import io_pb2 from google.cloud.automl_v1beta1.proto import model_evaluation_pb2 from google.cloud.automl_v1beta1.proto", "\"create_dataset\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.create_dataset, default_retry=self._method_configs[\"CreateDataset\"].retry, default_timeout=self._method_configs[\"CreateDataset\"].timeout, client_info=self._client_info, ) request", "from google.cloud.automl_v1beta1.proto import model_pb2 from google.cloud.automl_v1beta1.proto import operations_pb2 as proto_operations_pb2", "google.api_core.gapic_v1.method.wrap_method( self.transport.undeploy_model, default_retry=self._method_configs[\"UndeployModel\"].retry, default_timeout=self._method_configs[\"UndeployModel\"].timeout, client_info=self._client_info, ) request = service_pb2.UndeployModelRequest(name=name) return", "] = google.api_core.gapic_v1.method.wrap_method( self.transport.delete_model, default_retry=self._method_configs[\"DeleteModel\"].retry, default_timeout=self._method_configs[\"DeleteModel\"].timeout, client_info=self._client_info, ) request =", "the second argument. channel (grpc.Channel): DEPRECATED. 
A ``Channel`` instance through", "filter=filter_, page_size=page_size ) iterator = google.api_core.page_iterator.GRPCIterator( client=None, method=functools.partial( self._inner_api_calls[\"list_datasets\"], retry=retry,", "page_size=page_size ) iterator = google.api_core.page_iterator.GRPCIterator( client=None, method=functools.partial( self._inner_api_calls[\"list_datasets\"], retry=retry, timeout=timeout,", ">>> response = client.create_model(parent, model) >>> >>> def callback(operation_future): ...", "get_model( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Gets a", "(int): The maximum number of resources contained in the underlying", "then default info will be used. Generally, you only need", "``delete_details`` in the ``metadata`` field. Example: >>> from google.cloud import", "default, this is an iterable of :class:`~google.cloud.automl_v1beta1.types.Model` instances. This object", "of the project from which to list datasets. filter_ (str):", "filter_=None, page_size=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Lists model evaluations.", "return google.api_core.path_template.expand( \"projects/{project}/locations/{location}/models/{model}/modelEvaluations/{model_evaluation}\", project=project, location=location, model=model, model_evaluation=model_evaluation, ) def __init__(", "= client.get_model(name) Args: name (str): Resource name of the model.", "Args: name (str): Resource name of the model. retry (Optional[google.api_core.retry.Retry]):", "metadata=None, ): \"\"\" Imports data into a dataset. 
Returns an", "credentials = service_account.Credentials.from_service_account_file(filename) kwargs[\"credentials\"] = credentials return cls(*args, **kwargs) from_service_account_json", "self.transport.list_model_evaluations, default_retry=self._method_configs[\"ListModelEvaluations\"].retry, default_timeout=self._method_configs[\"ListModelEvaluations\"].timeout, client_info=self._client_info, ) request = service_pb2.ListModelEvaluationsRequest( parent=parent, filter=filter_,", "google.api_core.gapic_v1.method.wrap_method( self.transport.get_model_evaluation, default_retry=self._method_configs[\"GetModelEvaluation\"].retry, default_timeout=self._method_configs[\"GetModelEvaluation\"].timeout, client_info=self._client_info, ) request = service_pb2.GetModelEvaluationRequest(name=name) return", "if \"get_dataset\" not in self._inner_api_calls: self._inner_api_calls[ \"get_dataset\" ] = google.api_core.gapic_v1.method.wrap_method(", "- for existence of the case. An example of using", "find the method configuration in the client_config dictionary. _INTERFACE_NAME =", "timeout logic. if \"deploy_model\" not in self._inner_api_calls: self._inner_api_calls[ \"deploy_model\" ]", "(str): The resource name of the project to create the", "specified, the client will attempt to ascertain the credentials from", "Resource name of the model to deploy. retry (Optional[google.api_core.retry.Retry]): A", "transport instance and \" \"credentials; these are mutually exclusive.\" )", "def delete_model( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Deletes", "to undeploy. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry", "!= or existence. See example below for the last. Some", ":class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. 
Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any", "protobuf message :class:`~google.cloud.automl_v1beta1.types.Dataset` retry (Optional[google.api_core.retry.Retry]): A retry object used to", "] = google.api_core.gapic_v1.method.wrap_method( self.transport.list_model_evaluations, default_retry=self._method_configs[\"ListModelEvaluations\"].retry, default_timeout=self._method_configs[\"ListModelEvaluations\"].timeout, client_info=self._client_info, ) request =", "parent project where the model is being created. model (Union[dict,", "with `wrap_method` to add retry, # timeout, and the like.", "google.api_core.page_iterator.GRPCIterator( client=None, method=functools.partial( self._inner_api_calls[\"list_models\"], retry=retry, timeout=timeout, metadata=metadata, ), request=request, items_field=\"model\",", "resources in a page. retry (Optional[google.api_core.retry.Retry]): A retry object used", "\" \"`transport` instead.\", PendingDeprecationWarning, stacklevel=2, ) # Instantiate the transport.", "= self._inner_api_calls[\"export_data\"]( request, retry=retry, timeout=timeout, metadata=metadata ) return google.api_core.operation.from_gapic( operation,", "grpc from google.cloud.automl_v1beta1.gapic import auto_ml_client_config from google.cloud.automl_v1beta1.gapic import enums from", "@classmethod def from_service_account_file(cls, filename, *args, **kwargs): \"\"\"Creates an instance of", "from google.longrunning import operations_pb2 as longrunning_operations_pb2 from google.protobuf import empty_pb2", "used. Generally, you only need to set this if you're", "the same form as the protobuf message :class:`~google.cloud.automl_v1beta1.types.InputConfig` retry (Optional[google.api_core.retry.Retry]):", "metadata. >>> metadata = response.metadata() Args: name (str): Required. Dataset", "was created from a sibling dataset with ID 5. page_size", "google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. 
google.api_core.exceptions.RetryError: If", "metadata=None, ): \"\"\" Deploys model. Returns a ``DeployModelResponse`` in the", "Args: name (str): Resource name of the model being deleted.", "License. # You may obtain a copy of the License", "The maximum number of resources contained in the underlying API", "method to add retry and timeout logic. if \"list_datasets\" not", "self._inner_api_calls: self._inner_api_calls[ \"import_data\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.import_data, default_retry=self._method_configs[\"ImportData\"].retry, default_timeout=self._method_configs[\"ImportData\"].timeout, client_info=self._client_info,", "parameters are invalid. \"\"\" # Wrap the transport method to", "retry and timeout logic. if \"deploy_model\" not in self._inner_api_calls: self._inner_api_calls[", "empty_pb2.Empty, metadata_type=proto_operations_pb2.OperationMetadata, ) def deploy_model( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None,", "client.export_data(name, output_config) >>> >>> def callback(operation_future): ... # Handle result.", "reuses names that it has created after the resources with", "default_timeout=self._method_configs[\"ListModelEvaluations\"].timeout, client_info=self._client_info, ) request = service_pb2.ListModelEvaluationsRequest( parent=parent, filter=filter_, page_size=page_size )", "which returns a transport instance. Callables will be sent the", "used to # find the method configuration in the client_config", "to delete. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry", "transport method to add retry and timeout logic. if \"delete_dataset\"", "environment. 
This argument is mutually exclusive with providing a transport", "= google.api_core.gapic_v1.method.wrap_method( self.transport.list_datasets, default_retry=self._method_configs[\"ListDatasets\"].retry, default_timeout=self._method_configs[\"ListDatasets\"].timeout, client_info=self._client_info, ) request = service_pb2.ListDatasetsRequest(", "be of the same form as the protobuf message :class:`~google.cloud.automl_v1beta1.types.OutputConfig`", "= google.api_core.gapic_v1.config.parse_method_configs( client_config[\"interfaces\"][self._INTERFACE_NAME] ) # Save a dictionary of cached", "a transport instance to ``transport``; doing so will raise an", "delete. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests.", "the License. \"\"\"Accesses the google.cloud.automl.v1beta1 AutoMl API.\"\"\" import functools import", "to retry requests. If ``None`` is specified, requests will not", "project. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client", "parent=parent, filter=filter_, page_size=page_size ) iterator = google.api_core.page_iterator.GRPCIterator( client=None, method=functools.partial( self._inner_api_calls[\"list_models\"],", "self._inner_api_calls: self._inner_api_calls[ \"get_dataset\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.get_dataset, default_retry=self._method_configs[\"GetDataset\"].retry, default_timeout=self._method_configs[\"GetDataset\"].timeout, client_info=self._client_info,", "A dictionary of call options for each method. If not", "form as the protobuf message :class:`~google.cloud.automl_v1beta1.types.Model` retry (Optional[google.api_core.retry.Retry]): A retry", "the API calls. 
The default transport uses the gRPC protocol.", "self.transport.get_model_evaluation, default_retry=self._method_configs[\"GetModelEvaluation\"].retry, default_timeout=self._method_configs[\"GetModelEvaluation\"].timeout, client_info=self._client_info, ) request = service_pb2.GetModelEvaluationRequest(name=name) return self._inner_api_calls[\"get_model_evaluation\"](", "can also be configured to iterate over the pages of", "``credentials``; providing both will raise an exception. credentials (google.auth.credentials.Credentials): The", "empty_pb2.Empty, metadata_type=proto_operations_pb2.OperationMetadata, ) def export_data( self, name, output_config, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT,", "\"delete_dataset\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.delete_dataset, default_retry=self._method_configs[\"DeleteDataset\"].retry, default_timeout=self._method_configs[\"DeleteDataset\"].timeout, client_info=self._client_info, ) request", "PendingDeprecationWarning, stacklevel=2, ) # Instantiate the transport. # The transport", "is responsible for handling serialization and # deserialization and actually", "be added. input_config (Union[dict, ~google.cloud.automl_v1beta1.types.InputConfig]): Required. The desired input location.", "transport class as the second argument. channel (grpc.Channel): DEPRECATED. A", "ID 5. page_size (int): The maximum number of resources contained", "not specified, the default configuration is used. 
client_info (google.api_core.gapic_v1.client_info.ClientInfo): The", "in self._inner_api_calls: self._inner_api_calls[ \"get_model\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.get_model, default_retry=self._method_configs[\"GetModel\"].retry, default_timeout=self._method_configs[\"GetModel\"].timeout,", "location): \"\"\"Return a fully-qualified location string.\"\"\" return google.api_core.path_template.expand( \"projects/{project}/locations/{location}\", project=project,", "Undeploys model. Returns an ``UndeployModelResponse`` in the ``response`` field when", "to # find the method configuration in the client_config dictionary.", "(Union[dict, ~google.cloud.automl_v1beta1.types.Model]): The model to create. If a dict is", "= client.create_model(parent, model) >>> >>> def callback(operation_future): ... # Handle", "a retryable error and retry attempts failed. ValueError: If the", "client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') >>> >>> # TODO: Initialize `input_config`: >>>", "the request. - ``annotation_spec_id`` - for =, != or existence.", "same form as the protobuf message :class:`~google.cloud.automl_v1beta1.types.OutputConfig` retry (Optional[google.api_core.retry.Retry]): A", "request to complete. Note that if ``retry`` is specified, the", "operation_future.result() >>> >>> response.add_done_callback(callback) >>> >>> # Handle metadata. >>>", "~google.cloud.automl_v1beta1.types.Model]): The model to create. If a dict is provided,", "# The name of the interface for this client. This", "filter is: - ``translation_dataset_metadata:*`` --> The dataset has translation\\_dataset\\_metadata. page_size", "return value. If page streaming is performed per-page, this determines", "requests. If ``None``, then default info will be used. Generally,", "the method. Returns: A :class:`~google.cloud.automl_v1beta1.types.Model` instance. 
Raises: google.api_core.exceptions.GoogleAPICallError: If the", "not in self._inner_api_calls: self._inner_api_calls[ \"create_model\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.create_model, default_retry=self._method_configs[\"CreateModel\"].retry,", "Wrap the transport method to add retry and timeout logic.", "that it has created after the resources with those names", "google.api_core.operations_v1 import google.api_core.page_iterator import google.api_core.path_template import grpc from google.cloud.automl_v1beta1.gapic import", ">>> parent = client.location_path('[PROJECT]', '[LOCATION]') >>> >>> # TODO: Initialize", "google.api_core.gapic_v1.method.wrap_method( self.transport.list_datasets, default_retry=self._method_configs[\"ListDatasets\"].retry, default_timeout=self._method_configs[\"ListDatasets\"].timeout, client_info=self._client_info, ) request = service_pb2.ListDatasetsRequest( parent=parent,", "client = automl_v1beta1.AutoMlClient() >>> >>> name = client.model_evaluation_path('[PROJECT]', '[LOCATION]', '[MODEL]',", "PendingDeprecationWarning, stacklevel=2, ) else: client_config = auto_ml_client_config.config if channel: warnings.warn(", "dataset to delete. retry (Optional[google.api_core.retry.Retry]): A retry object used to", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "dataset (Union[dict, ~google.cloud.automl_v1beta1.types.Dataset]): The dataset to create. If a dict", "method to add retry and timeout logic. if \"export_data\" not", "client.location_path('[PROJECT]', '[LOCATION]') >>> >>> # Iterate over all results >>>", "for the specific language governing permissions and # limitations under", "dataset = {} >>> >>> response = client.create_dataset(parent, dataset) Args:", "resource name of the project from which to list datasets.", "Google Cloud Storage bucket. 
Returns an empty response in the", "model_pb2 from google.cloud.automl_v1beta1.proto import operations_pb2 as proto_operations_pb2 from google.cloud.automl_v1beta1.proto import", "\"delete_dataset\" not in self._inner_api_calls: self._inner_api_calls[ \"delete_dataset\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.delete_dataset,", "client.location_path('[PROJECT]', '[LOCATION]') >>> >>> # TODO: Initialize `dataset`: >>> dataset", "'[DATASET]') >>> >>> # TODO: Initialize `input_config`: >>> input_config =", "not in self._inner_api_calls: self._inner_api_calls[ \"list_models\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.list_models, default_retry=self._method_configs[\"ListModels\"].retry,", "a resource is the last element of the item's resource", "required by applicable law or agreed to in writing, software", "string.\"\"\" return google.api_core.path_template.expand( \"projects/{project}/locations/{location}/models/{model}/modelEvaluations/{model_evaluation}\", project=project, location=location, model=model, model_evaluation=model_evaluation, ) def", "evaluation, and one evaluation for each annotation spec. Example: >>>", "request_token_field=\"page_token\", response_token_field=\"next_page_token\", ) return iterator def delete_dataset( self, name, retry=google.api_core.gapic_v1.method.DEFAULT,", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Gets a model. Example: >>>", "import data_items_pb2 from google.cloud.automl_v1beta1.proto import dataset_pb2 from google.cloud.automl_v1beta1.proto import io_pb2", "(Union[dict, ~google.cloud.automl_v1beta1.types.InputConfig]): Required. The desired input location. 
If a dict", "def dataset_path(cls, project, location, dataset): \"\"\"Return a fully-qualified dataset string.\"\"\"", "name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') >>> >>> response = client.delete_model(name)", ">>> for page in client.list_model_evaluations(parent).pages: ... for element in page:", "in client.list_model_evaluations(parent).pages: ... for element in page: ... # process", ">>> for page in client.list_datasets(parent).pages: ... for element in page:", "] = google.api_core.gapic_v1.method.wrap_method( self.transport.get_dataset, default_retry=self._method_configs[\"GetDataset\"].retry, default_timeout=self._method_configs[\"GetDataset\"].timeout, client_info=self._client_info, ) request =", "# (Ordinarily, these are the defaults specified in the `*_config.py`", "metadata. >>> metadata = response.metadata() Args: name (str): The resource", "datasets. filter_ (str): An expression for filtering the results of", "`input_config`: >>> input_config = {} >>> >>> response = client.import_data(name,", "string.\"\"\" return google.api_core.path_template.expand( \"projects/{project}/locations/{location}\", project=project, location=location, ) @classmethod def dataset_path(cls,", "method=functools.partial( self._inner_api_calls[\"list_model_evaluations\"], retry=retry, timeout=timeout, metadata=metadata, ), request=request, items_field=\"model_evaluation\", request_token_field=\"page_token\", response_token_field=\"next_page_token\",", "agreed to in writing, software # distributed under the License", "to requests. These credentials identify this application to the service.", "client = automl_v1beta1.AutoMlClient() >>> >>> parent = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]')", "from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>>", "``None``, then default info will be used. 
Generally, you only", "default_class=auto_ml_grpc_transport.AutoMlGrpcTransport, ) else: if credentials: raise ValueError( \"Received both a", "does not change the status of the deployed model in", "distributed under the License is distributed on an \"AS IS\"", "\"delete_model\" not in self._inner_api_calls: self._inner_api_calls[ \"delete_model\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.delete_model,", "= client.location_path('[PROJECT]', '[LOCATION]') >>> >>> # TODO: Initialize `dataset`: >>>", "request = service_pb2.ListModelEvaluationsRequest( parent=parent, filter=filter_, page_size=page_size ) iterator = google.api_core.page_iterator.GRPCIterator(", "self.transport._operations_client, model_pb2.Model, metadata_type=proto_operations_pb2.OperationMetadata, ) def get_model( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT,", "(str): The resource name of the project from which to", "in self._inner_api_calls: self._inner_api_calls[ \"delete_model\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.delete_model, default_retry=self._method_configs[\"DeleteModel\"].retry, default_timeout=self._method_configs[\"DeleteModel\"].timeout,", "will be added. input_config (Union[dict, ~google.cloud.automl_v1beta1.types.InputConfig]): Required. The desired input", "timeout=timeout, metadata=metadata ) def get_model_evaluation( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None,", "self._inner_api_calls = {} # Service calls def create_dataset( self, parent,", "metadata that is provided to the method. Returns: A :class:`~google.cloud.automl_v1beta1.types.Dataset`", "return self._inner_api_calls[\"get_model_evaluation\"]( request, retry=retry, timeout=timeout, metadata=metadata ) def list_model_evaluations( self,", "logic. 
if \"list_models\" not in self._inner_api_calls: self._inner_api_calls[ \"list_models\" ] =", "the actual callables which invoke the proper # transport methods,", "client. \"\"\" credentials = service_account.Credentials.from_service_account_file(filename) kwargs[\"credentials\"] = credentials return cls(*args,", "self, parent, dataset, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Creates a", "time, in seconds, to wait for the request to complete.", ">>> client = automl_v1beta1.AutoMlClient() >>> >>> name = client.model_path('[PROJECT]', '[LOCATION]',", "for. dataset (Union[dict, ~google.cloud.automl_v1beta1.types.Dataset]): The dataset to create. If a", "{} >>> >>> response = client.export_data(name, output_config) >>> >>> def", "Required. The desired output location. If a dict is provided,", "the project to create the dataset for. dataset (Union[dict, ~google.cloud.automl_v1beta1.types.Dataset]):", "the service account private key json file. args: Additional arguments", ") return google.api_core.operation.from_gapic( operation, self.transport._operations_client, empty_pb2.Empty, metadata_type=proto_operations_pb2.OperationMetadata, ) def deploy_model(", "use \" \"`transport` instead.\", PendingDeprecationWarning, stacklevel=2, ) # Instantiate the", "element in client.list_datasets(parent): ... # process element ... pass >>>", "sibling dataset with ID 5. page_size (int): The maximum number", "method. Returns: A :class:`~google.cloud.automl_v1beta1.types.Model` instance. 
Raises: google.api_core.exceptions.GoogleAPICallError: If the request", "service_pb2.ListModelEvaluationsRequest( parent=parent, filter=filter_, page_size=page_size ) iterator = google.api_core.page_iterator.GRPCIterator( client=None, method=functools.partial(", "] = google.api_core.gapic_v1.method.wrap_method( self.transport.list_datasets, default_retry=self._method_configs[\"ListDatasets\"].retry, default_timeout=self._method_configs[\"ListDatasets\"].timeout, client_info=self._client_info, ) request =", "each annotation spec. Example: >>> from google.cloud import automl_v1beta1 >>>", "Deletes a model. If a model is already deployed, this", "method. Returns: A :class:`~google.gax.PageIterator` instance. By default, this is an", "google.api_core.path_template.expand( \"projects/{project}/locations/{location}/models/{model}\", project=project, location=location, model=model, ) @classmethod def model_evaluation_path(cls, project,", "in client.list_model_evaluations(parent): ... # process element ... pass >>> >>>", "``Channel`` instance through which to make calls. This argument is", "output_config) >>> >>> def callback(operation_future): ... # Handle result. ...", "= auto_ml_grpc_transport.AutoMlGrpcTransport( address=self.SERVICE_ADDRESS, channel=channel, credentials=credentials ) if client_info is None:", "are deleted. An ID of a resource is the last", "``annotation_spec_id!=4`` --> The model evaluation was done for annotation spec", "\"deploy_model\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.deploy_model, default_retry=self._method_configs[\"DeployModel\"].retry, default_timeout=self._method_configs[\"DeployModel\"].timeout, client_info=self._client_info, ) request", "dataset, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Creates a dataset. Example:", "The model to create. 
If a dict is provided, it", "provided, it must be of the same form as the", "transport method to add retry and timeout logic. if \"delete_model\"", "The model evaluation was done for aggregate of all annotation", "not in self._inner_api_calls: self._inner_api_calls[ \"delete_dataset\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.delete_dataset, default_retry=self._method_configs[\"DeleteDataset\"].retry,", "page_size=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Lists models. Example: >>>", "metadata=None, ): \"\"\" Deletes a dataset and all of its", "name of the model. retry (Optional[google.api_core.retry.Retry]): A retry object used", "return self._inner_api_calls[\"create_dataset\"]( request, retry=retry, timeout=timeout, metadata=metadata ) def get_dataset( self,", "iterable of :class:`~google.cloud.automl_v1beta1.types.Dataset` instances. This object can also be configured", "response.add_done_callback(callback) >>> >>> # Handle metadata. >>> metadata = response.metadata()", "to add retry and timeout logic. if \"delete_dataset\" not in", ") iterator = google.api_core.page_iterator.GRPCIterator( client=None, method=functools.partial( self._inner_api_calls[\"list_models\"], retry=retry, timeout=timeout, metadata=metadata,", "pass Args: parent (str): Resource name of the model to", "for each annotation spec. Example: >>> from google.cloud import automl_v1beta1", "return google.api_core.path_template.expand( \"projects/{project}/locations/{location}/models/{model}\", project=project, location=location, model=model, ) @classmethod def model_evaluation_path(cls,", "for any reason. google.api_core.exceptions.RetryError: If the request failed due to", "'[DATASET]') >>> >>> response = client.get_dataset(name) Args: name (str): The", "timeout logic. 
if \"list_model_evaluations\" not in self._inner_api_calls: self._inner_api_calls[ \"list_model_evaluations\" ]", "import operations_pb2 as longrunning_operations_pb2 from google.protobuf import empty_pb2 _GAPIC_LIBRARY_VERSION =", "provided credentials file. Args: filename (str): The path to the", "and timeout logic. if \"list_model_evaluations\" not in self._inner_api_calls: self._inner_api_calls[ \"list_model_evaluations\"", "request = service_pb2.DeleteDatasetRequest(name=name) operation = self._inner_api_calls[\"delete_dataset\"]( request, retry=retry, timeout=timeout, metadata=metadata", "= client.model_evaluation_path('[PROJECT]', '[LOCATION]', '[MODEL]', '[MODEL_EVALUATION]') >>> >>> response = client.get_model_evaluation(name)", "the last. Some examples of using the filter are: -", "\"export_data\" not in self._inner_api_calls: self._inner_api_calls[ \"export_data\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.export_data,", "service_pb2_grpc from google.longrunning import operations_pb2 as longrunning_operations_pb2 from google.protobuf import", "page_size=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Lists model evaluations. Example:", "operations_pb2 as proto_operations_pb2 from google.cloud.automl_v1beta1.proto import prediction_service_pb2 from google.cloud.automl_v1beta1.proto import", "OR CONDITIONS OF ANY KIND, either express or implied. #", "@classmethod def model_evaluation_path(cls, project, location, model, model_evaluation): \"\"\"Return a fully-qualified", "These credentials identify this application to the service. If none", "!=. Some examples of using the filter are: - ``image_classification_model_metadata:*``", "enums from google.cloud.automl_v1beta1.gapic.transports import auto_ml_grpc_transport from google.cloud.automl_v1beta1.proto import data_items_pb2 from", "the last element of the item's resource name. 
For ``projects/{project_id}/locations/{location_id}/datasets/{dataset_id}``,", "name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Gets a dataset. Example:", "the License is distributed on an \"AS IS\" BASIS, #", ") iterator = google.api_core.page_iterator.GRPCIterator( client=None, method=functools.partial( self._inner_api_calls[\"list_model_evaluations\"], retry=retry, timeout=timeout, metadata=metadata,", "page_size=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Lists datasets in a", "filter_=None, page_size=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Lists models. Example:", "for =, != or existence. See example below for the", "to the method. Returns: A :class:`~google.cloud.automl_v1beta1.types.Model` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If", "= \"google.cloud.automl.v1beta1.AutoMl\" @classmethod def from_service_account_file(cls, filename, *args, **kwargs): \"\"\"Creates an", "timeout for each RPC # from the client configuration. #", "at a time >>> for page in client.list_model_evaluations(parent).pages: ... for", "the dataset to delete. 
retry (Optional[google.api_core.retry.Retry]): A retry object used", "\"export_data\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.export_data, default_retry=self._method_configs[\"ExportData\"].retry, default_timeout=self._method_configs[\"ExportData\"].timeout, client_info=self._client_info, ) request", ">>> client = automl_v1beta1.AutoMlClient() >>> >>> parent = client.model_path('[PROJECT]', '[LOCATION]',", "protobuf message :class:`~google.cloud.automl_v1beta1.types.OutputConfig` retry (Optional[google.api_core.retry.Retry]): A retry object used to", "(Ordinarily, these are the defaults specified in the `*_config.py` #", "If modelId is set as \"-\", this will list model", "element ... pass Args: parent (str): Resource name of the", "By default, this is an iterable of :class:`~google.cloud.automl_v1beta1.types.Dataset` instances. This", "configured to iterate over the pages of the response through", "the request. - ``dataset_metadata`` - for existence of the case.", "timeout logic. if \"delete_model\" not in self._inner_api_calls: self._inner_api_calls[ \"delete_model\" ]", "method configuration in the client_config dictionary. _INTERFACE_NAME = \"google.cloud.automl.v1beta1.AutoMl\" @classmethod", "law or agreed to in writing, software # distributed under", "This object can also be configured to iterate over the", "this one.) self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( client_config[\"interfaces\"][self._INTERFACE_NAME] ) # Save a", "# from the client configuration. # (Ordinarily, these are the", ">>> name = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') >>> >>> response =", "of this client using the provided credentials file. Args: filename", "Returns: A :class:`~google.cloud.automl_v1beta1.types.Operation` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed", "parameter does not affect the return value. If page streaming", "retry and timeout logic. 
if \"get_model\" not in self._inner_api_calls: self._inner_api_calls[", "an empty response in the ``response`` field when it completes.", "model_evaluation=model_evaluation, ) def __init__( self, transport=None, channel=None, credentials=None, client_config=None, client_info=None,", ">>> >>> client = automl_v1beta1.AutoMlClient() >>> >>> name = client.model_path('[PROJECT]',", "the key used to # find the method configuration in", "done for annotation spec with ID different than 4. -", "= service_account.Credentials.from_service_account_file(filename) kwargs[\"credentials\"] = credentials return cls(*args, **kwargs) from_service_account_json =", "automl_v1beta1.AutoMlClient() >>> >>> parent = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') >>> >>>", "get_dataset( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Gets a", "AutoMlClient(object): \"\"\" AutoML Server API. The resource names are assigned", "delete_dataset( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Deletes a", "to create the dataset for. dataset (Union[dict, ~google.cloud.automl_v1beta1.types.Dataset]): The dataset", "# Raise deprecation warnings for things we want to go", "self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Deletes a dataset", "for page in client.list_datasets(parent).pages: ... for element in page: ...", "name (str): Required. Dataset name. Dataset must already exist. All", "may obtain a copy of the License at # #", "names that it has created after the resources with those", "logic. 
if \"get_dataset\" not in self._inner_api_calls: self._inner_api_calls[ \"get_dataset\" ] =", "the service.\"\"\" # The name of the interface for this", "string.\"\"\" return google.api_core.path_template.expand( \"projects/{project}/locations/{location}/datasets/{dataset}\", project=project, location=location, dataset=dataset, ) @classmethod def", "= service_pb2.GetDatasetRequest(name=name) return self._inner_api_calls[\"get_dataset\"]( request, retry=retry, timeout=timeout, metadata=metadata ) def", "= service_pb2.DeleteDatasetRequest(name=name) operation = self._inner_api_calls[\"delete_dataset\"]( request, retry=retry, timeout=timeout, metadata=metadata )", "See example below for the last. Some examples of using", "logic. if \"list_model_evaluations\" not in self._inner_api_calls: self._inner_api_calls[ \"list_model_evaluations\" ] =", ") @classmethod def model_evaluation_path(cls, project, location, model, model_evaluation): \"\"\"Return a", "{} >>> >>> response = client.import_data(name, input_config) >>> >>> def", "results >>> for element in client.list_models(parent): ... # process element", "\"list_model_evaluations\" not in self._inner_api_calls: self._inner_api_calls[ \"list_model_evaluations\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.list_model_evaluations,", "evaluation was done for annotation spec with ID different than", "address of the service.\"\"\" # The name of the interface", "import grpc from google.cloud.automl_v1beta1.gapic import auto_ml_client_config from google.cloud.automl_v1beta1.gapic import enums", "client=None, method=functools.partial( self._inner_api_calls[\"list_datasets\"], retry=retry, timeout=timeout, metadata=metadata, ), request=request, items_field=\"datasets\", request_token_field=\"page_token\",", "metadata=None, ): \"\"\" Gets a model. Example: >>> from google.cloud", "is an iterable of :class:`~google.cloud.automl_v1beta1.types.Dataset` instances. This object can also", "the method. 
Returns: A :class:`~google.cloud.automl_v1beta1.types.ModelEvaluation` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the", "has created after the resources with those names are deleted.", "may not use this file except in compliance with the", "in a project. Example: >>> from google.cloud import automl_v1beta1 >>>", ">>> metadata = response.metadata() Args: name (str): The resource name", "``response`` field when it completes. When you create a model,", "= service_pb2.ListModelsRequest( parent=parent, filter=filter_, page_size=page_size ) iterator = google.api_core.page_iterator.GRPCIterator( client=None,", "def list_model_evaluations( self, parent, filter_=None, page_size=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ):", "... result = operation_future.result() >>> >>> response.add_done_callback(callback) >>> >>> #", "name of the dataset. output_config (Union[dict, ~google.cloud.automl_v1beta1.types.OutputConfig]): Required. The desired", "metadata_type=proto_operations_pb2.OperationMetadata, ) def create_model( self, parent, model, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None,", "of the project to create the dataset for. dataset (Union[dict,", "this file except in compliance with the License. 
# You", "google.api_core.path_template.expand( \"projects/{project}/locations/{location}/models/{model}/modelEvaluations/{model_evaluation}\", project=project, location=location, model=model, model_evaluation=model_evaluation, ) def __init__( self,", "= response.metadata() Args: name (str): Resource name of the model", "request=request, items_field=\"datasets\", request_token_field=\"page_token\", response_token_field=\"next_page_token\", ) return iterator def delete_dataset( self,", "2018 Google LLC # # Licensed under the Apache License,", "not in self._inner_api_calls: self._inner_api_calls[ \"import_data\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.import_data, default_retry=self._method_configs[\"ImportData\"].retry,", "is performed per- resource, this parameter does not affect the", "one page at a time >>> for page in client.list_datasets(parent).pages:", "options for each method. If not specified, the default configuration", "default_timeout=self._method_configs[\"DeleteModel\"].timeout, client_info=self._client_info, ) request = service_pb2.DeleteModelRequest(name=name) operation = self._inner_api_calls[\"delete_model\"]( request,", "in the ``response`` field when it completes, and ``delete_details`` in", ">>> name = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') >>> >>> # TODO:", "transport is responsible for handling serialization and # deserialization and", "# # Licensed under the Apache License, Version 2.0 (the", "- ``NOT annotation_spec_id:*`` --> The model evaluation was done for", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "result = operation_future.result() >>> >>> response.add_done_callback(callback) >>> >>> # Handle", "utf-8 -*- # # Copyright 2018 Google LLC # #", "datasets in a project. Example: >>> from google.cloud import automl_v1beta1", "client_config dictionary. 
_INTERFACE_NAME = \"google.cloud.automl.v1beta1.AutoMl\" @classmethod def from_service_account_file(cls, filename, *args,", "retry and timeout logic. if \"create_dataset\" not in self._inner_api_calls: self._inner_api_calls[", "] = google.api_core.gapic_v1.method.wrap_method( self.transport.export_data, default_retry=self._method_configs[\"ExportData\"].retry, default_timeout=self._method_configs[\"ExportData\"].timeout, client_info=self._client_info, ) request =", "- ``dataset_id`` - for = or !=. Some examples of", "name of the project to create the dataset for. dataset", "to the method. Returns: A :class:`~google.cloud.automl_v1beta1.types.Operation` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If", "metadata=None, ): \"\"\" Lists datasets in a project. Example: >>>", "import model_pb2 from google.cloud.automl_v1beta1.proto import operations_pb2 as proto_operations_pb2 from google.cloud.automl_v1beta1.proto", "name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Deletes a model. If", "names are assigned by the server. The server never reuses", "retry and timeout logic. if \"delete_dataset\" not in self._inner_api_calls: self._inner_api_calls[", "is mutually exclusive with ``credentials``; providing both will raise an", "client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent", "retry, # timeout, and the like. self._inner_api_calls = {} #", "\"\"\" Lists model evaluations. Example: >>> from google.cloud import automl_v1beta1", "the response through the `options` parameter. Raises: google.api_core.exceptions.GoogleAPICallError: If the", "is provided to the method. Returns: A :class:`~google.cloud.automl_v1beta1.types.Model` instance. 
Raises:", "retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Exports dataset's data to a", "def import_data( self, name, input_config, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\"", "auto_ml_grpc_transport.AutoMlGrpcTransport( address=self.SERVICE_ADDRESS, channel=channel, credentials=credentials ) if client_info is None: client_info", "which to list datasets. filter_ (str): An expression for filtering", "page streaming is performed per- resource, this parameter does not", "retry and timeout logic. if \"list_model_evaluations\" not in self._inner_api_calls: self._inner_api_calls[", "dataset and all of its contents. Returns empty response in", "def list_models( self, parent, filter_=None, page_size=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ):", ">>> def callback(operation_future): ... # Handle result. ... result =", "the results of the request. - ``model_metadata`` - for existence", "response in the ``response`` field when it completes. Example: >>>", "self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Deletes a model.", ">>> dataset = {} >>> >>> response = client.create_dataset(parent, dataset)", "method. Returns: A :class:`~google.cloud.automl_v1beta1.types.ModelEvaluation` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request", "of a resource is the last element of the item's", "{} >>> >>> response = client.create_dataset(parent, dataset) Args: parent (str):", "retried. 
timeout (Optional[float]): The amount of time, in seconds, to", "def get_model( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Gets", "for element in client.list_model_evaluations(parent): ... # process element ... pass", "transport method to add retry and timeout logic. if \"create_dataset\"", "the maximum number of resources in a page. retry (Optional[google.api_core.retry.Retry]):", "default_retry=self._method_configs[\"ImportData\"].retry, default_timeout=self._method_configs[\"ImportData\"].timeout, client_info=self._client_info, ) request = service_pb2.ImportDataRequest(name=name, input_config=input_config) operation =", "evaluation. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client", "Args: name (str): Resource name of the model to deploy.", "of using the filter is: - ``translation_dataset_metadata:*`` --> The dataset", ") request = service_pb2.UndeployModelRequest(name=name) return self._inner_api_calls[\"undeploy_model\"]( request, retry=retry, timeout=timeout, metadata=metadata", "retry=retry, timeout=timeout, metadata=metadata ) def list_models( self, parent, filter_=None, page_size=None,", "dataset for. dataset (Union[dict, ~google.cloud.automl_v1beta1.types.Dataset]): The dataset to create. If", "or implied. # See the License for the specific language", "service_pb2.ListModelsRequest( parent=parent, filter=filter_, page_size=page_size ) iterator = google.api_core.page_iterator.GRPCIterator( client=None, method=functools.partial(", "): \"\"\" Creates a dataset. Example: >>> from google.cloud import", "timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Deletes a dataset and all of", "transport method to add retry and timeout logic. 
if \"import_data\"", "get_model_evaluation( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Gets a", "expression for filtering the results of the request. - ``annotation_spec_id``", "# Iterate over all results >>> for element in client.list_model_evaluations(parent):", ">>> # TODO: Initialize `input_config`: >>> input_config = {} >>>", "the like. self._inner_api_calls = {} # Service calls def create_dataset(", "in client.list_models(parent): ... # process element ... pass >>> >>>", ">>> >>> response = client.deploy_model(name) Args: name (str): Resource name", "retry=retry, timeout=timeout, metadata=metadata ) def list_datasets( self, parent, filter_=None, page_size=None,", "transport method to add retry and timeout logic. if \"create_model\"", "the project from which to list datasets. filter_ (str): An", "retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If", "contained in the underlying API response. If page streaming is", "google.cloud.automl_v1beta1.proto import prediction_service_pb2 from google.cloud.automl_v1beta1.proto import prediction_service_pb2_grpc from google.cloud.automl_v1beta1.proto import", "to a retryable error and retry attempts failed. ValueError: If", "model) >>> >>> def callback(operation_future): ... # Handle result. ...", "'[MODEL]') >>> >>> response = client.undeploy_model(name) Args: name (str): Resource", "client_config is not None: warnings.warn( \"The `client_config` argument is deprecated.\",", "from google.cloud.automl_v1beta1.proto import prediction_service_pb2 from google.cloud.automl_v1beta1.proto import prediction_service_pb2_grpc from google.cloud.automl_v1beta1.proto", "google.api_core.gapic_v1.config.parse_method_configs( client_config[\"interfaces\"][self._INTERFACE_NAME] ) # Save a dictionary of cached API", "time >>> for page in client.list_models(parent).pages: ... 
for element in", "calls def create_dataset( self, parent, dataset, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ):", "method to add retry and timeout logic. if \"deploy_model\" not", "instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason.", "in client.list_datasets(parent).pages: ... for element in page: ... # process", "_INTERFACE_NAME = \"google.cloud.automl.v1beta1.AutoMl\" @classmethod def from_service_account_file(cls, filename, *args, **kwargs): \"\"\"Creates", "call functions. # These are the actual callables which invoke", "response_token_field=\"next_page_token\", ) return iterator def delete_model( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT,", "empty_pb2.Empty, metadata_type=proto_operations_pb2.OperationMetadata, ) def import_data( self, name, input_config, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT,", "Handle metadata. >>> metadata = response.metadata() Args: name (str): Required.", ">>> >>> # Iterate over all results >>> for element", "and examples will be added. input_config (Union[dict, ~google.cloud.automl_v1beta1.types.InputConfig]): Required. The", "'[DATASET]') >>> >>> # TODO: Initialize `output_config`: >>> output_config =", "`options` parameter. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any", "The client info used to send a user-agent string along", "metadata that is provided to the method. 
Returns: A :class:`~google.cloud.automl_v1beta1.types.Model`", "self.transport.delete_model, default_retry=self._method_configs[\"DeleteModel\"].retry, default_timeout=self._method_configs[\"DeleteModel\"].timeout, client_info=self._client_info, ) request = service_pb2.DeleteModelRequest(name=name) operation =", "output_config, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Exports dataset's data to", "= google.api_core.gapic_v1.method.wrap_method( self.transport.deploy_model, default_retry=self._method_configs[\"DeployModel\"].retry, default_timeout=self._method_configs[\"DeployModel\"].timeout, client_info=self._client_info, ) request = service_pb2.DeployModelRequest(name=name)", "which to list the models. filter_ (str): An expression for", "parent=parent, filter=filter_, page_size=page_size ) iterator = google.api_core.page_iterator.GRPCIterator( client=None, method=functools.partial( self._inner_api_calls[\"list_datasets\"],", "model evaluations for. If modelId is set as \"-\", this", "Raise deprecation warnings for things we want to go away.", "\"\"\" Creates a dataset. Example: >>> from google.cloud import automl_v1beta1", "make calls. This argument is mutually exclusive with ``credentials``; providing", "default_retry=self._method_configs[\"GetDataset\"].retry, default_timeout=self._method_configs[\"GetDataset\"].timeout, client_info=self._client_info, ) request = service_pb2.GetDatasetRequest(name=name) return self._inner_api_calls[\"get_dataset\"]( request,", "of the request. - ``annotation_spec_id`` - for =, != or", "seconds, to wait for the request to complete. 
Note that", "self.transport._operations_client, empty_pb2.Empty, metadata_type=proto_operations_pb2.OperationMetadata, ) def deploy_model( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT,", "\"The `client_config` argument is deprecated.\", PendingDeprecationWarning, stacklevel=2, ) else: client_config", "... pass Args: parent (str): The resource name of the", "filter_ (str): An expression for filtering the results of the", "the model to list the model evaluations for. If modelId", "parent, dataset, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Creates a dataset.", "attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to", "Returns: A :class:`~google.cloud.automl_v1beta1.types.Model` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed", "location, model): \"\"\"Return a fully-qualified model string.\"\"\" return google.api_core.path_template.expand( \"projects/{project}/locations/{location}/models/{model}\",", "the method. Returns: A :class:`~google.cloud.automl_v1beta1.types.Dataset` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the", "] = google.api_core.gapic_v1.method.wrap_method( self.transport.import_data, default_retry=self._method_configs[\"ImportData\"].retry, default_timeout=self._method_configs[\"ImportData\"].timeout, client_info=self._client_info, ) request =", "performed per-page, this determines the maximum number of resources in", "as \"-\", this will list model evaluations from across all", "empty response in the ``response`` field when it completes. 
Example:", ">>> name = client.model_evaluation_path('[PROJECT]', '[LOCATION]', '[MODEL]', '[MODEL_EVALUATION]') >>> >>> response", "client = automl_v1beta1.AutoMlClient() >>> >>> name = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]')", "automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> >>> parent =", "(str): Resource name of the model to undeploy. retry (Optional[google.api_core.retry.Retry]):", "to the service. If none are specified, the client will", "Lists datasets in a project. Example: >>> from google.cloud import", "address=self.SERVICE_ADDRESS, channel=channel, credentials=credentials ) if client_info is None: client_info =", ") return google.api_core.operation.from_gapic( operation, self.transport._operations_client, empty_pb2.Empty, metadata_type=proto_operations_pb2.OperationMetadata, ) def export_data(", "Generally, you only need to set this if you're developing", "\"\"\" Deletes a dataset and all of its contents. Returns", "Initialize `model`: >>> model = {} >>> >>> response =", "return google.api_core.operation.from_gapic( operation, self.transport._operations_client, empty_pb2.Empty, metadata_type=proto_operations_pb2.OperationMetadata, ) def deploy_model( self,", "timeout (Optional[float]): The amount of time, in seconds, to wait", "\"get_model_evaluation\" not in self._inner_api_calls: self._inner_api_calls[ \"get_model_evaluation\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.get_model_evaluation,", "google.api_core.gapic_v1.method.wrap_method( self.transport.delete_model, default_retry=self._method_configs[\"DeleteModel\"].retry, default_timeout=self._method_configs[\"DeleteModel\"].timeout, client_info=self._client_info, ) request = service_pb2.DeleteModelRequest(name=name) operation", "Args: parent (str): Resource name of the project, from which", "case. - ``dataset_id`` - for = or !=. 
Some examples", "a fully-qualified model string.\"\"\" return google.api_core.path_template.expand( \"projects/{project}/locations/{location}/models/{model}\", project=project, location=location, model=model,", "model_evaluation string.\"\"\" return google.api_core.path_template.expand( \"projects/{project}/locations/{location}/models/{model}/modelEvaluations/{model_evaluation}\", project=project, location=location, model=model, model_evaluation=model_evaluation, )", "and all of its contents. Returns empty response in the", "and timeout logic. if \"deploy_model\" not in self._inner_api_calls: self._inner_api_calls[ \"deploy_model\"", "automl_v1beta1.AutoMlClient() >>> >>> name = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') >>> >>>", "created from a sibling dataset with ID 5. page_size (int):", "\"\"\" Exports dataset's data to a Google Cloud Storage bucket.", "= client.delete_dataset(name) >>> >>> def callback(operation_future): ... # Handle result.", "that is provided to the method. Returns: A :class:`~google.gax.PageIterator` instance.", "to send a user-agent string along with API requests. 
If", "wrapped with `wrap_method` to add retry, # timeout, and the", "project, location): \"\"\"Return a fully-qualified location string.\"\"\" return google.api_core.path_template.expand( \"projects/{project}/locations/{location}\",", "\"list_datasets\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.list_datasets, default_retry=self._method_configs[\"ListDatasets\"].retry, default_timeout=self._method_configs[\"ListDatasets\"].timeout, client_info=self._client_info, ) request", "client_info=self._client_info, ) request = service_pb2.DeployModelRequest(name=name) return self._inner_api_calls[\"deploy_model\"]( request, retry=retry, timeout=timeout,", "= _GAPIC_LIBRARY_VERSION self._client_info = client_info # Parse out the default", "parent, filter_=None, page_size=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Lists model", "output_config (Union[dict, ~google.cloud.automl_v1beta1.types.OutputConfig]): Required. The desired output location. If a", "client_info=self._client_info, ) request = service_pb2.ImportDataRequest(name=name, input_config=input_config) operation = self._inner_api_calls[\"import_data\"]( request,", "in the `*_config.py` # file next to this one.) self._method_configs", "metadata=metadata ) def list_models( self, parent, filter_=None, page_size=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT,", "): \"\"\" Creates a model. Returns a Model in the", "an instance of this client using the provided credentials file.", "empty response in the ``response`` field when it completes, and", "Callable[[~.Credentials, type], ~.AutoMlGrpcTransport]): A transport instance, responsible for actually making", "'[MODEL_EVALUATION]') >>> >>> response = client.get_model_evaluation(name) Args: name (str): Resource", "Cloud Storage bucket. Returns an empty response in the ``response``", "info will be used. 
Generally, you only need to set", "method=functools.partial( self._inner_api_calls[\"list_datasets\"], retry=retry, timeout=timeout, metadata=metadata, ), request=request, items_field=\"datasets\", request_token_field=\"page_token\", response_token_field=\"next_page_token\",", "... # process element ... pass >>> >>> >>> #", "to retrieve. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry", "developing your own client library. \"\"\" # Raise deprecation warnings", "for. If modelId is set as \"-\", this will list", "'[DATASET]') >>> >>> response = client.delete_dataset(name) >>> >>> def callback(operation_future):", "for each method. If not specified, the default configuration is", "service_pb2.ImportDataRequest(name=name, input_config=input_config) operation = self._inner_api_calls[\"import_data\"]( request, retry=retry, timeout=timeout, metadata=metadata )", "instead.\", PendingDeprecationWarning, stacklevel=2, ) # Instantiate the transport. # The", "message :class:`~google.cloud.automl_v1beta1.types.Model` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry", "google.api_core.gapic_v1.config import google.api_core.gapic_v1.method import google.api_core.grpc_helpers import google.api_core.operation import google.api_core.operations_v1 import", "as proto_operations_pb2 from google.cloud.automl_v1beta1.proto import prediction_service_pb2 from google.cloud.automl_v1beta1.proto import prediction_service_pb2_grpc", "results >>> for element in client.list_datasets(parent): ... # process element", "a dictionary of cached API call functions. 
# These are", "= \"automl.googleapis.com:443\" \"\"\"The default address of the service.\"\"\" # The", "google.api_core.operation.from_gapic( operation, self.transport._operations_client, model_pb2.Model, metadata_type=proto_operations_pb2.OperationMetadata, ) def get_model( self, name,", "model was created from a sibling dataset with ID 5.", "copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # #", "Google LLC # # Licensed under the Apache License, Version", "the filter are: - ``image_classification_model_metadata:*`` --> The model has image\\_classification\\_model\\_metadata.", "self._client_info = client_info # Parse out the default settings for", ">>> for element in client.list_models(parent): ... # process element ...", "the item is ``{dataset_id}``. \"\"\" SERVICE_ADDRESS = \"automl.googleapis.com:443\" \"\"\"The default", "in writing, software # distributed under the License is distributed", "google.api_core.gapic_v1.method.wrap_method( self.transport.list_models, default_retry=self._method_configs[\"ListModels\"].retry, default_timeout=self._method_configs[\"ListModels\"].timeout, client_info=self._client_info, ) request = service_pb2.ListModelsRequest( parent=parent,", "first argument and the default transport class as the second", "method to add retry and timeout logic. if \"list_model_evaluations\" not", "to the method. Returns: A :class:`~google.gax.PageIterator` instance. By default, this", "Returns a ``DeployModelResponse`` in the ``response`` field when it completes.", "metadata that is provided to the method. 
Returns: A :class:`~google.cloud.automl_v1beta1.types.Operation`", "self._inner_api_calls: self._inner_api_calls[ \"deploy_model\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.deploy_model, default_retry=self._method_configs[\"DeployModel\"].retry, default_timeout=self._method_configs[\"DeployModel\"].timeout, client_info=self._client_info,", "return cls(*args, **kwargs) from_service_account_json = from_service_account_file @classmethod def location_path(cls, project,", ") request = service_pb2.DeployModelRequest(name=name) return self._inner_api_calls[\"deploy_model\"]( request, retry=retry, timeout=timeout, metadata=metadata", "Args: parent (str): The resource name of the project from", "] = google.api_core.gapic_v1.method.wrap_method( self.transport.delete_dataset, default_retry=self._method_configs[\"DeleteDataset\"].retry, default_timeout=self._method_configs[\"DeleteDataset\"].timeout, client_info=self._client_info, ) request =", ") request = service_pb2.ImportDataRequest(name=name, input_config=input_config) operation = self._inner_api_calls[\"import_data\"]( request, retry=retry,", "protobuf message :class:`~google.cloud.automl_v1beta1.types.Model` retry (Optional[google.api_core.retry.Retry]): A retry object used to", "methods, wrapped with `wrap_method` to add retry, # timeout, and", "= automl_v1beta1.AutoMlClient() >>> >>> parent = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') >>>", "under the License. \"\"\"Accesses the google.cloud.automl.v1beta1 AutoMl API.\"\"\" import functools", ">>> >>> response = client.get_dataset(name) Args: name (str): The resource", "data into a dataset. Returns an empty response in the", "a model. If a model is already deployed, this only", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "response through the `options` parameter. 
Raises: google.api_core.exceptions.GoogleAPICallError: If the request", "License, Version 2.0 (the \"License\"); # you may not use", "is provided to the method. Returns: A :class:`~google.cloud.automl_v1beta1.types.ModelEvaluation` instance. Raises:", "The server never reuses names that it has created after", "retry object used to retry requests. If ``None`` is specified,", "exclusive with ``credentials``; providing both will raise an exception. credentials", "filtering the results of the request. - ``annotation_spec_id`` - for", "Resource name of the model to list the model evaluations", "of the service.\"\"\" # The name of the interface for", "to ascertain the credentials from the environment. This argument is", "from a sibling dataset with ID 5. page_size (int): The", ">>> # Iterate over results one page at a time", "metadata=metadata ) return google.api_core.operation.from_gapic( operation, self.transport._operations_client, model_pb2.Model, metadata_type=proto_operations_pb2.OperationMetadata, ) def", "the status of the deployed model in the production environment.", "from google.cloud.automl_v1beta1.proto import service_pb2 from google.cloud.automl_v1beta1.proto import service_pb2_grpc from google.longrunning", "self.transport.undeploy_model, default_retry=self._method_configs[\"UndeployModel\"].retry, default_timeout=self._method_configs[\"UndeployModel\"].timeout, client_info=self._client_info, ) request = service_pb2.UndeployModelRequest(name=name) return self._inner_api_calls[\"undeploy_model\"](", "self._inner_api_calls: self._inner_api_calls[ \"get_model_evaluation\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.get_model_evaluation, default_retry=self._method_configs[\"GetModelEvaluation\"].retry, default_timeout=self._method_configs[\"GetModelEvaluation\"].timeout, client_info=self._client_info,", ">>> input_config = {} >>> >>> response = client.import_data(name, input_config)", "transport method to add retry and timeout logic. 
if \"get_dataset\"", "# process element ... pass Args: parent (str): The resource", "The desired input location. If a dict is provided, it", "the License for the specific language governing permissions and #", "the method configuration in the client_config dictionary. _INTERFACE_NAME = \"google.cloud.automl.v1beta1.AutoMl\"", "mutually exclusive with ``credentials``; providing both will raise an exception.", "not in self._inner_api_calls: self._inner_api_calls[ \"undeploy_model\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.undeploy_model, default_retry=self._method_configs[\"UndeployModel\"].retry,", "timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Creates a dataset. Example: >>> from", "= client.import_data(name, input_config) >>> >>> def callback(operation_future): ... # Handle", "self.transport.list_models, default_retry=self._method_configs[\"ListModels\"].retry, default_timeout=self._method_configs[\"ListModels\"].timeout, client_info=self._client_info, ) request = service_pb2.ListModelsRequest( parent=parent, filter=filter_,", ">>> # TODO: Initialize `output_config`: >>> output_config = {} >>>", "google.cloud.automl_v1beta1.gapic import auto_ml_client_config from google.cloud.automl_v1beta1.gapic import enums from google.cloud.automl_v1beta1.gapic.transports import", "results of the request. - ``dataset_metadata`` - for existence of", "the id for the item is ``{dataset_id}``. \"\"\" SERVICE_ADDRESS =", "in the ``response`` field when it completes. When you create", "create a model, several model evaluations are created for it:", "annotation spec. 
Example: >>> from google.cloud import automl_v1beta1 >>> >>>", "Resource name of the project, from which to list the", "in self._inner_api_calls: self._inner_api_calls[ \"export_data\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.export_data, default_retry=self._method_configs[\"ExportData\"].retry, default_timeout=self._method_configs[\"ExportData\"].timeout,", "operation = self._inner_api_calls[\"delete_model\"]( request, retry=retry, timeout=timeout, metadata=metadata ) return google.api_core.operation.from_gapic(", "service_pb2.ListDatasetsRequest( parent=parent, filter=filter_, page_size=page_size ) iterator = google.api_core.page_iterator.GRPCIterator( client=None, method=functools.partial(", "name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Gets a model evaluation.", "The dataset to create. If a dict is provided, it", "request=request, items_field=\"model\", request_token_field=\"page_token\", response_token_field=\"next_page_token\", ) return iterator def delete_model( self,", "next to this one.) self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( client_config[\"interfaces\"][self._INTERFACE_NAME] ) #", "second argument. channel (grpc.Channel): DEPRECATED. A ``Channel`` instance through which", "] = google.api_core.gapic_v1.method.wrap_method( self.transport.deploy_model, default_retry=self._method_configs[\"DeployModel\"].retry, default_timeout=self._method_configs[\"DeployModel\"].timeout, client_info=self._client_info, ) request =", "and timeout for each RPC # from the client configuration.", "retry attempts failed. ValueError: If the parameters are invalid. \"\"\"", "default info will be used. Generally, you only need to", "making the API calls. 
The default transport uses the gRPC", "google.api_core.operation.from_gapic( operation, self.transport._operations_client, empty_pb2.Empty, metadata_type=proto_operations_pb2.OperationMetadata, ) def import_data( self, name,", "to create. If a dict is provided, it must be", "Handle metadata. >>> metadata = response.metadata() Args: name (str): The", "to add retry and timeout logic. if \"import_data\" not in", "A :class:`~google.cloud.automl_v1beta1.types.Model` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for", "exclusive.\" ) self.transport = transport else: self.transport = auto_ml_grpc_transport.AutoMlGrpcTransport( address=self.SERVICE_ADDRESS,", "created. model (Union[dict, ~google.cloud.automl_v1beta1.types.Model]): The model to create. If a", "and does not change the status of the deployed model", ":class:`~google.cloud.automl_v1beta1.types.ModelEvaluation` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any", "operation, self.transport._operations_client, empty_pb2.Empty, metadata_type=proto_operations_pb2.OperationMetadata, ) def deploy_model( self, name, retry=google.api_core.gapic_v1.method.DEFAULT,", "sending data to the service. if transport: if callable(transport): self.transport", "'[LOCATION]') >>> >>> # TODO: Initialize `dataset`: >>> dataset =", "return self._inner_api_calls[\"undeploy_model\"]( request, retry=retry, timeout=timeout, metadata=metadata ) def get_model_evaluation( self,", "list the model evaluations for. If modelId is set as", "= client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') >>> >>> # TODO: Initialize `input_config`:", "\"\"\" Deploys model. 
Returns a ``DeployModelResponse`` in the ``response`` field", "message :class:`~google.cloud.automl_v1beta1.types.OutputConfig` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry", "project, location, model, model_evaluation): \"\"\"Return a fully-qualified model_evaluation string.\"\"\" return", "= service_pb2.GetModelEvaluationRequest(name=name) return self._inner_api_calls[\"get_model_evaluation\"]( request, retry=retry, timeout=timeout, metadata=metadata ) def", ">>> >>> def callback(operation_future): ... # Handle result. ... result", "dataset to retrieve. retry (Optional[google.api_core.retry.Retry]): A retry object used to", "The transport is responsible for handling serialization and # deserialization", "for this client. This is the key used to #", "from which to list the models. filter_ (str): An expression", "service_pb2.DeployModelRequest(name=name) return self._inner_api_calls[\"deploy_model\"]( request, retry=retry, timeout=timeout, metadata=metadata ) def undeploy_model(", "# distributed under the License is distributed on an \"AS", ") return iterator def delete_dataset( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None,", "# Unless required by applicable law or agreed to in", "\"list_models\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.list_models, default_retry=self._method_configs[\"ListModels\"].retry, default_timeout=self._method_configs[\"ListModels\"].timeout, client_info=self._client_info, ) request", "'[LOCATION]', '[DATASET]') >>> >>> # TODO: Initialize `output_config`: >>> output_config", ">>> metadata = response.metadata() Args: name (str): Resource name of", "= automl_v1beta1.AutoMlClient() >>> >>> parent = client.location_path('[PROJECT]', '[LOCATION]') >>> >>>", "deploy_model( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Deploys 
model.", "the project, from which to list the models. filter_ (str):", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "A transport instance, responsible for actually making the API calls.", "parent, filter_=None, page_size=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Lists models.", ":class:`~google.cloud.automl_v1beta1.types.Model` instances. This object can also be configured to iterate", "metadata_type=proto_operations_pb2.OperationMetadata, ) def get_model( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ):", "def from_service_account_file(cls, filename, *args, **kwargs): \"\"\"Creates an instance of this", "page_size=page_size ) iterator = google.api_core.page_iterator.GRPCIterator( client=None, method=functools.partial( self._inner_api_calls[\"list_models\"], retry=retry, timeout=timeout,", "return google.api_core.path_template.expand( \"projects/{project}/locations/{location}/datasets/{dataset}\", project=project, location=location, dataset=dataset, ) @classmethod def model_path(cls,", ">>> parent = client.location_path('[PROJECT]', '[LOCATION]') >>> >>> # Iterate over", "= google.api_core.gapic_v1.method.wrap_method( self.transport.delete_model, default_retry=self._method_configs[\"DeleteModel\"].retry, default_timeout=self._method_configs[\"DeleteModel\"].timeout, client_info=self._client_info, ) request = service_pb2.DeleteModelRequest(name=name)", "failed due to a retryable error and retry attempts failed.", "same form as the protobuf message :class:`~google.cloud.automl_v1beta1.types.InputConfig` retry (Optional[google.api_core.retry.Retry]): A", "add retry and timeout logic. if \"deploy_model\" not in self._inner_api_calls:", "the constructor. Returns: AutoMlClient: The constructed client. \"\"\" credentials =", "global evaluation, and one evaluation for each annotation spec. 
Example:", "the Apache License, Version 2.0 (the \"License\"); # you may", "functions. # These are the actual callables which invoke the", "): \"\"\" Gets a model. Example: >>> from google.cloud import", "request = service_pb2.GetDatasetRequest(name=name) return self._inner_api_calls[\"get_dataset\"]( request, retry=retry, timeout=timeout, metadata=metadata )", "name (str): Resource name for the model evaluation. retry (Optional[google.api_core.retry.Retry]):", "response in the ``response`` field when it completes, and ``delete_details``", "the method. Returns: A :class:`~google.gax.PageIterator` instance. By default, this is", "model evaluation. Example: >>> from google.cloud import automl_v1beta1 >>> >>>", "self._inner_api_calls[ \"export_data\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.export_data, default_retry=self._method_configs[\"ExportData\"].retry, default_timeout=self._method_configs[\"ExportData\"].timeout, client_info=self._client_info, )", ") request = service_pb2.ListModelsRequest( parent=parent, filter=filter_, page_size=page_size ) iterator =", "= google.api_core.gapic_v1.method.wrap_method( self.transport.list_models, default_retry=self._method_configs[\"ListModels\"].retry, default_timeout=self._method_configs[\"ListModels\"].timeout, client_info=self._client_info, ) request = service_pb2.ListModelsRequest(", "both a transport instance and \" \"credentials; these are mutually", "``projects/{project_id}/locations/{location_id}/datasets/{dataset_id}``, then the id for the item is ``{dataset_id}``. \"\"\"", "= response.metadata() Args: parent (str): Resource name of the parent", "model, model_evaluation): \"\"\"Return a fully-qualified model_evaluation string.\"\"\" return google.api_core.path_template.expand( \"projects/{project}/locations/{location}/models/{model}/modelEvaluations/{model_evaluation}\",", "number of resources in a page. 
retry (Optional[google.api_core.retry.Retry]): A retry", "page at a time >>> for page in client.list_models(parent).pages: ...", "google.cloud.automl_v1beta1.proto import dataset_pb2 from google.cloud.automl_v1beta1.proto import io_pb2 from google.cloud.automl_v1beta1.proto import", "in the client_config dictionary. _INTERFACE_NAME = \"google.cloud.automl.v1beta1.AutoMl\" @classmethod def from_service_account_file(cls,", "metadata=metadata ) def get_dataset( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ):", "default_timeout=self._method_configs[\"ImportData\"].timeout, client_info=self._client_info, ) request = service_pb2.ImportDataRequest(name=name, input_config=input_config) operation = self._inner_api_calls[\"import_data\"](", "project where the model is being created. model (Union[dict, ~google.cloud.automl_v1beta1.types.Model]):", "metadata=None, ): \"\"\" Gets a model evaluation. Example: >>> from", "all results >>> for element in client.list_model_evaluations(parent): ... # process", "model (Union[dict, ~google.cloud.automl_v1beta1.types.Model]): The model to create. If a dict", "be configured to iterate over the pages of the response", "page_size=page_size ) iterator = google.api_core.page_iterator.GRPCIterator( client=None, method=functools.partial( self._inner_api_calls[\"list_model_evaluations\"], retry=retry, timeout=timeout,", "\"The `channel` argument is deprecated; use \" \"`transport` instead.\", PendingDeprecationWarning,", "credentials=None, client_config=None, client_info=None, ): \"\"\"Constructor. Args: transport (Union[~.AutoMlGrpcTransport, Callable[[~.Credentials, type],", "def location_path(cls, project, location): \"\"\"Return a fully-qualified location string.\"\"\" return", "failed. ValueError: If the parameters are invalid. \"\"\" # Wrap", "transport method to add retry and timeout logic. 
if \"get_model_evaluation\"", "client.delete_model(name) >>> >>> def callback(operation_future): ... # Handle result. ...", ") def create_model( self, parent, model, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ):", "location, dataset): \"\"\"Return a fully-qualified dataset string.\"\"\" return google.api_core.path_template.expand( \"projects/{project}/locations/{location}/datasets/{dataset}\",", "attach to requests. These credentials identify this application to the", "google.cloud.automl_v1beta1.proto import service_pb2 from google.cloud.automl_v1beta1.proto import service_pb2_grpc from google.longrunning import", "location string.\"\"\" return google.api_core.path_template.expand( \"projects/{project}/locations/{location}\", project=project, location=location, ) @classmethod def", "operation = self._inner_api_calls[\"import_data\"]( request, retry=retry, timeout=timeout, metadata=metadata ) return google.api_core.operation.from_gapic(", ") def list_model_evaluations( self, parent, filter_=None, page_size=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None,", "that is provided to the method. Returns: A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance.", "instance to ``transport``; doing so will raise an exception. client_config", "def create_dataset( self, parent, dataset, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\"", "of :class:`~google.cloud.automl_v1beta1.types.Model` instances. This object can also be configured to", ") def get_model_evaluation( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\"", "`*_config.py` # file next to this one.) 
self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(", "resource name of the dataset to delete. retry (Optional[google.api_core.retry.Retry]): A", "in self._inner_api_calls: self._inner_api_calls[ \"list_model_evaluations\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.list_model_evaluations, default_retry=self._method_configs[\"ListModelEvaluations\"].retry, default_timeout=self._method_configs[\"ListModelEvaluations\"].timeout,", "Args: name (str): Required. The resource name of the dataset.", "which invoke the proper # transport methods, wrapped with `wrap_method`", "translation\\_dataset\\_metadata. page_size (int): The maximum number of resources contained in", "method. Returns: A :class:`~google.cloud.automl_v1beta1.types.Dataset` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request", "of the parent location. filter_ (str): An expression for filtering", "operations_pb2 as longrunning_operations_pb2 from google.protobuf import empty_pb2 _GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution(\"google-cloud-automl\").version", "default_retry=self._method_configs[\"DeleteDataset\"].retry, default_timeout=self._method_configs[\"DeleteDataset\"].timeout, client_info=self._client_info, ) request = service_pb2.DeleteDatasetRequest(name=name) operation = self._inner_api_calls[\"delete_dataset\"](", "add retry and timeout logic. if \"export_data\" not in self._inner_api_calls:", "'[LOCATION]', '[MODEL]') >>> >>> response = client.get_model(name) Args: name (str):", "argument is mutually exclusive with providing a transport instance to", "self._inner_api_calls[ \"create_model\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.create_model, default_retry=self._method_configs[\"CreateModel\"].retry, default_timeout=self._method_configs[\"CreateModel\"].timeout, client_info=self._client_info, )", "reason. 
google.api_core.exceptions.RetryError: If the request failed due to a retryable", "from the environment. This argument is mutually exclusive with providing", "a page. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry", "we want to go away. if client_config is not None:", "evaluation for each annotation spec. Example: >>> from google.cloud import", "under the License is distributed on an \"AS IS\" BASIS,", "model is already deployed, this only deletes the model in", "google.protobuf import empty_pb2 _GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution(\"google-cloud-automl\").version class AutoMlClient(object): \"\"\" AutoML", "self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Gets a model.", "of resources contained in the underlying API response. If page", "from google.protobuf import empty_pb2 _GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution(\"google-cloud-automl\").version class AutoMlClient(object): \"\"\"", "response = client.deploy_model(name) Args: name (str): Resource name of the", "self, parent, model, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Creates a", "with ID different than 4. - ``NOT annotation_spec_id:*`` --> The", "client library. \"\"\" # Raise deprecation warnings for things we", "If a model is already deployed, this only deletes the", "in client.list_datasets(parent): ... # process element ... pass >>> >>>", "underlying API response. If page streaming is performed per- resource,", "the resources with those names are deleted. An ID of", "not in self._inner_api_calls: self._inner_api_calls[ \"get_model\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.get_model, default_retry=self._method_configs[\"GetModel\"].retry,", "parent (str): Resource name of the model to list the", "add retry and timeout logic. 
if \"list_datasets\" not in self._inner_api_calls:", "**kwargs): \"\"\"Creates an instance of this client using the provided", "-*- coding: utf-8 -*- # # Copyright 2018 Google LLC", "\"get_dataset\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.get_dataset, default_retry=self._method_configs[\"GetDataset\"].retry, default_timeout=self._method_configs[\"GetDataset\"].timeout, client_info=self._client_info, ) request", "you only need to set this if you're developing your", "self._inner_api_calls[\"get_model\"]( request, retry=retry, timeout=timeout, metadata=metadata ) def list_models( self, parent,", "Args: parent (str): The resource name of the project to", "Handle metadata. >>> metadata = response.metadata() Args: parent (str): Resource", "page in client.list_model_evaluations(parent).pages: ... for element in page: ... #", "fully-qualified dataset string.\"\"\" return google.api_core.path_template.expand( \"projects/{project}/locations/{location}/datasets/{dataset}\", project=project, location=location, dataset=dataset, )", "method to add retry and timeout logic. if \"get_dataset\" not", "= google.api_core.gapic_v1.method.wrap_method( self.transport.export_data, default_retry=self._method_configs[\"ExportData\"].retry, default_timeout=self._method_configs[\"ExportData\"].timeout, client_info=self._client_info, ) request = service_pb2.ExportDataRequest(name=name,", "method. Returns: A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. 
Raises: google.api_core.exceptions.GoogleAPICallError: If the request", "The model was created from a sibling dataset with ID", "retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Imports data into a dataset.", "name = client.model_evaluation_path('[PROJECT]', '[LOCATION]', '[MODEL]', '[MODEL_EVALUATION]') >>> >>> response =", "default_retry=self._method_configs[\"ListModels\"].retry, default_timeout=self._method_configs[\"ListModels\"].timeout, client_info=self._client_info, ) request = service_pb2.ListModelsRequest( parent=parent, filter=filter_, page_size=page_size", ">>> # Handle metadata. >>> metadata = response.metadata() Args: parent", "as the protobuf message :class:`~google.cloud.automl_v1beta1.types.Dataset` retry (Optional[google.api_core.retry.Retry]): A retry object", "the model being deleted. retry (Optional[google.api_core.retry.Retry]): A retry object used", "the model to undeploy. retry (Optional[google.api_core.retry.Retry]): A retry object used", "fully-qualified location string.\"\"\" return google.api_core.path_template.expand( \"projects/{project}/locations/{location}\", project=project, location=location, ) @classmethod", "are specified, the client will attempt to ascertain the credentials", "retry and timeout logic. if \"export_data\" not in self._inner_api_calls: self._inner_api_calls[", "to list the models. filter_ (str): An expression for filtering", "model. If a model is already deployed, this only deletes", "permissions and # limitations under the License. \"\"\"Accesses the google.cloud.automl.v1beta1", "- ``model_metadata`` - for existence of the case. - ``dataset_id``", "annotations and examples will be added. input_config (Union[dict, ~google.cloud.automl_v1beta1.types.InputConfig]): Required.", "``response`` field when it completes. 
Example: >>> from google.cloud import", "client_info=self._client_info, ) request = service_pb2.GetModelEvaluationRequest(name=name) return self._inner_api_calls[\"get_model_evaluation\"]( request, retry=retry, timeout=timeout,", "name (str): The resource name of the dataset to delete.", "# -*- coding: utf-8 -*- # # Copyright 2018 Google", "default_timeout=self._method_configs[\"ListModels\"].timeout, client_info=self._client_info, ) request = service_pb2.ListModelsRequest( parent=parent, filter=filter_, page_size=page_size )", ">>> response = client.delete_dataset(name) >>> >>> def callback(operation_future): ... #", "this only deletes the model in AutoML BE, and does", "metadata=None, ): \"\"\" Gets a dataset. Example: >>> from google.cloud", "(str): Resource name of the model being deleted. retry (Optional[google.api_core.retry.Retry]):", "resource, this parameter does not affect the return value. If", "the constructor. kwargs: Additional arguments to pass to the constructor.", "(google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string", "\"-\", this will list model evaluations from across all models", "example below for the last. Some examples of using the", "different than 4. - ``NOT annotation_spec_id:*`` --> The model evaluation", "method to add retry and timeout logic. if \"list_models\" not", "Imports data into a dataset. Returns an empty response in", "at a time >>> for page in client.list_datasets(parent).pages: ... for", "page in client.list_models(parent).pages: ... for element in page: ... #", "dataset_path(cls, project, location, dataset): \"\"\"Return a fully-qualified dataset string.\"\"\" return", "- for =, != or existence. See example below for", "wait for the request to complete. Note that if ``retry``", "ANY KIND, either express or implied. 
# See the License", "object can also be configured to iterate over the pages", "a model, several model evaluations are created for it: a", "): \"\"\" Imports data into a dataset. Returns an empty", "to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that", "the License. # You may obtain a copy of the", "# file next to this one.) self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( client_config[\"interfaces\"][self._INTERFACE_NAME]", "\"\"\" Imports data into a dataset. Returns an empty response", "service_pb2.CreateModelRequest(parent=parent, model=model) operation = self._inner_api_calls[\"create_model\"]( request, retry=retry, timeout=timeout, metadata=metadata )", "this will list model evaluations from across all models of", "= service_pb2.DeleteModelRequest(name=name) operation = self._inner_api_calls[\"delete_model\"]( request, retry=retry, timeout=timeout, metadata=metadata )", "--> The model evaluation was done for annotation spec with", "stacklevel=2, ) else: client_config = auto_ml_client_config.config if channel: warnings.warn( \"The", "retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Undeploys model. Returns an ``UndeployModelResponse``", "import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> >>> parent", "# See the License for the specific language governing permissions", "... # process element ... pass Args: parent (str): Resource", "a model. Returns a Model in the ``response`` field when", "language governing permissions and # limitations under the License. \"\"\"Accesses", "self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Undeploys model. 
Returns", "form as the protobuf message :class:`~google.cloud.automl_v1beta1.types.InputConfig` retry (Optional[google.api_core.retry.Retry]): A retry", "a time >>> for page in client.list_datasets(parent).pages: ... for element", "annotation spec with ID different than 4. - ``NOT annotation_spec_id:*``", "import warnings from google.oauth2 import service_account import google.api_core.gapic_v1.client_info import google.api_core.gapic_v1.config", "key json file. args: Additional arguments to pass to the", "error and retry attempts failed. ValueError: If the parameters are", "client.get_model(name) Args: name (str): Resource name of the model. retry", "location=location, model=model, ) @classmethod def model_evaluation_path(cls, project, location, model, model_evaluation):", "timeout logic. if \"get_dataset\" not in self._inner_api_calls: self._inner_api_calls[ \"get_dataset\" ]", "response = client.undeploy_model(name) Args: name (str): Resource name of the", "client_info=self._client_info, ) request = service_pb2.DeleteDatasetRequest(name=name) operation = self._inner_api_calls[\"delete_dataset\"]( request, retry=retry,", "to add retry and timeout logic. if \"get_model_evaluation\" not in", "from_service_account_file @classmethod def location_path(cls, project, location): \"\"\"Return a fully-qualified location", "streaming is performed per- resource, this parameter does not affect", "to add retry and timeout logic. if \"get_dataset\" not in", "(str): Resource name of the model to list the model", "imported annotations and examples will be added. input_config (Union[dict, ~google.cloud.automl_v1beta1.types.InputConfig]):", "= {} >>> >>> response = client.export_data(name, output_config) >>> >>>", "client.import_data(name, input_config) >>> >>> def callback(operation_future): ... # Handle result.", "Gets a dataset. 
Example: >>> from google.cloud import automl_v1beta1 >>>", "If the request failed due to a retryable error and", "(Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None``", "argument may also be a callable which returns a transport", "client_config[\"interfaces\"][self._INTERFACE_NAME] ) # Save a dictionary of cached API call", "the ``response`` field when it completes, and ``delete_details`` in the", "name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') >>> >>> response = client.deploy_model(name)", "will be sent the credentials as the first argument and", "~google.cloud.automl_v1beta1.types.Dataset]): The dataset to create. If a dict is provided,", "] = google.api_core.gapic_v1.method.wrap_method( self.transport.undeploy_model, default_retry=self._method_configs[\"UndeployModel\"].retry, default_timeout=self._method_configs[\"UndeployModel\"].timeout, client_info=self._client_info, ) request =", "name of the project, from which to list the models.", "default_timeout=self._method_configs[\"GetModel\"].timeout, client_info=self._client_info, ) request = service_pb2.GetModelRequest(name=name) return self._inner_api_calls[\"get_model\"]( request, retry=retry,", "iterator = google.api_core.page_iterator.GRPCIterator( client=None, method=functools.partial( self._inner_api_calls[\"list_datasets\"], retry=retry, timeout=timeout, metadata=metadata, ),", "= automl_v1beta1.AutoMlClient() >>> >>> name = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') >>>", "logic. if \"delete_model\" not in self._inner_api_calls: self._inner_api_calls[ \"delete_model\" ] =", "service. if transport: if callable(transport): self.transport = transport( credentials=credentials, default_class=auto_ml_grpc_transport.AutoMlGrpcTransport,", "case. An example of using the filter is: - ``translation_dataset_metadata:*``", "value. If page streaming is performed per-page, this determines the", "License. 
\"\"\"Accesses the google.cloud.automl.v1beta1 AutoMl API.\"\"\" import functools import pkg_resources", "= response.metadata() Args: name (str): The resource name of the", "self._inner_api_calls[ \"get_model\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.get_model, default_retry=self._method_configs[\"GetModel\"].retry, default_timeout=self._method_configs[\"GetModel\"].timeout, client_info=self._client_info, )", "... # process element ... pass Args: parent (str): The", "client_info=self._client_info, ) request = service_pb2.CreateDatasetRequest(parent=parent, dataset=dataset) return self._inner_api_calls[\"create_dataset\"]( request, retry=retry,", "if client_config is not None: warnings.warn( \"The `client_config` argument is", "element of the item's resource name. For ``projects/{project_id}/locations/{location_id}/datasets/{dataset_id}``, then the", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "metadata_type=proto_operations_pb2.OperationMetadata, ) def deploy_model( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ):", "the case. An example of using the filter is: -", ">>> response = client.undeploy_model(name) Args: name (str): Resource name of", "one.) self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( client_config[\"interfaces\"][self._INTERFACE_NAME] ) # Save a dictionary", "dataset to create. If a dict is provided, it must", "exception. client_config (dict): DEPRECATED. A dictionary of call options for", "and timeout logic. if \"get_model\" not in self._inner_api_calls: self._inner_api_calls[ \"get_model\"", "By default, this is an iterable of :class:`~google.cloud.automl_v1beta1.types.Model` instances. This", ") request = service_pb2.ListDatasetsRequest( parent=parent, filter=filter_, page_size=page_size ) iterator =", "of resources in a page. 
retry (Optional[google.api_core.retry.Retry]): A retry object", "timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Creates a model. Returns a Model", "writing, software # distributed under the License is distributed on", "from google.cloud.automl_v1beta1.gapic import enums from google.cloud.automl_v1beta1.gapic.transports import auto_ml_grpc_transport from google.cloud.automl_v1beta1.proto", ">>> response = client.get_dataset(name) Args: name (str): The resource name", "- ``annotation_spec_id!=4`` --> The model evaluation was done for annotation", "is deprecated; use \" \"`transport` instead.\", PendingDeprecationWarning, stacklevel=2, ) #", "completes. When you create a model, several model evaluations are", "and retry attempts failed. ValueError: If the parameters are invalid.", "metadata=None, ): \"\"\" Creates a dataset. Example: >>> from google.cloud", "parameter. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason.", "using the filter are: - ``image_classification_model_metadata:*`` --> The model has", "parent=parent, filter=filter_, page_size=page_size ) iterator = google.api_core.page_iterator.GRPCIterator( client=None, method=functools.partial( self._inner_api_calls[\"list_model_evaluations\"],", "client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') >>> >>> response = client.delete_model(name) >>> >>>", "parent (str): Resource name of the project, from which to", "timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional", "the defaults specified in the `*_config.py` # file next to", ">>> response = client.create_dataset(parent, dataset) Args: parent (str): The resource", "parent (str): The resource name of the project to create", "if \"delete_dataset\" not in self._inner_api_calls: self._inner_api_calls[ \"delete_dataset\" ] = google.api_core.gapic_v1.method.wrap_method(", "evaluations. 
Example: >>> from google.cloud import automl_v1beta1 >>> >>> client", "model string.\"\"\" return google.api_core.path_template.expand( \"projects/{project}/locations/{location}/models/{model}\", project=project, location=location, model=model, ) @classmethod", "'[LOCATION]', '[MODEL]') >>> >>> response = client.deploy_model(name) Args: name (str):", "\"projects/{project}/locations/{location}/models/{model}\", project=project, location=location, model=model, ) @classmethod def model_evaluation_path(cls, project, location,", "\"projects/{project}/locations/{location}/datasets/{dataset}\", project=project, location=location, dataset=dataset, ) @classmethod def model_path(cls, project, location,", "= service_pb2.CreateModelRequest(parent=parent, model=model) operation = self._inner_api_calls[\"create_model\"]( request, retry=retry, timeout=timeout, metadata=metadata", "empty_pb2.Empty, metadata_type=proto_operations_pb2.OperationMetadata, ) def create_model( self, parent, model, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT,", "example of using the filter is: - ``translation_dataset_metadata:*`` --> The", "name (str): Required. The resource name of the dataset. output_config", "retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Deletes a model. If a", "API response. If page streaming is performed per- resource, this", "metadata=None, ): \"\"\" Creates a model. Returns a Model in", "of call options for each method. If not specified, the", ") def list_datasets( self, parent, filter_=None, page_size=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None,", "Iterate over all results >>> for element in client.list_models(parent): ...", "Required. The resource name of the dataset. 
output_config (Union[dict, ~google.cloud.automl_v1beta1.types.OutputConfig]):", "providing both will raise an exception. credentials (google.auth.credentials.Credentials): The authorization", "auto_ml_client_config from google.cloud.automl_v1beta1.gapic import enums from google.cloud.automl_v1beta1.gapic.transports import auto_ml_grpc_transport from", "of the request. - ``model_metadata`` - for existence of the", "Args: parent (str): Resource name of the model to list", "default, this is an iterable of :class:`~google.cloud.automl_v1beta1.types.Dataset` instances. This object", "for filtering the results of the request. - ``dataset_metadata`` -", "the method. Returns: A :class:`~google.cloud.automl_v1beta1.types.Operation` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the", "self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Gets a dataset.", "--> The dataset has translation\\_dataset\\_metadata. page_size (int): The maximum number", "\"\"\" Gets a model evaluation. Example: >>> from google.cloud import", "raise an exception. client_config (dict): DEPRECATED. A dictionary of call", "Args: filename (str): The path to the service account private", "Alternatively: >>> >>> # Iterate over results one page at", "method to add retry and timeout logic. if \"create_dataset\" not", "if \"export_data\" not in self._inner_api_calls: self._inner_api_calls[ \"export_data\" ] = google.api_core.gapic_v1.method.wrap_method(", "the pages of the response through the `options` parameter. Raises:", "to the constructor. Returns: AutoMlClient: The constructed client. \"\"\" credentials", "created for it: a global evaluation, and one evaluation for", "image\\_classification\\_model\\_metadata. - ``dataset_id=5`` --> The model was created from a", "each method. 
If not specified, the default configuration is used.", "are mutually exclusive.\" ) self.transport = transport else: self.transport =", "default configuration is used. client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used", "request = service_pb2.UndeployModelRequest(name=name) return self._inner_api_calls[\"undeploy_model\"]( request, retry=retry, timeout=timeout, metadata=metadata )", "*args, **kwargs): \"\"\"Creates an instance of this client using the", "used to send a user-agent string along with API requests.", "= client.get_dataset(name) Args: name (str): The resource name of the", "self._inner_api_calls: self._inner_api_calls[ \"create_model\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.create_model, default_retry=self._method_configs[\"CreateModel\"].retry, default_timeout=self._method_configs[\"CreateModel\"].timeout, client_info=self._client_info,", "will raise an exception. credentials (google.auth.credentials.Credentials): The authorization credentials to", "output_config = {} >>> >>> response = client.export_data(name, output_config) >>>", "timeout logic. if \"export_data\" not in self._inner_api_calls: self._inner_api_calls[ \"export_data\" ]", "= google.api_core.gapic_v1.method.wrap_method( self.transport.list_model_evaluations, default_retry=self._method_configs[\"ListModelEvaluations\"].retry, default_timeout=self._method_configs[\"ListModelEvaluations\"].timeout, client_info=self._client_info, ) request = service_pb2.ListModelEvaluationsRequest(", "to deploy. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry", "of the same form as the protobuf message :class:`~google.cloud.automl_v1beta1.types.OutputConfig` retry", "timeout logic. if \"undeploy_model\" not in self._inner_api_calls: self._inner_api_calls[ \"undeploy_model\" ]", "the request. 
- ``model_metadata`` - for existence of the case.", "= client.undeploy_model(name) Args: name (str): Resource name of the model", "metadata = response.metadata() Args: name (str): Required. The resource name", "input location. If a dict is provided, it must be", "and timeout logic. if \"create_model\" not in self._inner_api_calls: self._inner_api_calls[ \"create_model\"", "self.transport.create_dataset, default_retry=self._method_configs[\"CreateDataset\"].retry, default_timeout=self._method_configs[\"CreateDataset\"].timeout, client_info=self._client_info, ) request = service_pb2.CreateDatasetRequest(parent=parent, dataset=dataset) return", "Exports dataset's data to a Google Cloud Storage bucket. Returns", "self._inner_api_calls[\"delete_model\"]( request, retry=retry, timeout=timeout, metadata=metadata ) return google.api_core.operation.from_gapic( operation, self.transport._operations_client,", "client.list_datasets(parent).pages: ... for element in page: ... # process element", "``DeployModelResponse`` in the ``response`` field when it completes. Example: >>>", "retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Creates a model. Returns a", "): \"\"\" Lists models. Example: >>> from google.cloud import automl_v1beta1", "of using the filter are: - ``image_classification_model_metadata:*`` --> The model", "the google.cloud.automl.v1beta1 AutoMl API.\"\"\" import functools import pkg_resources import warnings", "from the client configuration. # (Ordinarily, these are the defaults", "time >>> for page in client.list_model_evaluations(parent).pages: ... for element in", "= service_pb2.GetModelRequest(name=name) return self._inner_api_calls[\"get_model\"]( request, retry=retry, timeout=timeout, metadata=metadata ) def", "the model to deploy. retry (Optional[google.api_core.retry.Retry]): A retry object used", "maximum number of resources in a page. 
retry (Optional[google.api_core.retry.Retry]): A", "deserialization and actually sending data to the service. if transport:", ") request = service_pb2.ListModelEvaluationsRequest( parent=parent, filter=filter_, page_size=page_size ) iterator =", "filter_=None, page_size=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Lists datasets in", "service_pb2 from google.cloud.automl_v1beta1.proto import service_pb2_grpc from google.longrunning import operations_pb2 as", "a fully-qualified location string.\"\"\" return google.api_core.path_template.expand( \"projects/{project}/locations/{location}\", project=project, location=location, )", "'[LOCATION]', '[DATASET]') >>> >>> response = client.get_dataset(name) Args: name (str):", "functools import pkg_resources import warnings from google.oauth2 import service_account import", "element ... pass >>> >>> >>> # Alternatively: >>> >>>", "add retry and timeout logic. if \"undeploy_model\" not in self._inner_api_calls:", "), request=request, items_field=\"datasets\", request_token_field=\"page_token\", response_token_field=\"next_page_token\", ) return iterator def delete_dataset(", "if client_info is None: client_info = google.api_core.gapic_v1.client_info.ClientInfo( gapic_version=_GAPIC_LIBRARY_VERSION ) else:", "client_info=self._client_info, ) request = service_pb2.ExportDataRequest(name=name, output_config=output_config) operation = self._inner_api_calls[\"export_data\"]( request,", "data_items_pb2 from google.cloud.automl_v1beta1.proto import dataset_pb2 from google.cloud.automl_v1beta1.proto import io_pb2 from", "import io_pb2 from google.cloud.automl_v1beta1.proto import model_evaluation_pb2 from google.cloud.automl_v1beta1.proto import model_pb2", "argument is deprecated.\", PendingDeprecationWarning, stacklevel=2, ) else: client_config = auto_ml_client_config.config", "for annotation spec with ID different than 4. 
- ``NOT", "existence of the case. - ``dataset_id`` - for = or", "self.transport._operations_client, empty_pb2.Empty, metadata_type=proto_operations_pb2.OperationMetadata, ) def create_model( self, parent, model, retry=google.api_core.gapic_v1.method.DEFAULT,", "retry=retry, timeout=timeout, metadata=metadata ) def undeploy_model( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT,", "self.transport.create_model, default_retry=self._method_configs[\"CreateModel\"].retry, default_timeout=self._method_configs[\"CreateModel\"].timeout, client_info=self._client_info, ) request = service_pb2.CreateModelRequest(parent=parent, model=model) operation", "A :class:`~google.cloud.automl_v1beta1.types.ModelEvaluation` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for", "Lists model evaluations. Example: >>> from google.cloud import automl_v1beta1 >>>", "and actually sending data to the service. if transport: if", "\"\"\" Undeploys model. Returns an ``UndeployModelResponse`` in the ``response`` field", "import dataset_pb2 from google.cloud.automl_v1beta1.proto import io_pb2 from google.cloud.automl_v1beta1.proto import model_evaluation_pb2", "retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Gets a dataset. Example: >>>", "A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for", "evaluations for. If modelId is set as \"-\", this will", "user-agent string along with API requests. If ``None``, then default", "dataset string.\"\"\" return google.api_core.path_template.expand( \"projects/{project}/locations/{location}/datasets/{dataset}\", project=project, location=location, dataset=dataset, ) @classmethod", "to wait for the request to complete. 
Note that if", "import google.api_core.gapic_v1.config import google.api_core.gapic_v1.method import google.api_core.grpc_helpers import google.api_core.operation import google.api_core.operations_v1", "all models of the parent location. filter_ (str): An expression", "): \"\"\"Constructor. Args: transport (Union[~.AutoMlGrpcTransport, Callable[[~.Credentials, type], ~.AutoMlGrpcTransport]): A transport", "of cached API call functions. # These are the actual", "add retry and timeout logic. if \"import_data\" not in self._inner_api_calls:", "was done for annotation spec with ID different than 4.", "if channel: warnings.warn( \"The `channel` argument is deprecated; use \"", "io_pb2 from google.cloud.automl_v1beta1.proto import model_evaluation_pb2 from google.cloud.automl_v1beta1.proto import model_pb2 from", "credentials=credentials, default_class=auto_ml_grpc_transport.AutoMlGrpcTransport, ) else: if credentials: raise ValueError( \"Received both", "request. - ``annotation_spec_id`` - for =, != or existence. See", "TODO: Initialize `output_config`: >>> output_config = {} >>> >>> response", "by the server. The server never reuses names that it", "client configuration. 
# (Ordinarily, these are the defaults specified in", "google.api_core.gapic_v1.method.wrap_method( self.transport.deploy_model, default_retry=self._method_configs[\"DeployModel\"].retry, default_timeout=self._method_configs[\"DeployModel\"].timeout, client_info=self._client_info, ) request = service_pb2.DeployModelRequest(name=name) return", "metadata=metadata ) return google.api_core.operation.from_gapic( operation, self.transport._operations_client, empty_pb2.Empty, metadata_type=proto_operations_pb2.OperationMetadata, ) def", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "This argument is mutually exclusive with ``credentials``; providing both will", "list_models( self, parent, filter_=None, page_size=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\"", "-*- # # Copyright 2018 Google LLC # # Licensed", "timeout=timeout, metadata=metadata ) def list_model_evaluations( self, parent, filter_=None, page_size=None, retry=google.api_core.gapic_v1.method.DEFAULT,", "set as \"-\", this will list model evaluations from across", "defaults specified in the `*_config.py` # file next to this", "not in self._inner_api_calls: self._inner_api_calls[ \"export_data\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.export_data, default_retry=self._method_configs[\"ExportData\"].retry,", "location. If a dict is provided, it must be of", "is the key used to # find the method configuration", "Returns: AutoMlClient: The constructed client. \"\"\" credentials = service_account.Credentials.from_service_account_file(filename) kwargs[\"credentials\"]", "class as the second argument. channel (grpc.Channel): DEPRECATED. A ``Channel``", "is not None: warnings.warn( \"The `client_config` argument is deprecated.\", PendingDeprecationWarning,", ">>> response = client.get_model_evaluation(name) Args: name (str): Resource name for", "will raise an exception. 
client_config (dict): DEPRECATED. A dictionary of", "method to add retry and timeout logic. if \"import_data\" not", "dataset): \"\"\"Return a fully-qualified dataset string.\"\"\" return google.api_core.path_template.expand( \"projects/{project}/locations/{location}/datasets/{dataset}\", project=project,", "expression for filtering the results of the request. - ``dataset_metadata``", "Callables will be sent the credentials as the first argument", "to add retry, # timeout, and the like. self._inner_api_calls =", "parent (str): The resource name of the project from which", "in page: ... # process element ... pass Args: parent", "(str): The path to the service account private key json", "\"list_datasets\" not in self._inner_api_calls: self._inner_api_calls[ \"list_datasets\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.list_datasets,", "of the project, from which to list the models. filter_", "self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Gets a model", "self._inner_api_calls[ \"import_data\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.import_data, default_retry=self._method_configs[\"ImportData\"].retry, default_timeout=self._method_configs[\"ImportData\"].timeout, client_info=self._client_info, )", "google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> >>>", "gapic_version=_GAPIC_LIBRARY_VERSION ) else: client_info.gapic_version = _GAPIC_LIBRARY_VERSION self._client_info = client_info #", "method to add retry and timeout logic. if \"undeploy_model\" not", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "being created. model (Union[dict, ~google.cloud.automl_v1beta1.types.Model]): The model to create. If", "metadata. >>> metadata = response.metadata() Args: name (str): Required. The", "and timeout logic. 
if \"import_data\" not in self._inner_api_calls: self._inner_api_calls[ \"import_data\"", "the request failed due to a retryable error and retry", "default_timeout=self._method_configs[\"DeployModel\"].timeout, client_info=self._client_info, ) request = service_pb2.DeployModelRequest(name=name) return self._inner_api_calls[\"deploy_model\"]( request, retry=retry,", "the filter are: - ``annotation_spec_id!=4`` --> The model evaluation was", "constructor. Returns: AutoMlClient: The constructed client. \"\"\" credentials = service_account.Credentials.from_service_account_file(filename)", "if \"deploy_model\" not in self._inner_api_calls: self._inner_api_calls[ \"deploy_model\" ] = google.api_core.gapic_v1.method.wrap_method(", "response. If page streaming is performed per- resource, this parameter", "project=project, location=location, dataset=dataset, ) @classmethod def model_path(cls, project, location, model):", "default_retry=self._method_configs[\"DeleteModel\"].retry, default_timeout=self._method_configs[\"DeleteModel\"].timeout, client_info=self._client_info, ) request = service_pb2.DeleteModelRequest(name=name) operation = self._inner_api_calls[\"delete_model\"](", "bucket. Returns an empty response in the ``response`` field when", "to complete. Note that if ``retry`` is specified, the timeout", "# Parse out the default settings for retry and timeout", "retry=retry, timeout=timeout, metadata=metadata, ), request=request, items_field=\"model\", request_token_field=\"page_token\", response_token_field=\"next_page_token\", ) return", "timeout=timeout, metadata=metadata ) def list_datasets( self, parent, filter_=None, page_size=None, retry=google.api_core.gapic_v1.method.DEFAULT,", "the dataset for. dataset (Union[dict, ~google.cloud.automl_v1beta1.types.Dataset]): The dataset to create.", "to the service. 
if transport: if callable(transport): self.transport = transport(", "name (str): The resource name of the dataset to retrieve.", "name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') >>> >>> response = client.undeploy_model(name)", "request = service_pb2.ImportDataRequest(name=name, input_config=input_config) operation = self._inner_api_calls[\"import_data\"]( request, retry=retry, timeout=timeout,", "when it completes. Example: >>> from google.cloud import automl_v1beta1 >>>", "the transport. # The transport is responsible for handling serialization", "pages of the response through the `options` parameter. Raises: google.api_core.exceptions.GoogleAPICallError:", "the production environment. Returns ``google.protobuf.Empty`` in the ``response`` field when", "callback(operation_future): ... # Handle result. ... result = operation_future.result() >>>", "= automl_v1beta1.AutoMlClient() >>> >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') >>>", "the method. Returns: A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the", "credentials (google.auth.credentials.Credentials): The authorization credentials to attach to requests. These", "names are deleted. An ID of a resource is the", "to make calls. This argument is mutually exclusive with ``credentials``;", "response.metadata() Args: name (str): Required. Dataset name. Dataset must already", "instance. By default, this is an iterable of :class:`~google.cloud.automl_v1beta1.types.ModelEvaluation` instances.", "metadata = response.metadata() Args: parent (str): Resource name of the", "client=None, method=functools.partial( self._inner_api_calls[\"list_model_evaluations\"], retry=retry, timeout=timeout, metadata=metadata, ), request=request, items_field=\"model_evaluation\", request_token_field=\"page_token\",", "model to create. If a dict is provided, it must", "timeout logic. 
if \"get_model\" not in self._inner_api_calls: self._inner_api_calls[ \"get_model\" ]", "specific language governing permissions and # limitations under the License.", "``retry`` is specified, the timeout applies to each individual attempt.", "to ``transport``; doing so will raise an exception. client_config (dict):", "Creates a dataset. Example: >>> from google.cloud import automl_v1beta1 >>>", "self._inner_api_calls[ \"undeploy_model\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.undeploy_model, default_retry=self._method_configs[\"UndeployModel\"].retry, default_timeout=self._method_configs[\"UndeployModel\"].timeout, client_info=self._client_info, )", "authorization credentials to attach to requests. These credentials identify this", "model. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests.", "fully-qualified model string.\"\"\" return google.api_core.path_template.expand( \"projects/{project}/locations/{location}/models/{model}\", project=project, location=location, model=model, )", ") request = service_pb2.CreateDatasetRequest(parent=parent, dataset=dataset) return self._inner_api_calls[\"create_dataset\"]( request, retry=retry, timeout=timeout,", "prediction_service_pb2_grpc from google.cloud.automl_v1beta1.proto import service_pb2 from google.cloud.automl_v1beta1.proto import service_pb2_grpc from", "completes. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client", "client_info=self._client_info, ) request = service_pb2.ListDatasetsRequest( parent=parent, filter=filter_, page_size=page_size ) iterator", "# Handle metadata. >>> metadata = response.metadata() Args: parent (str):", "calls. The default transport uses the gRPC protocol. This argument", "``annotation_spec_id`` - for =, != or existence. 
See example below", "= self._inner_api_calls[\"delete_model\"]( request, retry=retry, timeout=timeout, metadata=metadata ) return google.api_core.operation.from_gapic( operation,", "last element of the item's resource name. For ``projects/{project_id}/locations/{location_id}/datasets/{dataset_id}``, then", "arguments to pass to the constructor. Returns: AutoMlClient: The constructed", "request = service_pb2.GetModelRequest(name=name) return self._inner_api_calls[\"get_model\"]( request, retry=retry, timeout=timeout, metadata=metadata )", "logic. if \"get_model_evaluation\" not in self._inner_api_calls: self._inner_api_calls[ \"get_model_evaluation\" ] =", "@classmethod def location_path(cls, project, location): \"\"\"Return a fully-qualified location string.\"\"\"", "= service_pb2.CreateDatasetRequest(parent=parent, dataset=dataset) return self._inner_api_calls[\"create_dataset\"]( request, retry=retry, timeout=timeout, metadata=metadata )", "same form as the protobuf message :class:`~google.cloud.automl_v1beta1.types.Model` retry (Optional[google.api_core.retry.Retry]): A", "request, retry=retry, timeout=timeout, metadata=metadata ) def get_model_evaluation( self, name, retry=google.api_core.gapic_v1.method.DEFAULT,", "for the last. Some examples of using the filter are:", "warnings from google.oauth2 import service_account import google.api_core.gapic_v1.client_info import google.api_core.gapic_v1.config import", "# you may not use this file except in compliance", "name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Gets a model. Example:", "for existence of the case. - ``dataset_id`` - for =", "invoke the proper # transport methods, wrapped with `wrap_method` to", "self._inner_api_calls[\"get_dataset\"]( request, retry=retry, timeout=timeout, metadata=metadata ) def list_datasets( self, parent,", "a time >>> for page in client.list_model_evaluations(parent).pages: ... 
for element", "handling serialization and # deserialization and actually sending data to", "name (str): Resource name of the model. retry (Optional[google.api_core.retry.Retry]): A", "a Google Cloud Storage bucket. Returns an empty response in", "of the model to list the model evaluations for. If", "constructed client. \"\"\" credentials = service_account.Credentials.from_service_account_file(filename) kwargs[\"credentials\"] = credentials return", ">>> >>> parent = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') >>> >>> #", "self.transport.export_data, default_retry=self._method_configs[\"ExportData\"].retry, default_timeout=self._method_configs[\"ExportData\"].timeout, client_info=self._client_info, ) request = service_pb2.ExportDataRequest(name=name, output_config=output_config) operation", ">>> >>> # Alternatively: >>> >>> # Iterate over results", "= client_info # Parse out the default settings for retry", "last. Some examples of using the filter are: - ``annotation_spec_id!=4``", "retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Deploys model. Returns a ``DeployModelResponse``", "deployed, this only deletes the model in AutoML BE, and", "__init__( self, transport=None, channel=None, credentials=None, client_config=None, client_info=None, ): \"\"\"Constructor. Args:", "google.cloud.automl_v1beta1.proto import model_pb2 from google.cloud.automl_v1beta1.proto import operations_pb2 as proto_operations_pb2 from", "ascertain the credentials from the environment. This argument is mutually", "in self._inner_api_calls: self._inner_api_calls[ \"get_model_evaluation\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.get_model_evaluation, default_retry=self._method_configs[\"GetModelEvaluation\"].retry, default_timeout=self._method_configs[\"GetModelEvaluation\"].timeout,", "of the interface for this client. 
This is the key", "set this if you're developing your own client library. \"\"\"", "timeout logic. if \"import_data\" not in self._inner_api_calls: self._inner_api_calls[ \"import_data\" ]", "= google.api_core.gapic_v1.method.wrap_method( self.transport.delete_dataset, default_retry=self._method_configs[\"DeleteDataset\"].retry, default_timeout=self._method_configs[\"DeleteDataset\"].timeout, client_info=self._client_info, ) request = service_pb2.DeleteDatasetRequest(name=name)", "self.transport.deploy_model, default_retry=self._method_configs[\"DeployModel\"].retry, default_timeout=self._method_configs[\"DeployModel\"].timeout, client_info=self._client_info, ) request = service_pb2.DeployModelRequest(name=name) return self._inner_api_calls[\"deploy_model\"](", "If the parameters are invalid. \"\"\" # Wrap the transport", "'[LOCATION]', '[MODEL]', '[MODEL_EVALUATION]') >>> >>> response = client.get_model_evaluation(name) Args: name", "= client.location_path('[PROJECT]', '[LOCATION]') >>> >>> # TODO: Initialize `model`: >>>", "] = google.api_core.gapic_v1.method.wrap_method( self.transport.create_dataset, default_retry=self._method_configs[\"CreateDataset\"].retry, default_timeout=self._method_configs[\"CreateDataset\"].timeout, client_info=self._client_info, ) request =", "self._inner_api_calls[\"deploy_model\"]( request, retry=retry, timeout=timeout, metadata=metadata ) def undeploy_model( self, name,", "instance of this client using the provided credentials file. Args:", "annotation specs. page_size (int): The maximum number of resources contained", "timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Imports data into a dataset. Returns", "pkg_resources import warnings from google.oauth2 import service_account import google.api_core.gapic_v1.client_info import", "for the item is ``{dataset_id}``. 
\"\"\" SERVICE_ADDRESS = \"automl.googleapis.com:443\" \"\"\"The", "name = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') >>> >>> response = client.get_dataset(name)", "name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Deletes a dataset and", "requests. These credentials identify this application to the service. If", "auto_ml_client_config.config if channel: warnings.warn( \"The `channel` argument is deprecated; use", "timeout logic. if \"delete_dataset\" not in self._inner_api_calls: self._inner_api_calls[ \"delete_dataset\" ]", "to list datasets. filter_ (str): An expression for filtering the", "name (str): Resource name of the model to undeploy. retry", "all results >>> for element in client.list_datasets(parent): ... # process", ") request = service_pb2.CreateModelRequest(parent=parent, model=model) operation = self._inner_api_calls[\"create_model\"]( request, retry=retry,", "under the Apache License, Version 2.0 (the \"License\"); # you", "google.cloud.automl_v1beta1.proto import prediction_service_pb2_grpc from google.cloud.automl_v1beta1.proto import service_pb2 from google.cloud.automl_v1beta1.proto import", "\"create_dataset\" not in self._inner_api_calls: self._inner_api_calls[ \"create_dataset\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.create_dataset,", "The resource name of the dataset to delete. retry (Optional[google.api_core.retry.Retry]):", "\"google.cloud.automl.v1beta1.AutoMl\" @classmethod def from_service_account_file(cls, filename, *args, **kwargs): \"\"\"Creates an instance", "provided to the method. Returns: A :class:`~google.cloud.automl_v1beta1.types.Model` instance. 
Raises: google.api_core.exceptions.GoogleAPICallError:", "google.api_core.gapic_v1.method.wrap_method( self.transport.get_model, default_retry=self._method_configs[\"GetModel\"].retry, default_timeout=self._method_configs[\"GetModel\"].timeout, client_info=self._client_info, ) request = service_pb2.GetModelRequest(name=name) return", "to add retry and timeout logic. if \"list_datasets\" not in", "# Handle result. ... result = operation_future.result() >>> >>> response.add_done_callback(callback)", "any reason. google.api_core.exceptions.RetryError: If the request failed due to a", "= client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') >>> >>> response = client.get_model(name) Args:", "field. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client", "\"\"\" Lists models. Example: >>> from google.cloud import automl_v1beta1 >>>", "iterable of :class:`~google.cloud.automl_v1beta1.types.Model` instances. This object can also be configured", "provided to the method. Returns: A :class:`~google.cloud.automl_v1beta1.types.ModelEvaluation` instance. Raises: google.api_core.exceptions.GoogleAPICallError:", "project from which to list datasets. filter_ (str): An expression", "- ``dataset_metadata`` - for existence of the case. An example", "a global evaluation, and one evaluation for each annotation spec.", "timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Deletes a model. If a model", "... pass Args: parent (str): Resource name of the project,", "request failed for any reason. google.api_core.exceptions.RetryError: If the request failed", "(str): Resource name of the model. retry (Optional[google.api_core.retry.Retry]): A retry", "model_evaluation): \"\"\"Return a fully-qualified model_evaluation string.\"\"\" return google.api_core.path_template.expand( \"projects/{project}/locations/{location}/models/{model}/modelEvaluations/{model_evaluation}\", project=project,", "models. 
filter_ (str): An expression for filtering the results of", "client_info # Parse out the default settings for retry and", "service_pb2.CreateDatasetRequest(parent=parent, dataset=dataset) return self._inner_api_calls[\"create_dataset\"]( request, retry=retry, timeout=timeout, metadata=metadata ) def", ">>> # TODO: Initialize `dataset`: >>> dataset = {} >>>", "go away. if client_config is not None: warnings.warn( \"The `client_config`", "a transport instance and \" \"credentials; these are mutually exclusive.\"", "client.list_datasets(parent): ... # process element ... pass >>> >>> >>>", "Args: name (str): Resource name of the model to undeploy.", "= self._inner_api_calls[\"delete_dataset\"]( request, retry=retry, timeout=timeout, metadata=metadata ) return google.api_core.operation.from_gapic( operation,", "# Iterate over all results >>> for element in client.list_models(parent):", "google.cloud.automl_v1beta1.proto import service_pb2_grpc from google.longrunning import operations_pb2 as longrunning_operations_pb2 from", "is provided to the method. Returns: A :class:`~google.gax.PageIterator` instance. By", "it completes. Example: >>> from google.cloud import automl_v1beta1 >>> >>>", "= service_pb2.ExportDataRequest(name=name, output_config=output_config) operation = self._inner_api_calls[\"export_data\"]( request, retry=retry, timeout=timeout, metadata=metadata", "``google.protobuf.Empty`` in the ``response`` field when it completes, and ``delete_details``", "request = service_pb2.ExportDataRequest(name=name, output_config=output_config) operation = self._inner_api_calls[\"export_data\"]( request, retry=retry, timeout=timeout,", ":class:`~google.cloud.automl_v1beta1.types.Operation` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any", "and ``delete_details`` in the ``metadata`` field. Example: >>> from google.cloud", "a project. 
Example: >>> from google.cloud import automl_v1beta1 >>> >>>", "service_pb2.GetModelRequest(name=name) return self._inner_api_calls[\"get_model\"]( request, retry=retry, timeout=timeout, metadata=metadata ) def list_models(", "request, retry=retry, timeout=timeout, metadata=metadata ) def undeploy_model( self, name, retry=google.api_core.gapic_v1.method.DEFAULT,", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "args: Additional arguments to pass to the constructor. kwargs: Additional", "parent, model, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Creates a model.", "result. ... result = operation_future.result() >>> >>> response.add_done_callback(callback) >>> >>>", "\"credentials; these are mutually exclusive.\" ) self.transport = transport else:", "retry=retry, timeout=timeout, metadata=metadata ) def get_model_evaluation( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT,", "import auto_ml_client_config from google.cloud.automl_v1beta1.gapic import enums from google.cloud.automl_v1beta1.gapic.transports import auto_ml_grpc_transport", ">>> # Alternatively: >>> >>> # Iterate over results one", "import google.api_core.page_iterator import google.api_core.path_template import grpc from google.cloud.automl_v1beta1.gapic import auto_ml_client_config", "project to create the dataset for. dataset (Union[dict, ~google.cloud.automl_v1beta1.types.Dataset]): The", "self._inner_api_calls[\"get_model_evaluation\"]( request, retry=retry, timeout=timeout, metadata=metadata ) def list_model_evaluations( self, parent,", "of :class:`~google.cloud.automl_v1beta1.types.ModelEvaluation` instances. This object can also be configured to", "= client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') >>> >>> response = client.deploy_model(name) Args:", "response = client.import_data(name, input_config) >>> >>> def callback(operation_future): ... 
#", "to add retry and timeout logic. if \"list_models\" not in", "instance and \" \"credentials; these are mutually exclusive.\" ) self.transport", "own client library. \"\"\" # Raise deprecation warnings for things", "and the default transport class as the second argument. channel", "= {} # Service calls def create_dataset( self, parent, dataset,", "a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 #", "timeout=timeout, metadata=metadata, ), request=request, items_field=\"datasets\", request_token_field=\"page_token\", response_token_field=\"next_page_token\", ) return iterator", "A :class:`~google.cloud.automl_v1beta1.types.Operation` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for", ") # Instantiate the transport. # The transport is responsible", "retry and timeout logic. if \"get_dataset\" not in self._inner_api_calls: self._inner_api_calls[", "instance, responsible for actually making the API calls. The default", "is specified, requests will not be retried. timeout (Optional[float]): The", "method. Returns: A :class:`~google.cloud.automl_v1beta1.types.Operation` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request", "longrunning_operations_pb2 from google.protobuf import empty_pb2 _GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution(\"google-cloud-automl\").version class AutoMlClient(object):", "retry=retry, timeout=timeout, metadata=metadata, ), request=request, items_field=\"datasets\", request_token_field=\"page_token\", response_token_field=\"next_page_token\", ) return", "of the dataset to delete. 
retry (Optional[google.api_core.retry.Retry]): A retry object", "it: a global evaluation, and one evaluation for each annotation", "timeout=timeout, metadata=metadata ) return google.api_core.operation.from_gapic( operation, self.transport._operations_client, model_pb2.Model, metadata_type=proto_operations_pb2.OperationMetadata, )", "default_timeout=self._method_configs[\"GetDataset\"].timeout, client_info=self._client_info, ) request = service_pb2.GetDatasetRequest(name=name) return self._inner_api_calls[\"get_dataset\"]( request, retry=retry,", "examples of using the filter are: - ``image_classification_model_metadata:*`` --> The", "5. page_size (int): The maximum number of resources contained in", "kwargs[\"credentials\"] = credentials return cls(*args, **kwargs) from_service_account_json = from_service_account_file @classmethod", "the gRPC protocol. This argument may also be a callable", "sent the credentials as the first argument and the default", "at a time >>> for page in client.list_models(parent).pages: ... for", "transport instance, responsible for actually making the API calls. The", "Dataset name. Dataset must already exist. All imported annotations and", "the client configuration. # (Ordinarily, these are the defaults specified", "timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Gets a model evaluation. Example: >>>", "for element in page: ... # process element ... pass", "def get_dataset( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Gets", "iterate over the pages of the response through the `options`", "# process element ... pass Args: parent (str): Resource name", "\"list_models\" not in self._inner_api_calls: self._inner_api_calls[ \"list_models\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.list_models,", "logic. 
if \"undeploy_model\" not in self._inner_api_calls: self._inner_api_calls[ \"undeploy_model\" ] =", "request = service_pb2.GetModelEvaluationRequest(name=name) return self._inner_api_calls[\"get_model_evaluation\"]( request, retry=retry, timeout=timeout, metadata=metadata )", "--> The model evaluation was done for aggregate of all", "transport method to add retry and timeout logic. if \"undeploy_model\"", ") request = service_pb2.GetModelEvaluationRequest(name=name) return self._inner_api_calls[\"get_model_evaluation\"]( request, retry=retry, timeout=timeout, metadata=metadata", "import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> >>> name", "``translation_dataset_metadata:*`` --> The dataset has translation\\_dataset\\_metadata. page_size (int): The maximum", "timeout=timeout, metadata=metadata, ), request=request, items_field=\"model_evaluation\", request_token_field=\"page_token\", response_token_field=\"next_page_token\", ) return iterator", "data to the service. if transport: if callable(transport): self.transport =", ">>> client = automl_v1beta1.AutoMlClient() >>> >>> parent = client.location_path('[PROJECT]', '[LOCATION]')", ") self.transport = transport else: self.transport = auto_ml_grpc_transport.AutoMlGrpcTransport( address=self.SERVICE_ADDRESS, channel=channel,", "if \"import_data\" not in self._inner_api_calls: self._inner_api_calls[ \"import_data\" ] = google.api_core.gapic_v1.method.wrap_method(", ">>> >>> response = client.get_model(name) Args: name (str): Resource name", "deprecation warnings for things we want to go away. if", "deploy. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests.", "method to add retry and timeout logic. 
if \"get_model_evaluation\" not", "google.cloud.automl_v1beta1.gapic import enums from google.cloud.automl_v1beta1.gapic.transports import auto_ml_grpc_transport from google.cloud.automl_v1beta1.proto import", "are the actual callables which invoke the proper # transport", "the request to complete. Note that if ``retry`` is specified,", "configuration in the client_config dictionary. _INTERFACE_NAME = \"google.cloud.automl.v1beta1.AutoMl\" @classmethod def", "Apache License, Version 2.0 (the \"License\"); # you may not", "return self._inner_api_calls[\"get_dataset\"]( request, retry=retry, timeout=timeout, metadata=metadata ) def list_datasets( self,", "either express or implied. # See the License for the", "The name of the interface for this client. This is", "things we want to go away. if client_config is not", "and timeout logic. if \"get_model_evaluation\" not in self._inner_api_calls: self._inner_api_calls[ \"get_model_evaluation\"", "License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "to the method. Returns: A :class:`~google.cloud.automl_v1beta1.types.Dataset` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If", "= google.api_core.page_iterator.GRPCIterator( client=None, method=functools.partial( self._inner_api_calls[\"list_datasets\"], retry=retry, timeout=timeout, metadata=metadata, ), request=request,", "of the model to deploy. retry (Optional[google.api_core.retry.Retry]): A retry object", "(grpc.Channel): DEPRECATED. A ``Channel`` instance through which to make calls.", "default_retry=self._method_configs[\"GetModelEvaluation\"].retry, default_timeout=self._method_configs[\"GetModelEvaluation\"].timeout, client_info=self._client_info, ) request = service_pb2.GetModelEvaluationRequest(name=name) return self._inner_api_calls[\"get_model_evaluation\"]( request,", "the underlying API response. 
If page streaming is performed per-", "argument is deprecated; use \" \"`transport` instead.\", PendingDeprecationWarning, stacklevel=2, )", ">>> response = client.export_data(name, output_config) >>> >>> def callback(operation_future): ...", "attempts failed. ValueError: If the parameters are invalid. \"\"\" #", "\"import_data\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.import_data, default_retry=self._method_configs[\"ImportData\"].retry, default_timeout=self._method_configs[\"ImportData\"].timeout, client_info=self._client_info, ) request", "response.metadata() Args: name (str): Required. The resource name of the", "if you're developing your own client library. \"\"\" # Raise", "the default configuration is used. client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info", "prediction_service_pb2 from google.cloud.automl_v1beta1.proto import prediction_service_pb2_grpc from google.cloud.automl_v1beta1.proto import service_pb2 from", "create the dataset for. dataset (Union[dict, ~google.cloud.automl_v1beta1.types.Dataset]): The dataset to", "= service_pb2.ListDatasetsRequest( parent=parent, filter=filter_, page_size=page_size ) iterator = google.api_core.page_iterator.GRPCIterator( client=None,", "timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Lists models. Example: >>> from google.cloud", ":class:`~google.gax.PageIterator` instance. By default, this is an iterable of :class:`~google.cloud.automl_v1beta1.types.ModelEvaluation`", "are created for it: a global evaluation, and one evaluation", "deleted. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests.", ") request = service_pb2.ExportDataRequest(name=name, output_config=output_config) operation = self._inner_api_calls[\"export_data\"]( request, retry=retry,", "metadata=None, ): \"\"\" Lists models. Example: >>> from google.cloud import", "client.list_models(parent).pages: ... for element in page: ... 
# process element", "will be used. Generally, you only need to set this", "create_model( self, parent, model, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Creates", "add retry and timeout logic. if \"list_model_evaluations\" not in self._inner_api_calls:", "'[LOCATION]', '[MODEL]') >>> >>> response = client.delete_model(name) >>> >>> def", "logic. if \"deploy_model\" not in self._inner_api_calls: self._inner_api_calls[ \"deploy_model\" ] =", "below for the last. Some examples of using the filter", "name, output_config, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Exports dataset's data", "default_retry=self._method_configs[\"GetModel\"].retry, default_timeout=self._method_configs[\"GetModel\"].timeout, client_info=self._client_info, ) request = service_pb2.GetModelRequest(name=name) return self._inner_api_calls[\"get_model\"]( request,", "already deployed, this only deletes the model in AutoML BE,", "from google.cloud.automl_v1beta1.proto import service_pb2_grpc from google.longrunning import operations_pb2 as longrunning_operations_pb2", "The resource name of the dataset. output_config (Union[dict, ~google.cloud.automl_v1beta1.types.OutputConfig]): Required.", "else: self.transport = auto_ml_grpc_transport.AutoMlGrpcTransport( address=self.SERVICE_ADDRESS, channel=channel, credentials=credentials ) if client_info", "or !=. 
Some examples of using the filter are: -", "client=None, method=functools.partial( self._inner_api_calls[\"list_models\"], retry=retry, timeout=timeout, metadata=metadata, ), request=request, items_field=\"model\", request_token_field=\"page_token\",", "= self._inner_api_calls[\"import_data\"]( request, retry=retry, timeout=timeout, metadata=metadata ) return google.api_core.operation.from_gapic( operation,", "list model evaluations from across all models of the parent", "request = service_pb2.CreateDatasetRequest(parent=parent, dataset=dataset) return self._inner_api_calls[\"create_dataset\"]( request, retry=retry, timeout=timeout, metadata=metadata", "google.api_core.gapic_v1.method.wrap_method( self.transport.import_data, default_retry=self._method_configs[\"ImportData\"].retry, default_timeout=self._method_configs[\"ImportData\"].timeout, client_info=self._client_info, ) request = service_pb2.ImportDataRequest(name=name, input_config=input_config)", "streaming is performed per-page, this determines the maximum number of", "input_config=input_config) operation = self._inner_api_calls[\"import_data\"]( request, retry=retry, timeout=timeout, metadata=metadata ) return", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "undeploy_model( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Undeploys model.", "transport method to add retry and timeout logic. if \"get_model\"", "**kwargs) from_service_account_json = from_service_account_file @classmethod def location_path(cls, project, location): \"\"\"Return", "to the constructor. kwargs: Additional arguments to pass to the", "filter are: - ``image_classification_model_metadata:*`` --> The model has image\\_classification\\_model\\_metadata. -", "pass to the constructor. Returns: AutoMlClient: The constructed client. \"\"\"", "for page in client.list_models(parent).pages: ... 
for element in page: ...", ">>> response = client.delete_model(name) >>> >>> def callback(operation_future): ... #", "channel: warnings.warn( \"The `channel` argument is deprecated; use \" \"`transport`", "response.metadata() Args: name (str): Resource name of the model being", "Returns: A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed", "def __init__( self, transport=None, channel=None, credentials=None, client_config=None, client_info=None, ): \"\"\"Constructor.", "credentials identify this application to the service. If none are", "is mutually exclusive with providing a transport instance to ``transport``;", "= operation_future.result() >>> >>> response.add_done_callback(callback) >>> >>> # Handle metadata.", "request, retry=retry, timeout=timeout, metadata=metadata ) def list_datasets( self, parent, filter_=None,", "already exist. All imported annotations and examples will be added.", "is specified, the timeout applies to each individual attempt. metadata", "of the response through the `options` parameter. Raises: google.api_core.exceptions.GoogleAPICallError: If", "are: - ``annotation_spec_id!=4`` --> The model evaluation was done for", "model): \"\"\"Return a fully-qualified model string.\"\"\" return google.api_core.path_template.expand( \"projects/{project}/locations/{location}/models/{model}\", project=project,", "parent = client.location_path('[PROJECT]', '[LOCATION]') >>> >>> # TODO: Initialize `model`:", "= automl_v1beta1.AutoMlClient() >>> >>> name = client.model_evaluation_path('[PROJECT]', '[LOCATION]', '[MODEL]', '[MODEL_EVALUATION]')", "# limitations under the License. \"\"\"Accesses the google.cloud.automl.v1beta1 AutoMl API.\"\"\"", "file next to this one.) self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( client_config[\"interfaces\"][self._INTERFACE_NAME] )", "you're developing your own client library. 
\"\"\" # Raise deprecation", "the return value. If page streaming is performed per-page, this", "timeout logic. if \"list_models\" not in self._inner_api_calls: self._inner_api_calls[ \"list_models\" ]", ">>> # Handle metadata. >>> metadata = response.metadata() Args: name", "API requests. If ``None``, then default info will be used.", "list the models. filter_ (str): An expression for filtering the", "to add retry and timeout logic. if \"list_model_evaluations\" not in", "not in self._inner_api_calls: self._inner_api_calls[ \"list_model_evaluations\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.list_model_evaluations, default_retry=self._method_configs[\"ListModelEvaluations\"].retry,", "to the method. Returns: A :class:`~google.cloud.automl_v1beta1.types.ModelEvaluation` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If", "so will raise an exception. client_config (dict): DEPRECATED. A dictionary", "client_config=None, client_info=None, ): \"\"\"Constructor. Args: transport (Union[~.AutoMlGrpcTransport, Callable[[~.Credentials, type], ~.AutoMlGrpcTransport]):", "the ``response`` field when it completes. Example: >>> from google.cloud", "with API requests. If ``None``, then default info will be", "process element ... pass Args: parent (str): The resource name", "name. For ``projects/{project_id}/locations/{location_id}/datasets/{dataset_id}``, then the id for the item is", "output_config=output_config) operation = self._inner_api_calls[\"export_data\"]( request, retry=retry, timeout=timeout, metadata=metadata ) return", "'[LOCATION]') >>> >>> # Iterate over all results >>> for", "import service_account import google.api_core.gapic_v1.client_info import google.api_core.gapic_v1.config import google.api_core.gapic_v1.method import google.api_core.grpc_helpers", "Iterate over results one page at a time >>> for", "Dataset must already exist. 
All imported annotations and examples will", "filter=filter_, page_size=page_size ) iterator = google.api_core.page_iterator.GRPCIterator( client=None, method=functools.partial( self._inner_api_calls[\"list_model_evaluations\"], retry=retry,", "in the underlying API response. If page streaming is performed", "response_token_field=\"next_page_token\", ) return iterator def delete_dataset( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT,", "google.api_core.exceptions.RetryError: If the request failed due to a retryable error", "= google.api_core.page_iterator.GRPCIterator( client=None, method=functools.partial( self._inner_api_calls[\"list_model_evaluations\"], retry=retry, timeout=timeout, metadata=metadata, ), request=request,", "the same form as the protobuf message :class:`~google.cloud.automl_v1beta1.types.Dataset` retry (Optional[google.api_core.retry.Retry]):", "used. client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a", "resource name of the dataset. output_config (Union[dict, ~google.cloud.automl_v1beta1.types.OutputConfig]): Required. The", "(str): Resource name of the project, from which to list", "self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( client_config[\"interfaces\"][self._INTERFACE_NAME] ) # Save a dictionary of", "Resource name of the model. retry (Optional[google.api_core.retry.Retry]): A retry object", ") else: if credentials: raise ValueError( \"Received both a transport", "model evaluation. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry", "are invalid. \"\"\" # Wrap the transport method to add", "logic. if \"list_datasets\" not in self._inner_api_calls: self._inner_api_calls[ \"list_datasets\" ] =", "location_path(cls, project, location): \"\"\"Return a fully-qualified location string.\"\"\" return google.api_core.path_template.expand(", "and timeout logic. 
if \"delete_model\" not in self._inner_api_calls: self._inner_api_calls[ \"delete_model\"", "examples of using the filter are: - ``annotation_spec_id!=4`` --> The", "performed per- resource, this parameter does not affect the return", "existence of the case. An example of using the filter", "is already deployed, this only deletes the model in AutoML", "= google.api_core.gapic_v1.method.wrap_method( self.transport.import_data, default_retry=self._method_configs[\"ImportData\"].retry, default_timeout=self._method_configs[\"ImportData\"].timeout, client_info=self._client_info, ) request = service_pb2.ImportDataRequest(name=name,", "the same form as the protobuf message :class:`~google.cloud.automl_v1beta1.types.Model` retry (Optional[google.api_core.retry.Retry]):", "model. Returns an ``UndeployModelResponse`` in the ``response`` field when it", ">>> # Iterate over all results >>> for element in", "the first argument and the default transport class as the", "use this file except in compliance with the License. #", "credentials to attach to requests. These credentials identify this application", "through the `options` parameter. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed", "resource name. 
For ``projects/{project_id}/locations/{location_id}/datasets/{dataset_id}``, then the id for the item", "import google.api_core.operation import google.api_core.operations_v1 import google.api_core.page_iterator import google.api_core.path_template import grpc", "request = service_pb2.CreateModelRequest(parent=parent, model=model) operation = self._inner_api_calls[\"create_model\"]( request, retry=retry, timeout=timeout,", "google.api_core.gapic_v1.method.wrap_method( self.transport.delete_dataset, default_retry=self._method_configs[\"DeleteDataset\"].retry, default_timeout=self._method_configs[\"DeleteDataset\"].timeout, client_info=self._client_info, ) request = service_pb2.DeleteDatasetRequest(name=name) operation", "dict is provided, it must be of the same form", "- for = or !=. Some examples of using the", "resource names are assigned by the server. The server never", "items_field=\"model\", request_token_field=\"page_token\", response_token_field=\"next_page_token\", ) return iterator def delete_model( self, name,", "location, model, model_evaluation): \"\"\"Return a fully-qualified model_evaluation string.\"\"\" return google.api_core.path_template.expand(", "self._inner_api_calls[ \"get_dataset\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.get_dataset, default_retry=self._method_configs[\"GetDataset\"].retry, default_timeout=self._method_configs[\"GetDataset\"].timeout, client_info=self._client_info, )", "request. - ``model_metadata`` - for existence of the case. -", "default transport class as the second argument. channel (grpc.Channel): DEPRECATED.", "expression for filtering the results of the request. - ``model_metadata``", "in the production environment. Returns ``google.protobuf.Empty`` in the ``response`` field", ">>> >>> name = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') >>> >>> response", "logic. 
if \"delete_dataset\" not in self._inner_api_calls: self._inner_api_calls[ \"delete_dataset\" ] =", "self.transport.delete_dataset, default_retry=self._method_configs[\"DeleteDataset\"].retry, default_timeout=self._method_configs[\"DeleteDataset\"].timeout, client_info=self._client_info, ) request = service_pb2.DeleteDatasetRequest(name=name) operation =", "in self._inner_api_calls: self._inner_api_calls[ \"deploy_model\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.deploy_model, default_retry=self._method_configs[\"DeployModel\"].retry, default_timeout=self._method_configs[\"DeployModel\"].timeout,", "from google.cloud.automl_v1beta1.gapic import auto_ml_client_config from google.cloud.automl_v1beta1.gapic import enums from google.cloud.automl_v1beta1.gapic.transports", "client.list_model_evaluations(parent).pages: ... for element in page: ... # process element", "deprecated.\", PendingDeprecationWarning, stacklevel=2, ) else: client_config = auto_ml_client_config.config if channel:", "to the method. Returns: A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If", ") request = service_pb2.DeleteDatasetRequest(name=name) operation = self._inner_api_calls[\"delete_dataset\"]( request, retry=retry, timeout=timeout,", ">>> response = client.import_data(name, input_config) >>> >>> def callback(operation_future): ...", "stacklevel=2, ) # Instantiate the transport. # The transport is", "method to add retry and timeout logic. 
if \"get_model\" not", "\"deploy_model\" not in self._inner_api_calls: self._inner_api_calls[ \"deploy_model\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.deploy_model,", "modelId is set as \"-\", this will list model evaluations", "\"\"\" credentials = service_account.Credentials.from_service_account_file(filename) kwargs[\"credentials\"] = credentials return cls(*args, **kwargs)", ") def import_data( self, name, input_config, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ):", "of the model being deleted. retry (Optional[google.api_core.retry.Retry]): A retry object", "from_service_account_file(cls, filename, *args, **kwargs): \"\"\"Creates an instance of this client", "pass Args: parent (str): The resource name of the project", "the ``metadata`` field. Example: >>> from google.cloud import automl_v1beta1 >>>", "need to set this if you're developing your own client", "retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Lists models. Example: >>> from", "self._inner_api_calls: self._inner_api_calls[ \"delete_dataset\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.delete_dataset, default_retry=self._method_configs[\"DeleteDataset\"].retry, default_timeout=self._method_configs[\"DeleteDataset\"].timeout, client_info=self._client_info,", "project=project, location=location, ) @classmethod def dataset_path(cls, project, location, dataset): \"\"\"Return", "google.api_core.page_iterator.GRPCIterator( client=None, method=functools.partial( self._inner_api_calls[\"list_model_evaluations\"], retry=retry, timeout=timeout, metadata=metadata, ), request=request, items_field=\"model_evaluation\",", ">>> client = automl_v1beta1.AutoMlClient() >>> >>> name = client.dataset_path('[PROJECT]', '[LOCATION]',", "provided to the method. Returns: A :class:`~google.cloud.automl_v1beta1.types.Operation` instance. 
Raises: google.api_core.exceptions.GoogleAPICallError:", "(str): Resource name for the model evaluation. retry (Optional[google.api_core.retry.Retry]): A", "in compliance with the License. # You may obtain a", "to pass to the constructor. kwargs: Additional arguments to pass", "an iterable of :class:`~google.cloud.automl_v1beta1.types.ModelEvaluation` instances. This object can also be", "want to go away. if client_config is not None: warnings.warn(", "software # distributed under the License is distributed on an", "request, retry=retry, timeout=timeout, metadata=metadata ) def get_dataset( self, name, retry=google.api_core.gapic_v1.method.DEFAULT,", ">>> >>> parent = client.location_path('[PROJECT]', '[LOCATION]') >>> >>> # TODO:", "= client.delete_model(name) >>> >>> def callback(operation_future): ... # Handle result.", "input_config (Union[dict, ~google.cloud.automl_v1beta1.types.InputConfig]): Required. The desired input location. If a", "when it completes, and ``delete_details`` in the ``metadata`` field. Example:", "def undeploy_model( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Undeploys", "and timeout logic. 
if \"get_dataset\" not in self._inner_api_calls: self._inner_api_calls[ \"get_dataset\"", ") return google.api_core.operation.from_gapic( operation, self.transport._operations_client, empty_pb2.Empty, metadata_type=proto_operations_pb2.OperationMetadata, ) def create_model(", "self._inner_api_calls[ \"get_model_evaluation\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.get_model_evaluation, default_retry=self._method_configs[\"GetModelEvaluation\"].retry, default_timeout=self._method_configs[\"GetModelEvaluation\"].timeout, client_info=self._client_info, )", ">>> >>> >>> # Alternatively: >>> >>> # Iterate over", "google.api_core.gapic_v1.client_info import google.api_core.gapic_v1.config import google.api_core.gapic_v1.method import google.api_core.grpc_helpers import google.api_core.operation import", "self._inner_api_calls: self._inner_api_calls[ \"list_model_evaluations\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.list_model_evaluations, default_retry=self._method_configs[\"ListModelEvaluations\"].retry, default_timeout=self._method_configs[\"ListModelEvaluations\"].timeout, client_info=self._client_info,", "evaluations from across all models of the parent location. filter_", "automl_v1beta1.AutoMlClient() >>> >>> parent = client.location_path('[PROJECT]', '[LOCATION]') >>> >>> #", "not affect the return value. If page streaming is performed", "return iterator def delete_model( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ):", "google.api_core.page_iterator.GRPCIterator( client=None, method=functools.partial( self._inner_api_calls[\"list_datasets\"], retry=retry, timeout=timeout, metadata=metadata, ), request=request, items_field=\"datasets\",", "using the provided credentials file. 
Args: filename (str): The path", "= client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') >>> >>> # TODO: Initialize `output_config`:", "automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> >>> name =", "credentials from the environment. This argument is mutually exclusive with", "self._inner_api_calls[ \"deploy_model\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.deploy_model, default_retry=self._method_configs[\"DeployModel\"].retry, default_timeout=self._method_configs[\"DeployModel\"].timeout, client_info=self._client_info, )", "import service_pb2 from google.cloud.automl_v1beta1.proto import service_pb2_grpc from google.longrunning import operations_pb2", "(Union[~.AutoMlGrpcTransport, Callable[[~.Credentials, type], ~.AutoMlGrpcTransport]): A transport instance, responsible for actually", "service_pb2.GetModelEvaluationRequest(name=name) return self._inner_api_calls[\"get_model_evaluation\"]( request, retry=retry, timeout=timeout, metadata=metadata ) def list_model_evaluations(", "the parent project where the model is being created. model", "If none are specified, the client will attempt to ascertain", "model. Returns a ``DeployModelResponse`` in the ``response`` field when it", "\"get_model_evaluation\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.get_model_evaluation, default_retry=self._method_configs[\"GetModelEvaluation\"].retry, default_timeout=self._method_configs[\"GetModelEvaluation\"].timeout, client_info=self._client_info, ) request", "retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Gets a model evaluation. Example:", "logic. 
if \"import_data\" not in self._inner_api_calls: self._inner_api_calls[ \"import_data\" ] =", "google.api_core.gapic_v1.method.wrap_method( self.transport.create_model, default_retry=self._method_configs[\"CreateModel\"].retry, default_timeout=self._method_configs[\"CreateModel\"].timeout, client_info=self._client_info, ) request = service_pb2.CreateModelRequest(parent=parent, model=model)", "those names are deleted. An ID of a resource is", "The authorization credentials to attach to requests. These credentials identify", "assigned by the server. The server never reuses names that", "response = client.create_dataset(parent, dataset) Args: parent (str): The resource name", "operation, self.transport._operations_client, empty_pb2.Empty, metadata_type=proto_operations_pb2.OperationMetadata, ) def export_data( self, name, output_config,", "instance. Callables will be sent the credentials as the first", "to add retry and timeout logic. if \"deploy_model\" not in", "= client.get_model_evaluation(name) Args: name (str): Resource name for the model", "# # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "retry and timeout logic. if \"import_data\" not in self._inner_api_calls: self._inner_api_calls[", "@classmethod def dataset_path(cls, project, location, dataset): \"\"\"Return a fully-qualified dataset", "client.location_path('[PROJECT]', '[LOCATION]') >>> >>> # TODO: Initialize `model`: >>> model", "=, != or existence. 
See example below for the last.", "\"\"\" # Wrap the transport method to add retry and", "for handling serialization and # deserialization and actually sending data", "google.api_core.operation.from_gapic( operation, self.transport._operations_client, empty_pb2.Empty, metadata_type=proto_operations_pb2.OperationMetadata, ) def export_data( self, name,", "\"create_model\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.create_model, default_retry=self._method_configs[\"CreateModel\"].retry, default_timeout=self._method_configs[\"CreateModel\"].timeout, client_info=self._client_info, ) request", "metadata = response.metadata() Args: name (str): Required. Dataset name. Dataset", "return google.api_core.operation.from_gapic( operation, self.transport._operations_client, empty_pb2.Empty, metadata_type=proto_operations_pb2.OperationMetadata, ) def create_model( self,", "# Iterate over all results >>> for element in client.list_datasets(parent):", "resource is the last element of the item's resource name.", "client = automl_v1beta1.AutoMlClient() >>> >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]')", "self._inner_api_calls[\"undeploy_model\"]( request, retry=retry, timeout=timeout, metadata=metadata ) def get_model_evaluation( self, name,", "pass to the constructor. kwargs: Additional arguments to pass to", "model evaluations from across all models of the parent location.", "self._inner_api_calls[\"export_data\"]( request, retry=retry, timeout=timeout, metadata=metadata ) return google.api_core.operation.from_gapic( operation, self.transport._operations_client,", "self, parent, filter_=None, page_size=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Lists", "= client.create_dataset(parent, dataset) Args: parent (str): The resource name of", "several model evaluations are created for it: a global evaluation,", "for filtering the results of the request. 
- ``model_metadata`` -", "name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Deploys model. Returns a", "along with API requests. If ``None``, then default info will", "= {} >>> >>> response = client.create_dataset(parent, dataset) Args: parent", "] = google.api_core.gapic_v1.method.wrap_method( self.transport.get_model_evaluation, default_retry=self._method_configs[\"GetModelEvaluation\"].retry, default_timeout=self._method_configs[\"GetModelEvaluation\"].timeout, client_info=self._client_info, ) request =", "add retry and timeout logic. if \"get_model\" not in self._inner_api_calls:", "If the request failed for any reason. google.api_core.exceptions.RetryError: If the", "with the License. # You may obtain a copy of", "from google.cloud.automl_v1beta1.proto import prediction_service_pb2_grpc from google.cloud.automl_v1beta1.proto import service_pb2 from google.cloud.automl_v1beta1.proto", ":class:`~google.gax.PageIterator` instance. By default, this is an iterable of :class:`~google.cloud.automl_v1beta1.types.Dataset`", "AutoML Server API. The resource names are assigned by the", "the server. The server never reuses names that it has", "item's resource name. For ``projects/{project_id}/locations/{location_id}/datasets/{dataset_id}``, then the id for the", "client.get_dataset(name) Args: name (str): The resource name of the dataset", "(dict): DEPRECATED. A dictionary of call options for each method.", "to add retry and timeout logic. 
if \"create_dataset\" not in", "LLC # # Licensed under the Apache License, Version 2.0", "\"import_data\" not in self._inner_api_calls: self._inner_api_calls[ \"import_data\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.import_data,", "'[LOCATION]', '[MODEL]') >>> >>> response = client.undeploy_model(name) Args: name (str):", "Returns empty response in the ``response`` field when it completes,", "The path to the service account private key json file.", "(str): Resource name of the parent project where the model", "data to a Google Cloud Storage bucket. Returns an empty", "logic. if \"get_model\" not in self._inner_api_calls: self._inner_api_calls[ \"get_model\" ] =", "Gets a model evaluation. Example: >>> from google.cloud import automl_v1beta1", "= google.api_core.gapic_v1.method.wrap_method( self.transport.create_dataset, default_retry=self._method_configs[\"CreateDataset\"].retry, default_timeout=self._method_configs[\"CreateDataset\"].timeout, client_info=self._client_info, ) request = service_pb2.CreateDatasetRequest(parent=parent,", "all results >>> for element in client.list_models(parent): ... # process", "is set as \"-\", this will list model evaluations from", "express or implied. # See the License for the specific", "add retry and timeout logic. if \"create_dataset\" not in self._inner_api_calls:", "except in compliance with the License. # You may obtain", "A :class:`~google.gax.PageIterator` instance. By default, this is an iterable of", "API calls. The default transport uses the gRPC protocol. This", "timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Lists datasets in a project. 
Example:", "self.transport._operations_client, empty_pb2.Empty, metadata_type=proto_operations_pb2.OperationMetadata, ) def import_data( self, name, input_config, retry=google.api_core.gapic_v1.method.DEFAULT,", "transport: if callable(transport): self.transport = transport( credentials=credentials, default_class=auto_ml_grpc_transport.AutoMlGrpcTransport, ) else:", "providing a transport instance to ``transport``; doing so will raise", "resource name of the dataset to retrieve. retry (Optional[google.api_core.retry.Retry]): A", "metadata_type=proto_operations_pb2.OperationMetadata, ) def export_data( self, name, output_config, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None,", "= client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') >>> >>> # Iterate over all", "name of the model being deleted. retry (Optional[google.api_core.retry.Retry]): A retry", "default address of the service.\"\"\" # The name of the", "timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Undeploys model. Returns an ``UndeployModelResponse`` in", "this is an iterable of :class:`~google.cloud.automl_v1beta1.types.ModelEvaluation` instances. This object can", "~google.cloud.automl_v1beta1.types.OutputConfig]): Required. The desired output location. If a dict is", "not in self._inner_api_calls: self._inner_api_calls[ \"delete_model\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.delete_model, default_retry=self._method_configs[\"DeleteModel\"].retry,", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "metadata=None, ): \"\"\" Deletes a model. 
If a model is", "service_pb2.ExportDataRequest(name=name, output_config=output_config) operation = self._inner_api_calls[\"export_data\"]( request, retry=retry, timeout=timeout, metadata=metadata )", "client info used to send a user-agent string along with", "is None: client_info = google.api_core.gapic_v1.client_info.ClientInfo( gapic_version=_GAPIC_LIBRARY_VERSION ) else: client_info.gapic_version =", "number of resources contained in the underlying API response. If", "these are mutually exclusive.\" ) self.transport = transport else: self.transport", "google.api_core.path_template.expand( \"projects/{project}/locations/{location}/datasets/{dataset}\", project=project, location=location, dataset=dataset, ) @classmethod def model_path(cls, project,", "self._inner_api_calls: self._inner_api_calls[ \"export_data\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.export_data, default_retry=self._method_configs[\"ExportData\"].retry, default_timeout=self._method_configs[\"ExportData\"].timeout, client_info=self._client_info,", "a time >>> for page in client.list_models(parent).pages: ... for element", "project, from which to list the models. filter_ (str): An", "default settings for retry and timeout for each RPC #", "callable which returns a transport instance. Callables will be sent", "as the second argument. channel (grpc.Channel): DEPRECATED. A ``Channel`` instance", ") @classmethod def dataset_path(cls, project, location, dataset): \"\"\"Return a fully-qualified", "retry=retry, timeout=timeout, metadata=metadata ) return google.api_core.operation.from_gapic( operation, self.transport._operations_client, model_pb2.Model, metadata_type=proto_operations_pb2.OperationMetadata,", ":class:`~google.cloud.automl_v1beta1.types.Model` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests.", "CONDITIONS OF ANY KIND, either express or implied. # See", "name of the dataset to delete. 
retry (Optional[google.api_core.retry.Retry]): A retry", "service_account import google.api_core.gapic_v1.client_info import google.api_core.gapic_v1.config import google.api_core.gapic_v1.method import google.api_core.grpc_helpers import", "identify this application to the service. If none are specified,", ":class:`~google.cloud.automl_v1beta1.types.Model` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any", "service account private key json file. args: Additional arguments to", "class AutoMlClient(object): \"\"\" AutoML Server API. The resource names are", "configuration is used. client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to", "a ``DeployModelResponse`` in the ``response`` field when it completes. Example:", "Resource name for the model evaluation. retry (Optional[google.api_core.retry.Retry]): A retry", "across all models of the parent location. filter_ (str): An", "str]]]): Additional metadata that is provided to the method. Returns:", "default_timeout=self._method_configs[\"CreateModel\"].timeout, client_info=self._client_info, ) request = service_pb2.CreateModelRequest(parent=parent, model=model) operation = self._inner_api_calls[\"create_model\"](", "be of the same form as the protobuf message :class:`~google.cloud.automl_v1beta1.types.Model`", "pass Args: parent (str): Resource name of the project, from", "= from_service_account_file @classmethod def location_path(cls, project, location): \"\"\"Return a fully-qualified", "of the item's resource name. For ``projects/{project_id}/locations/{location_id}/datasets/{dataset_id}``, then the id", "_GAPIC_LIBRARY_VERSION self._client_info = client_info # Parse out the default settings", "method to add retry and timeout logic. if \"delete_dataset\" not", "timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Exports dataset's data to a Google", "retryable error and retry attempts failed. 
ValueError: If the parameters", "will attempt to ascertain the credentials from the environment. This", ":class:`~google.cloud.automl_v1beta1.types.OutputConfig` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests.", "as the protobuf message :class:`~google.cloud.automl_v1beta1.types.InputConfig` retry (Optional[google.api_core.retry.Retry]): A retry object", "the service. If none are specified, the client will attempt", "the transport method to add retry and timeout logic. if", "uses the gRPC protocol. This argument may also be a", "file. Args: filename (str): The path to the service account", "``image_classification_model_metadata:*`` --> The model has image\\_classification\\_model\\_metadata. - ``dataset_id=5`` --> The", "service_pb2.DeleteModelRequest(name=name) operation = self._inner_api_calls[\"delete_model\"]( request, retry=retry, timeout=timeout, metadata=metadata ) return", "timeout=timeout, metadata=metadata ) def get_dataset( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None,", ") request = service_pb2.GetDatasetRequest(name=name) return self._inner_api_calls[\"get_dataset\"]( request, retry=retry, timeout=timeout, metadata=metadata", "page in client.list_datasets(parent).pages: ... for element in page: ... #", "are assigned by the server. 
The server never reuses names", "as longrunning_operations_pb2 from google.protobuf import empty_pb2 _GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution(\"google-cloud-automl\").version class", "not in self._inner_api_calls: self._inner_api_calls[ \"get_model_evaluation\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.get_model_evaluation, default_retry=self._method_configs[\"GetModelEvaluation\"].retry,", "model, several model evaluations are created for it: a global", ") def get_dataset( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\"", "transport else: self.transport = auto_ml_grpc_transport.AutoMlGrpcTransport( address=self.SERVICE_ADDRESS, channel=channel, credentials=credentials ) if", "in seconds, to wait for the request to complete. Note", ") def __init__( self, transport=None, channel=None, credentials=None, client_config=None, client_info=None, ):", "= client.export_data(name, output_config) >>> >>> def callback(operation_future): ... # Handle", "``dataset_metadata`` - for existence of the case. An example of", "argument. channel (grpc.Channel): DEPRECATED. 
A ``Channel`` instance through which to", ">>> >>> response = client.delete_dataset(name) >>> >>> def callback(operation_future): ...", "Some examples of using the filter are: - ``image_classification_model_metadata:*`` -->", "credentials=credentials ) if client_info is None: client_info = google.api_core.gapic_v1.client_info.ClientInfo( gapic_version=_GAPIC_LIBRARY_VERSION", "self.transport = transport( credentials=credentials, default_class=auto_ml_grpc_transport.AutoMlGrpcTransport, ) else: if credentials: raise", "'[MODEL]', '[MODEL_EVALUATION]') >>> >>> response = client.get_model_evaluation(name) Args: name (str):", "google.api_core.gapic_v1.method import google.api_core.grpc_helpers import google.api_core.operation import google.api_core.operations_v1 import google.api_core.page_iterator import", "default_retry=self._method_configs[\"CreateDataset\"].retry, default_timeout=self._method_configs[\"CreateDataset\"].timeout, client_info=self._client_info, ) request = service_pb2.CreateDatasetRequest(parent=parent, dataset=dataset) return self._inner_api_calls[\"create_dataset\"](", "warnings.warn( \"The `channel` argument is deprecated; use \" \"`transport` instead.\",", "actually sending data to the service. if transport: if callable(transport):", "TODO: Initialize `input_config`: >>> input_config = {} >>> >>> response", "(Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method.", "_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution(\"google-cloud-automl\").version class AutoMlClient(object): \"\"\" AutoML Server API. 
The", "= service_pb2.ImportDataRequest(name=name, input_config=input_config) operation = self._inner_api_calls[\"import_data\"]( request, retry=retry, timeout=timeout, metadata=metadata", "warnings.warn( \"The `client_config` argument is deprecated.\", PendingDeprecationWarning, stacklevel=2, ) else:", "self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Deploys model. Returns", "return google.api_core.operation.from_gapic( operation, self.transport._operations_client, empty_pb2.Empty, metadata_type=proto_operations_pb2.OperationMetadata, ) def import_data( self,", "default_timeout=self._method_configs[\"ExportData\"].timeout, client_info=self._client_info, ) request = service_pb2.ExportDataRequest(name=name, output_config=output_config) operation = self._inner_api_calls[\"export_data\"](", "production environment. Returns ``google.protobuf.Empty`` in the ``response`` field when it", "location. filter_ (str): An expression for filtering the results of", "self._inner_api_calls[\"list_models\"], retry=retry, timeout=timeout, metadata=metadata, ), request=request, items_field=\"model\", request_token_field=\"page_token\", response_token_field=\"next_page_token\", )", "gRPC protocol. This argument may also be a callable which", "Iterate over all results >>> for element in client.list_datasets(parent): ...", "method. If not specified, the default configuration is used. 
client_info", "client_info=self._client_info, ) request = service_pb2.GetModelRequest(name=name) return self._inner_api_calls[\"get_model\"]( request, retry=retry, timeout=timeout,", "client_info=self._client_info, ) request = service_pb2.UndeployModelRequest(name=name) return self._inner_api_calls[\"undeploy_model\"]( request, retry=retry, timeout=timeout,", "request = service_pb2.ListDatasetsRequest( parent=parent, filter=filter_, page_size=page_size ) iterator = google.api_core.page_iterator.GRPCIterator(", "ValueError( \"Received both a transport instance and \" \"credentials; these", ") else: client_info.gapic_version = _GAPIC_LIBRARY_VERSION self._client_info = client_info # Parse", "an ``UndeployModelResponse`` in the ``response`` field when it completes. Example:", "import service_pb2_grpc from google.longrunning import operations_pb2 as longrunning_operations_pb2 from google.protobuf", ">>> metadata = response.metadata() Args: parent (str): Resource name of", "private key json file. args: Additional arguments to pass to", "# Handle metadata. >>> metadata = response.metadata() Args: name (str):", "metadata_type=proto_operations_pb2.OperationMetadata, ) def import_data( self, name, input_config, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None,", "# TODO: Initialize `input_config`: >>> input_config = {} >>> >>>", "existence. See example below for the last. 
Some examples of", "'[LOCATION]') >>> >>> # TODO: Initialize `model`: >>> model =", "\"projects/{project}/locations/{location}\", project=project, location=location, ) @classmethod def dataset_path(cls, project, location, dataset):", "dataset=dataset, ) @classmethod def model_path(cls, project, location, model): \"\"\"Return a", "page at a time >>> for page in client.list_datasets(parent).pages: ...", "= google.api_core.gapic_v1.client_info.ClientInfo( gapic_version=_GAPIC_LIBRARY_VERSION ) else: client_info.gapic_version = _GAPIC_LIBRARY_VERSION self._client_info =", "created after the resources with those names are deleted. An", "cached API call functions. # These are the actual callables", "the same form as the protobuf message :class:`~google.cloud.automl_v1beta1.types.OutputConfig` retry (Optional[google.api_core.retry.Retry]):", "= client.deploy_model(name) Args: name (str): Resource name of the model", ">>> >>> # Iterate over results one page at a", "is deprecated.\", PendingDeprecationWarning, stacklevel=2, ) else: client_config = auto_ml_client_config.config if", "), request=request, items_field=\"model\", request_token_field=\"page_token\", response_token_field=\"next_page_token\", ) return iterator def delete_model(", "Save a dictionary of cached API call functions. # These", "if credentials: raise ValueError( \"Received both a transport instance and", "send a user-agent string along with API requests. If ``None``,", "self, name, input_config, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Imports data", "If page streaming is performed per-page, this determines the maximum", "after the resources with those names are deleted. 
An ID", "= client.location_path('[PROJECT]', '[LOCATION]') >>> >>> # Iterate over all results", ">>> >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') >>> >>> response", "in AutoML BE, and does not change the status of", "import google.api_core.grpc_helpers import google.api_core.operation import google.api_core.operations_v1 import google.api_core.page_iterator import google.api_core.path_template", "client will attempt to ascertain the credentials from the environment.", "Handle metadata. >>> metadata = response.metadata() Args: name (str): Resource", "Creates a model. Returns a Model in the ``response`` field", "for filtering the results of the request. - ``annotation_spec_id`` -", "as the protobuf message :class:`~google.cloud.automl_v1beta1.types.Model` retry (Optional[google.api_core.retry.Retry]): A retry object", "model has image\\_classification\\_model\\_metadata. - ``dataset_id=5`` --> The model was created", "an iterable of :class:`~google.cloud.automl_v1beta1.types.Model` instances. This object can also be", "name of the model to deploy. retry (Optional[google.api_core.retry.Retry]): A retry", "using the filter is: - ``translation_dataset_metadata:*`` --> The dataset has", "import enums from google.cloud.automl_v1beta1.gapic.transports import auto_ml_grpc_transport from google.cloud.automl_v1beta1.proto import data_items_pb2", "# timeout, and the like. self._inner_api_calls = {} # Service", "= google.api_core.gapic_v1.method.wrap_method( self.transport.get_model, default_retry=self._method_configs[\"GetModel\"].retry, default_timeout=self._method_configs[\"GetModel\"].timeout, client_info=self._client_info, ) request = service_pb2.GetModelRequest(name=name)", "dataset_pb2 from google.cloud.automl_v1beta1.proto import io_pb2 from google.cloud.automl_v1beta1.proto import model_evaluation_pb2 from", "'[MODEL]') >>> >>> # Iterate over all results >>> for", "field when it completes. 
Example: >>> from google.cloud import automl_v1beta1", "self._inner_api_calls[ \"delete_dataset\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.delete_dataset, default_retry=self._method_configs[\"DeleteDataset\"].retry, default_timeout=self._method_configs[\"DeleteDataset\"].timeout, client_info=self._client_info, )", "parent (str): Resource name of the parent project where the", "where the model is being created. model (Union[dict, ~google.cloud.automl_v1beta1.types.Model]): The", "responsible for actually making the API calls. The default transport", "contents. Returns empty response in the ``response`` field when it", "This argument is mutually exclusive with providing a transport instance", "mutually exclusive.\" ) self.transport = transport else: self.transport = auto_ml_grpc_transport.AutoMlGrpcTransport(", "--> The model was created from a sibling dataset with", "retry and timeout logic. if \"list_datasets\" not in self._inner_api_calls: self._inner_api_calls[", "element ... pass Args: parent (str): The resource name of", "google.oauth2 import service_account import google.api_core.gapic_v1.client_info import google.api_core.gapic_v1.config import google.api_core.gapic_v1.method import", "the default settings for retry and timeout for each RPC", "iterator def delete_dataset( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\"", "limitations under the License. \"\"\"Accesses the google.cloud.automl.v1beta1 AutoMl API.\"\"\" import", "the `*_config.py` # file next to this one.) self._method_configs =", "model evaluation was done for aggregate of all annotation specs.", "service_pb2.UndeployModelRequest(name=name) return self._inner_api_calls[\"undeploy_model\"]( request, retry=retry, timeout=timeout, metadata=metadata ) def get_model_evaluation(", "If a dict is provided, it must be of the", "process element ... 
pass >>> >>> >>> # Alternatively: >>>", "also be a callable which returns a transport instance. Callables", "dataset) Args: parent (str): The resource name of the project", "def model_path(cls, project, location, model): \"\"\"Return a fully-qualified model string.\"\"\"", "service. If none are specified, the client will attempt to", "Args: name (str): The resource name of the dataset to", "results >>> for element in client.list_model_evaluations(parent): ... # process element", "\"projects/{project}/locations/{location}/models/{model}/modelEvaluations/{model_evaluation}\", project=project, location=location, model=model, model_evaluation=model_evaluation, ) def __init__( self, transport=None,", "which to make calls. This argument is mutually exclusive with", "logic. if \"create_model\" not in self._inner_api_calls: self._inner_api_calls[ \"create_model\" ] =", "model to undeploy. retry (Optional[google.api_core.retry.Retry]): A retry object used to", "model_evaluation_path(cls, project, location, model, model_evaluation): \"\"\"Return a fully-qualified model_evaluation string.\"\"\"", "of the dataset to retrieve. retry (Optional[google.api_core.retry.Retry]): A retry object", "instances. This object can also be configured to iterate over", "it must be of the same form as the protobuf", "a transport instance. Callables will be sent the credentials as", "the deployed model in the production environment. Returns ``google.protobuf.Empty`` in", "metadata=None, ): \"\"\" Exports dataset's data to a Google Cloud", "over all results >>> for element in client.list_model_evaluations(parent): ... #", "of its contents. Returns empty response in the ``response`` field", "dataset's data to a Google Cloud Storage bucket. Returns an", "name of the parent project where the model is being", "Returns: A :class:`~google.gax.PageIterator` instance. 
By default, this is an iterable", ">>> >>> client = automl_v1beta1.AutoMlClient() >>> >>> name = client.model_evaluation_path('[PROJECT]',", "the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]):", "if callable(transport): self.transport = transport( credentials=credentials, default_class=auto_ml_grpc_transport.AutoMlGrpcTransport, ) else: if", "else: client_config = auto_ml_client_config.config if channel: warnings.warn( \"The `channel` argument", "for retry and timeout for each RPC # from the", "Copyright 2018 Google LLC # # Licensed under the Apache", "raise ValueError( \"Received both a transport instance and \" \"credentials;", "the model is being created. model (Union[dict, ~google.cloud.automl_v1beta1.types.Model]): The model", "\"get_model\" not in self._inner_api_calls: self._inner_api_calls[ \"get_model\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.get_model,", "evaluation. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests.", "to add retry and timeout logic. if \"export_data\" not in", "timeout=timeout, metadata=metadata ) return google.api_core.operation.from_gapic( operation, self.transport._operations_client, empty_pb2.Empty, metadata_type=proto_operations_pb2.OperationMetadata, )", "``None`` is specified, requests will not be retried. timeout (Optional[float]):", "in self._inner_api_calls: self._inner_api_calls[ \"undeploy_model\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.undeploy_model, default_retry=self._method_configs[\"UndeployModel\"].retry, default_timeout=self._method_configs[\"UndeployModel\"].timeout,", "API. The resource names are assigned by the server. The", "the interface for this client. This is the key used", "that is provided to the method. 
Returns: A :class:`~google.cloud.automl_v1beta1.types.ModelEvaluation` instance.", "form as the protobuf message :class:`~google.cloud.automl_v1beta1.types.OutputConfig` retry (Optional[google.api_core.retry.Retry]): A retry", "import model_evaluation_pb2 from google.cloud.automl_v1beta1.proto import model_pb2 from google.cloud.automl_v1beta1.proto import operations_pb2", "ID different than 4. - ``NOT annotation_spec_id:*`` --> The model", "response.metadata() Args: parent (str): Resource name of the parent project", ":class:`~google.cloud.automl_v1beta1.types.Dataset` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any", "or existence. See example below for the last. Some examples", "environment. Returns ``google.protobuf.Empty`` in the ``response`` field when it completes,", "``{dataset_id}``. \"\"\" SERVICE_ADDRESS = \"automl.googleapis.com:443\" \"\"\"The default address of the", "# # Copyright 2018 Google LLC # # Licensed under", "response = client.export_data(name, output_config) >>> >>> def callback(operation_future): ... #", ">>> >>> response = client.create_model(parent, model) >>> >>> def callback(operation_future):", "import functools import pkg_resources import warnings from google.oauth2 import service_account", "dictionary of cached API call functions. # These are the", "retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Lists model evaluations. Example: >>>", "metadata that is provided to the method. 
Returns: A :class:`~google.cloud.automl_v1beta1.types.ModelEvaluation`", "Resource name of the parent project where the model is", "self.transport.get_dataset, default_retry=self._method_configs[\"GetDataset\"].retry, default_timeout=self._method_configs[\"GetDataset\"].timeout, client_info=self._client_info, ) request = service_pb2.GetDatasetRequest(name=name) return self._inner_api_calls[\"get_dataset\"](", "Iterate over all results >>> for element in client.list_model_evaluations(parent): ...", "spec with ID different than 4. - ``NOT annotation_spec_id:*`` -->", "the service. if transport: if callable(transport): self.transport = transport( credentials=credentials,", "calls. This argument is mutually exclusive with ``credentials``; providing both", "add retry and timeout logic. if \"get_dataset\" not in self._inner_api_calls:", "from which to list datasets. filter_ (str): An expression for", "self.transport.import_data, default_retry=self._method_configs[\"ImportData\"].retry, default_timeout=self._method_configs[\"ImportData\"].timeout, client_info=self._client_info, ) request = service_pb2.ImportDataRequest(name=name, input_config=input_config) operation", "exception. credentials (google.auth.credentials.Credentials): The authorization credentials to attach to requests.", "over results one page at a time >>> for page", ">>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') >>> >>> response =", "one page at a time >>> for page in client.list_models(parent).pages:", "Additional arguments to pass to the constructor. Returns: AutoMlClient: The", "be a callable which returns a transport instance. Callables will", "resource name of the project to create the dataset for.", "client_info.gapic_version = _GAPIC_LIBRARY_VERSION self._client_info = client_info # Parse out the", "= {} >>> >>> response = client.import_data(name, input_config) >>> >>>", "4. - ``NOT annotation_spec_id:*`` --> The model evaluation was done", "the results of the request. 
- ``annotation_spec_id`` - for =,", "auto_ml_grpc_transport from google.cloud.automl_v1beta1.proto import data_items_pb2 from google.cloud.automl_v1beta1.proto import dataset_pb2 from", "resources contained in the underlying API response. If page streaming", "of time, in seconds, to wait for the request to", "affect the return value. If page streaming is performed per-page,", "for existence of the case. An example of using the", "maximum number of resources contained in the underlying API response.", "Note that if ``retry`` is specified, the timeout applies to", "deletes the model in AutoML BE, and does not change", "spec. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client", "Version 2.0 (the \"License\"); # you may not use this", ">>> >>> # Handle metadata. >>> metadata = response.metadata() Args:", "metadata=metadata, ), request=request, items_field=\"model\", request_token_field=\"page_token\", response_token_field=\"next_page_token\", ) return iterator def", "for aggregate of all annotation specs. page_size (int): The maximum", "A ``Channel`` instance through which to make calls. This argument", "request = service_pb2.DeployModelRequest(name=name) return self._inner_api_calls[\"deploy_model\"]( request, retry=retry, timeout=timeout, metadata=metadata )", "retry and timeout for each RPC # from the client", ">>> >>> response.add_done_callback(callback) >>> >>> # Handle metadata. >>> metadata", "TODO: Initialize `model`: >>> model = {} >>> >>> response", "Some examples of using the filter are: - ``annotation_spec_id!=4`` -->", "If ``None``, then default info will be used. 
Generally, you", "coding: utf-8 -*- # # Copyright 2018 Google LLC #", "@classmethod def model_path(cls, project, location, model): \"\"\"Return a fully-qualified model", "and # deserialization and actually sending data to the service.", "of the same form as the protobuf message :class:`~google.cloud.automl_v1beta1.types.Model` retry", "retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Lists datasets in a project.", "a callable which returns a transport instance. Callables will be", "if ``retry`` is specified, the timeout applies to each individual", "dataset. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client", "\"\"\"Creates an instance of this client using the provided credentials", "for it: a global evaluation, and one evaluation for each", "self._inner_api_calls[ \"list_models\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.list_models, default_retry=self._method_configs[\"ListModels\"].retry, default_timeout=self._method_configs[\"ListModels\"].timeout, client_info=self._client_info, )", "google.cloud.automl_v1beta1.gapic.transports import auto_ml_grpc_transport from google.cloud.automl_v1beta1.proto import data_items_pb2 from google.cloud.automl_v1beta1.proto import", "this determines the maximum number of resources in a page.", "\"list_model_evaluations\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.list_model_evaluations, default_retry=self._method_configs[\"ListModelEvaluations\"].retry, default_timeout=self._method_configs[\"ListModelEvaluations\"].timeout, client_info=self._client_info, ) request", "self._inner_api_calls[\"create_dataset\"]( request, retry=retry, timeout=timeout, metadata=metadata ) def get_dataset( self, name,", "filename (str): The path to the service account private key", "process element ... pass Args: parent (str): Resource name of", "Server API. 
The resource names are assigned by the server.", "by applicable law or agreed to in writing, software #", ">>> >>> client = automl_v1beta1.AutoMlClient() >>> >>> name = client.dataset_path('[PROJECT]',", "of the request. - ``dataset_metadata`` - for existence of the", "a model. Example: >>> from google.cloud import automl_v1beta1 >>> >>>", "your own client library. \"\"\" # Raise deprecation warnings for", "def callback(operation_future): ... # Handle result. ... result = operation_future.result()", "model = {} >>> >>> response = client.create_model(parent, model) >>>", "): \"\"\" Undeploys model. Returns an ``UndeployModelResponse`` in the ``response``", "import operations_pb2 as proto_operations_pb2 from google.cloud.automl_v1beta1.proto import prediction_service_pb2 from google.cloud.automl_v1beta1.proto", "\"\"\" SERVICE_ADDRESS = \"automl.googleapis.com:443\" \"\"\"The default address of the service.\"\"\"", "request = service_pb2.DeleteModelRequest(name=name) operation = self._inner_api_calls[\"delete_model\"]( request, retry=retry, timeout=timeout, metadata=metadata", "response = client.delete_model(name) >>> >>> def callback(operation_future): ... # Handle", "transport instance. Callables will be sent the credentials as the", "message :class:`~google.cloud.automl_v1beta1.types.InputConfig` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry", "A :class:`~google.cloud.automl_v1beta1.types.Dataset` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for", "model=model) operation = self._inner_api_calls[\"create_model\"]( request, retry=retry, timeout=timeout, metadata=metadata ) return", "logic. if \"create_dataset\" not in self._inner_api_calls: self._inner_api_calls[ \"create_dataset\" ] =", "= or !=. Some examples of using the filter are:", ">>> >>> client = automl_v1beta1.AutoMlClient() >>> >>> parent = client.location_path('[PROJECT]',", "to attach to requests. 
These credentials identify this application to", ">>> >>> # TODO: Initialize `dataset`: >>> dataset = {}", "none are specified, the client will attempt to ascertain the", "import_data( self, name, input_config, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Imports", "Required. The desired input location. If a dict is provided,", "if \"undeploy_model\" not in self._inner_api_calls: self._inner_api_calls[ \"undeploy_model\" ] = google.api_core.gapic_v1.method.wrap_method(", "add retry and timeout logic. if \"delete_dataset\" not in self._inner_api_calls:", ">>> >>> # TODO: Initialize `output_config`: >>> output_config = {}", "client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') >>> >>> # Iterate over all results", "will list model evaluations from across all models of the", "fully-qualified model_evaluation string.\"\"\" return google.api_core.path_template.expand( \"projects/{project}/locations/{location}/models/{model}/modelEvaluations/{model_evaluation}\", project=project, location=location, model=model, model_evaluation=model_evaluation,", "field when it completes, and ``delete_details`` in the ``metadata`` field.", "import google.api_core.gapic_v1.method import google.api_core.grpc_helpers import google.api_core.operation import google.api_core.operations_v1 import google.api_core.page_iterator", "amount of time, in seconds, to wait for the request", "is provided to the method. Returns: A :class:`~google.cloud.automl_v1beta1.types.Operation` instance. Raises:", "The constructed client. 
\"\"\" credentials = service_account.Credentials.from_service_account_file(filename) kwargs[\"credentials\"] = credentials", "in self._inner_api_calls: self._inner_api_calls[ \"create_model\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.create_model, default_retry=self._method_configs[\"CreateModel\"].retry, default_timeout=self._method_configs[\"CreateModel\"].timeout,", ">>> >>> response = client.get_model_evaluation(name) Args: name (str): Resource name", "default_retry=self._method_configs[\"ExportData\"].retry, default_timeout=self._method_configs[\"ExportData\"].timeout, client_info=self._client_info, ) request = service_pb2.ExportDataRequest(name=name, output_config=output_config) operation =", "is being created. model (Union[dict, ~google.cloud.automl_v1beta1.types.Model]): The model to create.", "google.cloud.automl_v1beta1.proto import operations_pb2 as proto_operations_pb2 from google.cloud.automl_v1beta1.proto import prediction_service_pb2 from", "AutoML BE, and does not change the status of the", "retry and timeout logic. if \"get_model_evaluation\" not in self._inner_api_calls: self._inner_api_calls[", "\"get_model\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.get_model, default_retry=self._method_configs[\"GetModel\"].retry, default_timeout=self._method_configs[\"GetModel\"].timeout, client_info=self._client_info, ) request", "# Service calls def create_dataset( self, parent, dataset, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT,", "add retry and timeout logic. if \"list_models\" not in self._inner_api_calls:", "request, retry=retry, timeout=timeout, metadata=metadata ) def list_models( self, parent, filter_=None,", "The model has image\\_classification\\_model\\_metadata. - ``dataset_id=5`` --> The model was", "A retry object used to retry requests. 
If ``None`` is", "Initialize `input_config`: >>> input_config = {} >>> >>> response =", "\"`transport` instead.\", PendingDeprecationWarning, stacklevel=2, ) # Instantiate the transport. #", "metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the", "The resource name of the project from which to list", "the filter is: - ``translation_dataset_metadata:*`` --> The dataset has translation\\_dataset\\_metadata.", "page. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests.", ") return google.api_core.operation.from_gapic( operation, self.transport._operations_client, empty_pb2.Empty, metadata_type=proto_operations_pb2.OperationMetadata, ) def import_data(", "self._inner_api_calls[\"delete_dataset\"]( request, retry=retry, timeout=timeout, metadata=metadata ) return google.api_core.operation.from_gapic( operation, self.transport._operations_client,", "If not specified, the default configuration is used. client_info (google.api_core.gapic_v1.client_info.ClientInfo):", "For ``projects/{project_id}/locations/{location_id}/datasets/{dataset_id}``, then the id for the item is ``{dataset_id}``.", "applicable law or agreed to in writing, software # distributed", "of the dataset. output_config (Union[dict, ~google.cloud.automl_v1beta1.types.OutputConfig]): Required. The desired output", "add retry and timeout logic. if \"get_model_evaluation\" not in self._inner_api_calls:", "client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') >>> >>> response = client.get_model(name) Args: name", "Returns: A :class:`~google.cloud.automl_v1beta1.types.ModelEvaluation` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed", "response = client.create_model(parent, model) >>> >>> def callback(operation_future): ... #", "the model evaluation. retry (Optional[google.api_core.retry.Retry]): A retry object used to", "name. Dataset must already exist. 
All imported annotations and examples", "json file. args: Additional arguments to pass to the constructor.", "does not affect the return value. If page streaming is", "'[MODEL]') >>> >>> response = client.deploy_model(name) Args: name (str): Resource", "the environment. This argument is mutually exclusive with providing a", ">>> >>> parent = client.location_path('[PROJECT]', '[LOCATION]') >>> >>> # Iterate", "the protobuf message :class:`~google.cloud.automl_v1beta1.types.Dataset` retry (Optional[google.api_core.retry.Retry]): A retry object used", "request, retry=retry, timeout=timeout, metadata=metadata ) return google.api_core.operation.from_gapic( operation, self.transport._operations_client, empty_pb2.Empty,", "else: if credentials: raise ValueError( \"Received both a transport instance", "metadata=metadata ) def list_model_evaluations( self, parent, filter_=None, page_size=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT,", "self._inner_api_calls: self._inner_api_calls[ \"list_models\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.list_models, default_retry=self._method_configs[\"ListModels\"].retry, default_timeout=self._method_configs[\"ListModels\"].timeout, client_info=self._client_info,", "name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Undeploys model. Returns an", "client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') >>> >>> response = client.get_dataset(name) Args: name", "model evaluations. Example: >>> from google.cloud import automl_v1beta1 >>> >>>", "list_model_evaluations( self, parent, filter_=None, page_size=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\"", "is the last element of the item's resource name. For", "response = client.delete_dataset(name) >>> >>> def callback(operation_future): ... 
# Handle", "google.api_core.gapic_v1.method.wrap_method( self.transport.export_data, default_retry=self._method_configs[\"ExportData\"].retry, default_timeout=self._method_configs[\"ExportData\"].timeout, client_info=self._client_info, ) request = service_pb2.ExportDataRequest(name=name, output_config=output_config)", ":class:`~google.gax.PageIterator` instance. By default, this is an iterable of :class:`~google.cloud.automl_v1beta1.types.Model`", "not change the status of the deployed model in the", "# You may obtain a copy of the License at", ":class:`~google.cloud.automl_v1beta1.types.Dataset` instances. This object can also be configured to iterate", "the `options` parameter. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for", "response = client.get_model(name) Args: name (str): Resource name of the", "channel=None, credentials=None, client_config=None, client_info=None, ): \"\"\"Constructor. Args: transport (Union[~.AutoMlGrpcTransport, Callable[[~.Credentials,", "instance through which to make calls. This argument is mutually", "a dict is provided, it must be of the same", "transport method to add retry and timeout logic. if \"list_models\"", "self.transport = transport else: self.transport = auto_ml_grpc_transport.AutoMlGrpcTransport( address=self.SERVICE_ADDRESS, channel=channel, credentials=credentials", "interface for this client. This is the key used to", "is provided, it must be of the same form as", "transport methods, wrapped with `wrap_method` to add retry, # timeout,", "import empty_pb2 _GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution(\"google-cloud-automl\").version class AutoMlClient(object): \"\"\" AutoML Server", "evaluations are created for it: a global evaluation, and one", "(str): Resource name of the model to deploy. 
retry (Optional[google.api_core.retry.Retry]):", "None: client_info = google.api_core.gapic_v1.client_info.ClientInfo( gapic_version=_GAPIC_LIBRARY_VERSION ) else: client_info.gapic_version = _GAPIC_LIBRARY_VERSION", "deployed model in the production environment. Returns ``google.protobuf.Empty`` in the", "protobuf message :class:`~google.cloud.automl_v1beta1.types.InputConfig` retry (Optional[google.api_core.retry.Retry]): A retry object used to", "the protobuf message :class:`~google.cloud.automl_v1beta1.types.InputConfig` retry (Optional[google.api_core.retry.Retry]): A retry object used", "argument and the default transport class as the second argument.", "model, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Creates a model. Returns", "in self._inner_api_calls: self._inner_api_calls[ \"list_datasets\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.list_datasets, default_retry=self._method_configs[\"ListDatasets\"].retry, default_timeout=self._method_configs[\"ListDatasets\"].timeout,", "string along with API requests. 
If ``None``, then default info", "from google.cloud.automl_v1beta1.proto import dataset_pb2 from google.cloud.automl_v1beta1.proto import io_pb2 from google.cloud.automl_v1beta1.proto", "self.transport = auto_ml_grpc_transport.AutoMlGrpcTransport( address=self.SERVICE_ADDRESS, channel=channel, credentials=credentials ) if client_info is", "google.api_core.gapic_v1.method.wrap_method( self.transport.create_dataset, default_retry=self._method_configs[\"CreateDataset\"].retry, default_timeout=self._method_configs[\"CreateDataset\"].timeout, client_info=self._client_info, ) request = service_pb2.CreateDatasetRequest(parent=parent, dataset=dataset)", "``response`` field when it completes, and ``delete_details`` in the ``metadata``", ">>> >>> name = client.model_evaluation_path('[PROJECT]', '[LOCATION]', '[MODEL]', '[MODEL_EVALUATION]') >>> >>>", "has translation\\_dataset\\_metadata. page_size (int): The maximum number of resources contained", ">>> >>> response = client.undeploy_model(name) Args: name (str): Resource name", "annotation_spec_id:*`` --> The model evaluation was done for aggregate of", "timeout logic. if \"list_datasets\" not in self._inner_api_calls: self._inner_api_calls[ \"list_datasets\" ]", "`model`: >>> model = {} >>> >>> response = client.create_model(parent,", "to add retry and timeout logic. if \"undeploy_model\" not in", "results of the request. - ``annotation_spec_id`` - for =, !=", "name of the model to list the model evaluations for.", "DEPRECATED. A ``Channel`` instance through which to make calls. This", "and timeout logic. if \"export_data\" not in self._inner_api_calls: self._inner_api_calls[ \"export_data\"", "\"\"\"The default address of the service.\"\"\" # The name of", "By default, this is an iterable of :class:`~google.cloud.automl_v1beta1.types.ModelEvaluation` instances. 
This", "not in self._inner_api_calls: self._inner_api_calls[ \"get_dataset\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.get_dataset, default_retry=self._method_configs[\"GetDataset\"].retry,", "with those names are deleted. An ID of a resource", "If ``None`` is specified, requests will not be retried. timeout", "Returns ``google.protobuf.Empty`` in the ``response`` field when it completes, and", "google.longrunning import operations_pb2 as longrunning_operations_pb2 from google.protobuf import empty_pb2 _GAPIC_LIBRARY_VERSION", "(Union[dict, ~google.cloud.automl_v1beta1.types.OutputConfig]): Required. The desired output location. If a dict", ") request = service_pb2.DeleteModelRequest(name=name) operation = self._inner_api_calls[\"delete_model\"]( request, retry=retry, timeout=timeout,", ">>> output_config = {} >>> >>> response = client.export_data(name, output_config)", "of the case. - ``dataset_id`` - for = or !=.", "used to retry requests. If ``None`` is specified, requests will", "requests will not be retried. timeout (Optional[float]): The amount of", "name of the model to undeploy. retry (Optional[google.api_core.retry.Retry]): A retry", "done for aggregate of all annotation specs. page_size (int): The", "\"\"\" Gets a dataset. 
Example: >>> from google.cloud import automl_v1beta1", "(str): An expression for filtering the results of the request.", "name, input_config, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): \"\"\" Imports data into", "not in self._inner_api_calls: self._inner_api_calls[ \"list_datasets\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.list_datasets, default_retry=self._method_configs[\"ListDatasets\"].retry,", "transport (Union[~.AutoMlGrpcTransport, Callable[[~.Credentials, type], ~.AutoMlGrpcTransport]): A transport instance, responsible for", "name = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') >>> >>> # TODO: Initialize", "to add retry and timeout logic. if \"create_model\" not in", "deleted. An ID of a resource is the last element", "\"\"\" AutoML Server API. The resource names are assigned by", "\"License\"); # you may not use this file except in", "# Alternatively: >>> >>> # Iterate over results one page", "attempt to ascertain the credentials from the environment. This argument", "(str): Required. The resource name of the dataset. output_config (Union[dict,", "The resource name of the project to create the dataset", "doing so will raise an exception. client_config (dict): DEPRECATED. A", "pkg_resources.get_distribution(\"google-cloud-automl\").version class AutoMlClient(object): \"\"\" AutoML Server API. The resource names", "add retry and timeout logic. if \"create_model\" not in self._inner_api_calls:", "the default transport class as the second argument. channel (grpc.Channel):", "of :class:`~google.cloud.automl_v1beta1.types.Dataset` instances. This object can also be configured to", "is an iterable of :class:`~google.cloud.automl_v1beta1.types.ModelEvaluation` instances. This object can also", "client using the provided credentials file. 
Args: filename (str): The", "'[MODEL]') >>> >>> response = client.delete_model(name) >>> >>> def callback(operation_future):", "to set this if you're developing your own client library.", "timeout, and the like. self._inner_api_calls = {} # Service calls", ") def export_data( self, name, output_config, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ):", "in self._inner_api_calls: self._inner_api_calls[ \"get_dataset\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.get_dataset, default_retry=self._method_configs[\"GetDataset\"].retry, default_timeout=self._method_configs[\"GetDataset\"].timeout,", "to pass to the constructor. Returns: AutoMlClient: The constructed client.", "default_retry=self._method_configs[\"ListModelEvaluations\"].retry, default_timeout=self._method_configs[\"ListModelEvaluations\"].timeout, client_info=self._client_info, ) request = service_pb2.ListModelEvaluationsRequest( parent=parent, filter=filter_, page_size=page_size", "credentials as the first argument and the default transport class", "(Union[dict, ~google.cloud.automl_v1beta1.types.Dataset]): The dataset to create. If a dict is", "``dataset_id`` - for = or !=. Some examples of using", "'[LOCATION]', '[DATASET]') >>> >>> response = client.delete_dataset(name) >>> >>> def", "client_info=self._client_info, ) request = service_pb2.ListModelsRequest( parent=parent, filter=filter_, page_size=page_size ) iterator", "default_timeout=self._method_configs[\"UndeployModel\"].timeout, client_info=self._client_info, ) request = service_pb2.UndeployModelRequest(name=name) return self._inner_api_calls[\"undeploy_model\"]( request, retry=retry,", "in a page. retry (Optional[google.api_core.retry.Retry]): A retry object used to", "client_info=None, ): \"\"\"Constructor. 
Args: transport (Union[~.AutoMlGrpcTransport, Callable[[~.Credentials, type], ~.AutoMlGrpcTransport]): A", "= self._inner_api_calls[\"create_model\"]( request, retry=retry, timeout=timeout, metadata=metadata ) return google.api_core.operation.from_gapic( operation,", "message :class:`~google.cloud.automl_v1beta1.types.Dataset` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry", "an exception. credentials (google.auth.credentials.Credentials): The authorization credentials to attach to", "(Optional[float]): The amount of time, in seconds, to wait for", "Returns: A :class:`~google.cloud.automl_v1beta1.types.Dataset` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed", "for page in client.list_model_evaluations(parent).pages: ... for element in page: ...", "of the same form as the protobuf message :class:`~google.cloud.automl_v1beta1.types.Dataset` retry", "\"undeploy_model\" not in self._inner_api_calls: self._inner_api_calls[ \"undeploy_model\" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.undeploy_model,", "of the case. An example of using the filter is:", "self.transport.list_datasets, default_retry=self._method_configs[\"ListDatasets\"].retry, default_timeout=self._method_configs[\"ListDatasets\"].timeout, client_info=self._client_info, ) request = service_pb2.ListDatasetsRequest( parent=parent, filter=filter_,", "API.\"\"\" import functools import pkg_resources import warnings from google.oauth2 import", "Additional metadata that is provided to the method. Returns: A", "than 4. - ``NOT annotation_spec_id:*`` --> The model evaluation was", "for the request to complete. Note that if ``retry`` is", "An ID of a resource is the last element of", "Gets a model. 
Example: >>> from google.cloud import automl_v1beta1 >>>", "= service_pb2.DeployModelRequest(name=name) return self._inner_api_calls[\"deploy_model\"]( request, retry=retry, timeout=timeout, metadata=metadata ) def", ") return google.api_core.operation.from_gapic( operation, self.transport._operations_client, model_pb2.Model, metadata_type=proto_operations_pb2.OperationMetadata, ) def get_model(", "metadata=metadata ) def list_datasets( self, parent, filter_=None, page_size=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT,", "name (str): Resource name of the model to deploy. retry" ]
[ "task.working_hours_open = 0.0 task.working_days_open = 0.0 if task.date_end: dt_date_end =", "self.env.user.has_group('project.group_project_recurring_tasks')) # rating fields rating_request_deadline = fields.Datetime(compute='_compute_rating_request_deadline', store=True) rating_active =", "_compute_partner_phone(self): for project in self: if project.partner_id and project.partner_phone !=", "phone number will also be updated.') elif will_write_email: task.ribbon_message =", "if the project partner_id changes, the task partner_id is automatically", "vals): now = fields.Datetime.now() if 'parent_id' in vals and vals['parent_id']", "if project_task_type.fold or project_task_type.is_closed: return {'date_end': fields.Datetime.now()} return {'date_end': False}", "= super(Project, self).message_subscribe(partner_ids=partner_ids, channel_ids=channel_ids, subtype_ids=subtype_ids) project_subtypes = self.env['mail.message.subtype'].browse(subtype_ids) if subtype_ids", "'Sunday'), ], string='Day Of The Week', compute='_compute_repeat', readonly=False) repeat_month =", ")) portal_privacy = self.project_id.privacy_visibility == 'portal' for group_name, group_method, group_data", "('august', 'August'), ('september', 'September'), ('october', 'October'), ('november', 'November'), ('december', 'December'),", "def unlink_wizard(self, stage_view=False): self = self.with_context(active_test=False) # retrieves all the", "Project', ondelete=\"restrict\", help=\"Project in which sub-tasks of the current project", "def _onchange_alias_name(self): if not self.alias_enabled: self.alias_name = False def _compute_alias_enabled(self):", "we used res_model & res_id displayed_image_id = fields.Many2one('ir.attachment', domain=\"[('res_model', '=',", "in communication history website_message_ids = fields.One2many(domain=lambda self: [('model', '=', self._name),", "using # suggested recipients. 
This heuristic allows to avoid ugly", "only see the followed project and tasks.\\n\" \"- All internal", "changes, the task partner_id is automatically changed also. 2) if", "they are notified they should probably have access to the", "UserError(_(\"Sorry. You can't set a task as its parent task.\"))", "date in recurring_dates[:5]: task.recurrence_message += '<li>%s</li>' % date.strftime(date_format) if task.repeat_type", "= task.project_id.resource_calendar_id.get_work_duration_data(dt_create_date, dt_date_assign, compute_leaves=True) task.working_hours_open = duration_data['hours'] task.working_days_open = duration_data['days']", "internal users: employees may see everything.\" \" Portal users may", "self.email_split(msg) partner_ids = [p.id for p in self.env['mail.thread']._mail_find_partner_from_emails(email_list, records=self, force_create=False)", "'sat', 'sun', 'repeat_day', 'repeat_week', 'repeat_month', 'repeat_weekday') def _compute_recurrence_message(self): self.recurrence_message =", "= task.email_split(msg) partner_ids = [p.id for p in self.env['mail.thread']._mail_find_partner_from_emails(email_list, records=task,", "# --------------------------------------------------- # Mail gateway # --------------------------------------------------- def _track_template(self, changes):", "pdata['groups'] and pdata['id'] in allowed_user_ids new_group = ('group_project_user', group_func, {})", "users and all internal users'), ], string='Visibility', required=True, default='portal', help=\"Defines", "'project_task_type_rel', 'type_id', 'project_id', string='Projects', default=_get_default_project_ids) legend_blocked = fields.Char( 'Red Kanban", "records=task, force_create=False) if p] task.message_subscribe(partner_ids) return task def message_update(self, msg,", "tasks |= self.env['project.task'].search(recurrence_domain) result = super(Task, tasks).write(vals) # rating on", "and not project.analytic_account_id.line_ids: analytic_accounts_to_delete |= 
project.analytic_account_id result = super(Project, self).unlink()", "tasks'), ], default='this', store=False) recurrence_message = fields.Char(string='Next Recurrencies', compute='_compute_recurrence_message') repeat_interval", "def _compute_recurring_count(self): self.recurring_count = 0 recurring_tasks = self.filtered(lambda l: l.recurrence_id)", "'=', True)]) doc_count = fields.Integer(compute='_compute_attached_docs_count', string=\"Number of documents attached\") date_start", "if image_attachments: self.displayed_image_id = image_attachments[0] if self.email_from and not self.partner_id:", "create(self, vals): # Prevent double project creation self = self.with_context(mail_create_nosubscribe=True)", "self.env['project.task.recurrence'].create(rec_values) vals['recurrence_id'] = recurrence.id tasks = super().create(vals_list) for task in", "string='Projects', default=_get_default_project_ids) legend_blocked = fields.Char( 'Red Kanban Label', default=lambda s:", "linked for financial management. 
\" \"Use an analytic account to", "not want to explicitly set user_id to False; however we", "current_objects = [h for h in headers.get('X-Odoo-Objects', '').split(',') if h]", "'periodic'), ('rating_request_deadline', '<=', fields.Datetime.now()) ]) for project in projects: project.task_ids._send_task_rating_mail()", "stages, domain, order): search_domain = [('id', 'in', stages.ids)] if 'default_project_id'", "= project.partner_id.phone def _inverse_partner_phone(self): for project in self: if project.partner_id", "= self.env.user in project.favorite_user_ids def _inverse_is_favorite(self): favorite_projects = not_fav_projects =", "project_id: return False return self.stage_find(project_id, [('fold', '=', False), ('is_closed', '=',", "in self: if project.partner_id and project.partner_phone != project.partner_id.phone: project.partner_id.phone =", "(or optionally Issues if the Issue Tracker module is installed).\")", "when changing stage'), ('periodic', 'Periodical Rating') ], 'Customer Ratings Status',", "id' def _get_default_project_ids(self): default_project_id = self.env.context.get('default_project_id') return [default_project_id] if default_project_id", "_check_parent_id(self): for task in self: if not task._check_recursion(): raise ValidationError(_('Error!", "from .project_task_recurrence import DAYS, WEEKS class ProjectTaskType(models.Model): _name = 'project.task.type'", "the scheduler @api.model def _send_rating_all(self): projects = self.search([ ('rating_active', '=',", "communication history website_message_ids = fields.One2many(domain=lambda self: [('model', '=', self._name), ('message_type',", "of the project is too restricted. 
Set the privacy to", "task = super(Task, self.with_context(create_context)).message_new(msg, custom_values=defaults) email_list = task.email_split(msg) partner_ids =", "<= 0, return all children without depth limit def _get_all_subtasks(self,", "to new project \"\"\" project = self.browse(new_project_id) tasks = self.env['project.task']", "False)], ['project_id'], ['project_id']) result = dict((data['project_id'][0], data['project_id_count']) for data in", "task in self: if task.kanban_state == 'normal': task.kanban_state_label = task.legend_normal", "value displayed for the normal state for kanban selection, when", "= fields.Date.today() number_occurrences = min(5, task.repeat_number if task.repeat_type == 'after'", "to the document. \"\"\" groups = super(Task, self)._notify_get_groups(msg_vals=msg_vals) local_msg_vals =", "null', domain=\"['|', ('company_id', '=', False), ('company_id', '=', company_id)]\", check_company=True, help=\"Analytic", "'portal': task.allowed_user_ids |= task.project_id.allowed_portal_user_ids if task.project_id.privacy_visibility != 'portal': task.allowed_user_ids -=", "button to portal users and portal customers. If they are", "1, return only direct children # If depth == 3,", "is None: custom_values = {} defaults = { 'name': msg.get('subject')", "record creation and assignation/closing. 
working_hours_open = fields.Float(compute='_compute_elapsed', string='Working hours to", "default = {} if not default.get('name'): default['name'] = _(\"%s (copy)\")", "project = super(Project, self).copy(default) if self.subtask_project_id == self: project.subtask_project_id =", "not vals.get('subtask_project_id'): project.subtask_project_id = project.id if project.privacy_visibility == 'portal' and", "self.env.user.has_group('project.group_project_rating')) rating_status = fields.Selection( [('stage', 'Rating when changing stage'), ('periodic',", "task.partner_id.email or ((task.partner_id or task.parent_id) and task.email_from) or task.parent_id.email_from @api.depends('parent_id.project_id.subtask_project_id')", "0.0 if task.date_end: dt_date_end = fields.Datetime.from_string(task.date_end) duration_data = task.project_id.resource_calendar_id.get_work_duration_data(dt_create_date, dt_date_end,", "cannot archive recurring tasks. Please, disable the recurrence first.')) #", "= False def _compute_alias_enabled(self): for project in self: project.alias_enabled =", "right if 'is_favorite' in vals: vals.pop('is_favorite') self._fields['is_favorite'].determine_inverse(self) res = super(Project,", "= old_to_new_tasks.get(task.parent_id.id, False) new_task = task.copy(defaults) old_to_new_tasks[task.id] = new_task.id tasks", "def _get_default_color(self): return randint(1, 11) name = fields.Char('Name', required=True) color", "tracking=True, required=True, index=True) description = fields.Html(string='Description') priority = fields.Selection([ ('0',", "{}) self.ensure_one() project_user_group_id = self.env.ref('project.group_project_user').id group_func = lambda pdata: pdata['type']", "@api.depends( 'recurring_task', 'repeat_interval', 'repeat_unit', 'repeat_type', 'repeat_until', 'repeat_number', 'repeat_on_month', 'repeat_on_year', 'mon',", "= fields.One2many('project.task', 'project_id', string=\"Task Activities\") resource_calendar_id = fields.Many2one( 
'resource.calendar', string='Working", "task.stage_id = task.stage_find(task.project_id.id, [ ('fold', '=', False), ('is_closed', '=', False)])", "or (task.repeat_unit == 'year' and task.repeat_on_year == 'date') task.repeat_show_week =", "task.access_warning = _( \"The task cannot be shared with the", "task.stage_id.id, 'name': task.name, 'company_id': project.company_id.id, } def map_tasks(self, new_project_id): \"\"\"", "_creation_subtype(self): return self.env.ref('project.mt_task_new') def _track_subtype(self, init_values): self.ensure_one() if 'kanban_state_label' in", "fields.Boolean(string=\"Tue\", compute='_compute_repeat', readonly=False) wed = fields.Boolean(string=\"Wed\", compute='_compute_repeat', readonly=False) thu =", "('project_date_greater', 'check(date >= date_start)', 'Error! project start-date must be lower", "follower, a specific one) # on a document without customer", "internal users'), ('employees', 'All internal users'), ('portal', 'Invited portal users", "min(5, task.repeat_number if task.repeat_type == 'after' else 5) delta =", "= fields.Char( 'Green Kanban Label', default=lambda s: _('Ready'), translate=True, required=True,", "see all project and tasks.\\n\" \"- Invited portal and all", "task action['domain'] = [('id', 'child_of', self.id), ('id', '!=', self.id)] #", "s: _('In Progress'), translate=True, required=True, help='Override the default value displayed", "rating_template = task.stage_id.rating_template_id if rating_template: task.rating_send_request(rating_template, lang=task.partner_id.lang, force_send=force_send) def rating_get_partner_id(self):", "= self.env['res.partner'].browse(partner_ids).user_ids portal_users = all_users.filtered('share') internal_users = all_users - portal_users", "\"Use an analytic account to record cost and revenue on", "compute='_compute_repeat', readonly=False) repeat_unit = fields.Selection([ ('day', 'Days'), ('week', 'Weeks'), ('month',", "@api.returns('self', lambda value: value.id) def 
copy(self, default=None): if default is", "Tasks', default=lambda self: self.env.user.has_group('project.group_project_recurring_tasks')) # rating fields rating_request_deadline = fields.Datetime(compute='_compute_rating_request_deadline',", "fields.Char( compute='_compute_partner_email', inverse='_inverse_partner_email', string='Email', readonly=False, store=True, copy=False) partner_phone = fields.Char(", "'after' else 5) delta = task.repeat_interval if task.repeat_unit == 'day'", "'repeat_day', 'repeat_week', 'repeat_month', 'repeat_weekday'] @api.depends('recurring_task', 'repeat_unit', 'repeat_on_month', 'repeat_on_year') def _compute_repeat_visibility(self):", "vals.update(self.update_date_end(vals['stage_id'])) vals['date_last_stage_update'] = fields.Datetime.now() # recurrence rec_fields = vals.keys() &", "project visibility setting doesn't allow portal users to see the", "values.get('company_id') or self.env.company.id, 'partner_id': values.get('partner_id'), 'active': True, }) return analytic_account", "order to make it accessible by the recipient(s).\") @api.depends('child_ids.planned_hours') def", "ast from datetime import timedelta, datetime from random import randint", "return action def _compute_is_favorite(self): for project in self: project.is_favorite =", "((task.partner_id or task.parent_id) and task.email_from) or task.parent_id.email_from @api.depends('parent_id.project_id.subtask_project_id') def _compute_project_id(self):", "This heuristic allows to avoid ugly hacks in JS. 
new_partner", "self.id) return action def _compute_is_favorite(self): for project in self: project.is_favorite", "allowed portal users \"\"\" res = super(Project, self).message_subscribe(partner_ids=partner_ids, channel_ids=channel_ids, subtype_ids=subtype_ids)", "task.recurrence_id[f] else: if task.recurring_task: task[f] = defaults.get(f) else: task[f] =", "task.project_id.privacy_visibility == 'portal') tasks.sudo().write({'allowed_user_ids': [(4, user.id) for user in new_allowed_users]})", "# If depth == 1, return only direct children #", "_create_analytic_account(self): for project in self: analytic_account = self.env['account.analytic.account'].create({ 'name': project.name,", "sub.internal or sub.default)).ids if project_subtypes else None if not subtype_ids", "self.ensure_one() return self.repeat_interval > 0 and\\ (not self.repeat_show_dow or self._get_weekdays())", "fields.Boolean(compute='_compute_is_favorite', inverse='_inverse_is_favorite', string='Show Project on dashboard', help=\"Whether this project should", "store=True, group_operator=\"avg\") # customer portal: include comment and incoming emails", "analytic_account_id = fields.Many2one('account.analytic.account', string=\"Analytic Account\", copy=False, ondelete='set null', domain=\"['|', ('company_id',", "for rec_field in rec_fields} for task in self: if task.recurrence_id:", "section_ids = [] if section_id: section_ids.append(section_id) section_ids.extend(self.mapped('project_id').ids) search_domain = []", "cannot delete a project containing tasks. 
You can either archive", "@api.depends('partner_email', 'partner_phone', 'partner_id') def _compute_ribbon_message(self): for task in self: will_write_email", "= self.filtered(lambda rec: not rec.project_id) if leftover: res.update(super(Task, leftover)._notify_get_reply_to(default=default, records=None,", "'form', 'res_model': 'project.task', 'res_id': self.parent_id.id, 'type': 'ir.actions.act_window', 'context': dict(self._context, create=False)", "domain=\"['|', ('company_id', '=', False), ('company_id', '=', company_id)]\") partner_is_company = fields.Boolean(related='partner_id.is_company',", "self.env['mail.thread']._mail_find_partner_from_emails(email_list, records=task, force_create=False) if p] task.message_subscribe(partner_ids) return task def message_update(self,", "users: employees may see all project and tasks.\\n\" \"- Invited", "= fields.Boolean(string='Use email alias', compute='_compute_alias_enabled', readonly=False) alias_id = fields.Many2one('mail.alias', string='Alias',", "= self.search([ ('rating_active', '=', True), ('rating_status', '=', 'periodic'), ('rating_request_deadline', '<=',", "super(Task, self)._compute_access_url() for task in self: task.access_url = '/my/task/%s' %", "elif task.project_id.privacy_visibility == 'portal': task.allowed_user_ids |= task.project_id.allowed_portal_user_ids if task.project_id.privacy_visibility !=", "'Red Kanban Label', default=lambda s: _('Blocked'), translate=True, required=True, help='Override the", "task.repeat_interval if task.repeat_unit == 'day' else 1 recurring_dates = self.env['project.task.recurrence']._get_next_recurring_dates(", "def _compute_repeat_visibility(self): for task in self: task.repeat_show_day = task.recurring_task and", "result def update_date_end(self, stage_id): project_task_type = self.env['project.task.type'].browse(stage_id) if project_task_type.fold or", "check_company=True, change_default=True) planned_hours = fields.Float(\"Initially Planned Hours\", help='Time planned 
to", "Date', index=True, tracking=True) subtask_project_id = fields.Many2one('project.project', string='Sub-task Project', ondelete=\"restrict\", help=\"Project", "that stage to display.') rating_template_id = fields.Many2one( 'mail.template', string='Rating Email", "in readgroup] + self.project_ids.ids)) wizard = self.with_context(project_ids=project_ids).env['project.task.type.delete.wizard'].create({ 'project_ids': project_ids, 'stage_ids':", "create=False) } def action_subtask(self): action = self.env[\"ir.actions.actions\"]._for_xml_id(\"project.project_task_action_sub_task\") # display all", "in self.filtered(lambda t: t.recurring_task and t._is_recurrence_valid()): date = fields.Date.today() number_occurrences", "'view_mode': 'form', 'res_model': 'project.task.type.delete.wizard', 'views': [(self.env.ref('project.view_project_task_type_delete_wizard').id, 'form')], 'type': 'ir.actions.act_window', 'res_id':", "return res def action_unlink(self): wizard = self.env['project.delete.wizard'].create({ 'project_ids': self.ids })", "default=lambda self: self.env.company) currency_id = fields.Many2one('res.currency', related=\"company_id.currency_id\", string=\"Currency\", readonly=True) analytic_account_id", "task.working_days_open = 0.0 if task.date_end: dt_date_end = fields.Datetime.from_string(task.date_end) duration_data =", "related_sudo=False) is_closed = fields.Boolean(related=\"stage_id.is_closed\", string=\"Closing Stage\", readonly=True, related_sudo=False) parent_id =", "email will be sent to the customer when the task", "vals: vals['date_assign'] = now # recurrence fields rec_fields = vals.keys()", "or (task.repeat_unit == 'year' and task.repeat_on_year == 'day') task.repeat_show_dow =", "'res_model': 'project.task', 'res_id': self.parent_id.id, 'type': 'ir.actions.act_window', 'context': dict(self._context, create=False) }", "_ from odoo.exceptions import UserError, AccessError, ValidationError, RedirectWarning from odoo.tools.misc", "'wed', 'thu', 
'fri', 'sat', 'sun', 'repeat_day', 'repeat_week', 'repeat_month', 'repeat_weekday') def", "if the project is not assigned to the stage readgroup", "Manager', default=lambda self: self.env.user, tracking=True) alias_enabled = fields.Boolean(string='Use email alias',", "# check left-part is not already an alias aliases =", "create recursive hierarchy of tasks.')) @api.constrains('allowed_user_ids') def _check_no_portal_allowed(self): for task", "project. Incoming emails are automatically synchronized \" \"with Tasks (or", "action_view_tasks(self): action = self.with_context(active_id=self.id, active_ids=self.ids) \\ .env.ref('project.act_project_project_2_project_task_all') \\ .sudo().read()[0] action['display_name']", "self).message_subscribe(partner_ids=partner_ids, channel_ids=channel_ids, subtype_ids=subtype_ids) if partner_ids: new_allowed_users = self.env['res.partner'].browse(partner_ids).user_ids.filtered('share') tasks =", "activate default filters\"\"\" action = self.env['ir.actions.act_window']._for_xml_id('project.rating_rating_action_view_project_rating') action['name'] = _('Ratings of", "= min(5, task.repeat_number if task.repeat_type == 'after' else 5) delta", "tasks when unsubscribing from a project \"\"\" self.mapped('tasks').message_unsubscribe(partner_ids=partner_ids, channel_ids=channel_ids) return", "help=\"Analytic account to which this project is linked for financial", "self.env.company.id, 'partner_id': values.get('partner_id'), 'active': True, }) return analytic_account def _create_analytic_account(self):", "'image') if image_attachments: self.displayed_image_id = image_attachments[0] if self.email_from and not", "be updated.') elif will_write_email: task.ribbon_message = _('By saving this change,", "'thu', 'fri', 'sat', 'sun', 'repeat_day', 'repeat_week', 'repeat_month', 'repeat_weekday'] @api.depends('recurring_task', 'repeat_unit',", "def attachment_tree_view(self): action = 
self.env['ir.actions.act_window']._for_xml_id('base.action_attachment') action['domain'] = str([ '|', '&',", "date_format = self.env['res.lang']._lang_get(self.env.user.lang).date_format task.recurrence_message = '<ul>' for date in recurring_dates[:5]:", "fields.Many2many( 'res.users', 'project_favorite_user_rel', 'project_id', 'user_id', default=_get_default_favorite_user_ids, string='Members') is_favorite = fields.Boolean(compute='_compute_is_favorite',", "project And add the portal user subscribed to allowed portal", "- task_linked_to_calendar).update(dict.fromkeys( ['working_hours_open', 'working_hours_close', 'working_days_open', 'working_days_close'], 0.0)) @api.depends('stage_id', 'kanban_state') def", "Also give access button to portal users and portal customers.", "message.\") # In the domain of displayed_image_id, we couln't use", "super().unlink() # --------------------------------------------------- # Subtasks # --------------------------------------------------- @api.depends('parent_id.partner_id', 'project_id.partner_id') def", "a task is pulled in another stage.\\n\" \"- Periodical Rating:", "# rating fields rating_request_deadline = fields.Datetime(compute='_compute_rating_request_deadline', store=True) rating_active = fields.Boolean('Customer", "rec_fields = vals.keys() & self._get_recurrence_fields() if rec_fields: rec_values = {rec_field:", "project in self: project.access_url = '/my/project/%s' % project.id def _compute_access_warning(self):", "working time elapsed between record creation and assignation/closing. 
working_hours_open =", "'res_id': wizard.id, 'target': 'new', 'context': context, } def write(self, vals):", "recurrence fields allow_recurring_tasks = fields.Boolean(related='project_id.allow_recurring_tasks') recurring_task = fields.Boolean(string=\"Recurrent\") recurring_count =", "task.partner_id.email will_write_phone = task.partner_id and task.partner_phone != task.partner_id.phone if will_write_email", "customer phone number will also be updated.') else: task.ribbon_message =", "for which you want to get the customer's feedbacks.\") rating_status_period", "'=', False)], ['project_id'], ['project_id']) result = dict((data['project_id'][0], data['project_id_count']) for data", "in rec_fields} for task in self: if task.recurrence_id: task.recurrence_id.write(rec_values) elif", "project._compute_rating_request_deadline() self.env.cr.commit() class Task(models.Model): _name = \"project.task\" _description = \"Task\"", "= task.recurring_task and task.repeat_unit == 'year' @api.depends('recurring_task') def _compute_repeat(self): rec_fields", "self.mapped('project_id.alias_name') return [x for x in email_list if x.split('@')[0] not", "# Actions # --------------------------------------------------- def toggle_favorite(self): favorite_projects = not_fav_projects =", "tasks and create new one directly from notification emails. 
Also", "fields.Text(translate=True) sequence = fields.Integer(default=1) project_ids = fields.Many2many('project.project', 'project_task_type_rel', 'type_id', 'project_id',", "def _compute_access_url(self): super(Project, self)._compute_access_url() for project in self: project.access_url =", "return values # --------------------------------------------------- # Actions # --------------------------------------------------- def toggle_favorite(self):", "in this stage are considered as closed.\") disabled_rating_warning = fields.Text(compute='_compute_disabled_rating_warning')", "self._get_recurrence_fields() if rec_fields: rec_values = {rec_field: vals[rec_field] for rec_field in", "depth == 1, return only direct children # If depth", "vals else True if allowed_users_changed: for project in self: permission_removed", "= True def _get_default_stage_id(self): \"\"\" Gives default stage_id \"\"\" project_id", "7, 'bimonthly': 15, 'monthly': 30, 'quarterly': 90, 'yearly': 365} for", "compute='_compute_repeat', readonly=False) repeat_on_year = fields.Selection([ ('date', 'Date of the Year'),", "[DAYS.get(self.repeat_weekday)(n)] @api.depends( 'recurring_task', 'repeat_interval', 'repeat_unit', 'repeat_type', 'repeat_until', 'repeat_number', 'repeat_on_month', 'repeat_on_year',", "\"\"\" Override to update the task according to the email.", "customer when the task or issue reaches this step.\") fold", "store=True, group_operator=\"avg\") working_days_close = fields.Float(compute='_compute_elapsed', string='Working days to close', store=True,", "'Rating when changing stage', then an email will be sent", "def _notify_email_header_dict(self): headers = super(Task, self)._notify_email_header_dict() if self.project_id: current_objects =", "return super(Task, self).get_empty_list_help(help) def message_subscribe(self, partner_ids=None, channel_ids=None, subtype_ids=None): \"\"\" Add", "headers = super(Task, self)._notify_email_header_dict() if self.project_id: current_objects = 
[h for", "[('fold', '=', False), ('is_closed', '=', False)]) @api.model def _default_company_id(self): if", "are no records in that stage to display.') rating_template_id =", "days to close', store=True, group_operator=\"avg\") # customer portal: include comment", "# Business Methods # --------------------------------------------------- @api.model def _create_analytic_account_from_values(self, values): analytic_account", "if the parent task partner_id changes, the task partner_id remains", "_compute_attachment_ids(self): for task in self: attachment_ids = self.env['ir.attachment'].search([('res_id', '=', task.id),", "recurring_dates = self.env['project.task.recurrence']._get_next_recurring_dates( date + timedelta(days=delta), task.repeat_interval, task.repeat_unit, task.repeat_type, task.repeat_until,", "lambda value: value.id) def copy(self, default=None): if default is None:", "in vals and not vals['active']: self.env['project.task'].search([('stage_id', 'in', self.ids)]).write({'active': False}) return", "or _stage_find), if project_id not in default_stage: default_stage[project_id] = self.with_context(", "= fields.Boolean('Automatic kanban status', default=False, help=\"Automatically modify the kanban state", "planned of all the sub-tasks linked to this task. 
Usually", "default_fields for d in days): vals[days[week_start]] = True if 'repeat_day'", "company_id)]\") partner_email = fields.Char( compute='_compute_partner_email', inverse='_inverse_partner_email', string='Email', readonly=False, store=True, copy=False)", "in days): vals[days[week_start]] = True if 'repeat_day' in default_fields: vals['repeat_day']", "self).message_subscribe(partner_ids=partner_ids, channel_ids=channel_ids, subtype_ids=subtype_ids) project_subtypes = self.env['mail.message.subtype'].browse(subtype_ids) if subtype_ids else None", "in self: if task.recurrence_id: task.recurrence_id.write(rec_values) elif vals.get('recurring_task'): rec_values['next_recurrence_date'] = fields.Datetime.today()", "--------------------------------------------------- @api.model def _create_analytic_account_from_values(self, values): analytic_account = self.env['account.analytic.account'].create({ 'name': values.get('name',", "default['name'] = _(\"%s (copy)\") % (self.name) project = super(Project, self).copy(default)", "file for full copyright and licensing details. 
import ast from", "& self._get_recurrence_fields() if rec_fields and vals.get('recurring_task') is True: rec_values =", "Issue Tracker module is installed).\") privacy_visibility = fields.Selection([ ('followers', 'Invited", "'Years'), ], default='week', compute='_compute_repeat', readonly=False) repeat_type = fields.Selection([ ('forever', 'Forever'),", "them or by someone of their company.\") allowed_user_ids = fields.Many2many('res.users',", "_compute_elapsed(self): task_linked_to_calendar = self.filtered( lambda task: task.project_id.resource_calendar_id and task.create_date )", "'<=', fields.Datetime.now()) ]) for project in projects: project.task_ids._send_task_rating_mail() project._compute_rating_request_deadline() self.env.cr.commit()", "saving this change, the customer email will also be updated.')", "keeping the batch creation of tasks # 2) Ensure the", "default.get('name'): default['name'] = _(\"%s (copy)\") % (self.name) project = super(Project,", "planned to achieve this task (including its sub-tasks).', tracking=True) subtask_planned_hours", "# This method should be called once a day by", "self.partner_id: # we consider that posting a message with a", "default=_default_company_id) color = fields.Integer(string='Color Index') user_email = fields.Char(related='user_id.email', string='User Email',", "task.partner_id.email def _inverse_partner_email(self): for task in self: if task.partner_id and", "self.repeat_unit == 'week': return [fn(n) for day, fn in DAYS.items()", "task.project_id.partner_id or task.parent_id.partner_id @api.depends('partner_id.email', 'parent_id.email_from') def _compute_email_from(self): for task in", "action = self.env['ir.actions.act_window']._for_xml_id('base.action_attachment') action['domain'] = str([ '|', '&', ('res_model', '=',", "self: self.env.uid, index=True, tracking=True) partner_id = fields.Many2one('res.partner', string='Customer', compute='_compute_partner_id', store=True,", "rate, token=None, feedback=None, 
subtype_xmlid=None): return super(Task, self).rating_apply(rate, token=token, feedback=feedback, subtype_xmlid=\"project.mt_task_rating\")", "= fields.Many2one('res.users', string='Project Manager', default=lambda self: self.env.user, tracking=True) alias_enabled =", "= allowed_users.filtered('share') project.allowed_internal_user_ids = allowed_users - project.allowed_portal_user_ids def _compute_access_url(self): super(Project,", "self).message_unsubscribe(partner_ids=partner_ids, channel_ids=channel_ids) def _alias_get_creation_values(self): values = super(Project, self)._alias_get_creation_values() values['alias_model_id'] =", "'default_project_id' in self.env.context: search_domain = ['|', ('project_ids', '=', self.env.context['default_project_id'])] +", "= fields.Boolean(related=\"stage_id.is_closed\", string=\"Closing Stage\", readonly=True, related_sudo=False) parent_id = fields.Many2one('project.task', string='Parent", "_('Ratings of %s') % (self.name,) action_context = ast.literal_eval(action['context']) if action['context']", "self.kanban_state == 'done': return self.env.ref('project.mt_task_ready') elif 'stage_id' in init_values: return", "string='Working Time', related='company_id.resource_calendar_id') type_ids = fields.Many2many('project.task.type', 'project_task_type_rel', 'project_id', 'type_id', string='Tasks", "the customer when the task or issue reaches this step.\")", "vals): # Prevent double project creation self = self.with_context(mail_create_nosubscribe=True) project", "an active_test context key task_ids = self.env['project.task'].with_context(active_test=False).search([('project_id', '=', self.id)], order='parent_id').ids", "self: if project.partner_id and project.partner_email != project.partner_id.email: project.partner_id.email = project.partner_email", "= recurrence.id tasks = super().create(vals_list) for task in tasks: if", "kanban view when there are no records in that stage", "for follower in self.message_follower_ids: 
project.message_subscribe(partner_ids=follower.partner_id.ids, subtype_ids=follower.subtype_ids.ids) if 'tasks' not in", "for project. not_fav_projects.write({'favorite_user_ids': [(4, self.env.uid)]}) favorite_projects.write({'favorite_user_ids': [(3, self.env.uid)]}) def _get_default_favorite_user_ids(self):", "for task in task_linked_to_calendar: dt_create_date = fields.Datetime.from_string(task.create_date) if task.date_assign: dt_date_assign", "date_deadline = fields.Date(string='Deadline', index=True, copy=False, tracking=True) date_last_stage_update = fields.Datetime(string='Last Stage", "fields.Datetime.from_string(task.date_end) duration_data = task.project_id.resource_calendar_id.get_work_duration_data(dt_create_date, dt_date_end, compute_leaves=True) task.working_hours_close = duration_data['hours'] task.working_days_close", "_('Parent Task'), 'view_mode': 'form', 'res_model': 'project.task', 'res_id': self.parent_id.id, 'type': 'ir.actions.act_window',", "= fields.Many2one('project.project', related=\"project_id.subtask_project_id\", string='Sub-task Project', readonly=True) allow_subtasks = fields.Boolean(string=\"Allow Sub-tasks\",", "|= task.project_id.allowed_internal_user_ids task.allowed_user_ids -= portal_users elif task.project_id.privacy_visibility == 'portal': task.allowed_user_ids", "readonly=False, store=True, copy=False) partner_phone = fields.Char( compute='_compute_partner_phone', inverse='_inverse_partner_phone', string=\"Phone\", readonly=False,", "created. 
It can be the current project itself.\") allow_subtasks =", "def _alias_get_creation_values(self): values = super(Project, self)._alias_get_creation_values() values['alias_model_id'] = self.env['ir.model']._get('project.task').id if", "= fields.Char(string='Use Tasks as', default='Tasks', help=\"Label used for the tasks", "!= project.partner_email: project.partner_email = project.partner_id.email def _inverse_partner_email(self): for project in", "def _compute_alias_enabled(self): for project in self: project.alias_enabled = project.alias_domain and", "and not vals.get('allow_recurring_tasks'): self.env['project.task'].search([('project_id', 'in', self.ids), ('recurring_task', '=', True)]).write({'recurring_task': False})", "\"\"\" project_id = self.env.context.get('default_project_id') if not project_id: return False return", "check left-part is not already an alias aliases = self.mapped('project_id.alias_name')", "'&', ('res_model', '=', 'project.task'), ('res_id', 'in', self.task_ids.ids) ]) action['context'] =", "website_message_ids = fields.One2many(domain=lambda self: [('model', '=', self._name), ('message_type', 'in', ['email',", "for project in self.with_context(active_test=False): if project.tasks: raise UserError(_('You cannot delete", "when changing stage', then an email will be sent to", "def _compute_allowed_users(self): for project in self: users = project.allowed_internal_user_ids |", "of the project to 'Visible by following customers' in order", "== 'followers': task.allowed_user_ids |= task.project_id.allowed_internal_user_ids task.allowed_user_ids -= portal_users elif task.project_id.privacy_visibility", "= OR([recurrence_domain, ['&', ('recurrence_id', '=', task.recurrence_id.id), ('create_date', '>=', task.create_date)]]) else:", "task.project_id.resource_calendar_id.get_work_duration_data(dt_create_date, dt_date_assign, compute_leaves=True) task.working_hours_open = duration_data['hours'] task.working_days_open = duration_data['days'] else:", 
"'Ready'), ('blocked', 'Blocked')], string='Kanban State', copy=False, default='normal', required=True) kanban_state_label =", "message_process. This override updates the document according to the email.", "order): search_domain = [('id', 'in', stages.ids)] if 'default_project_id' in self.env.context:", "this task. Usually less or equal to the initially time", "'rating.mixin'] _mail_post_access = 'read' _order = \"priority desc, sequence, id", "i in range(1, 32) ], compute='_compute_repeat', readonly=False) repeat_week = fields.Selection([", "'day' else 1 recurring_dates = self.env['project.task.recurrence']._get_next_recurring_dates( date + timedelta(days=delta), task.repeat_interval,", "or '') + ',' + (msg.get('cc') or '')) # check", "'') + ',' + (msg.get('cc') or '')) # check left-part", "the kanban state to 'ready for the new stage' (green", "('stage_id', '=', False)], ['project_id'], ['project_id']) result = dict((data['project_id'][0], data['project_id_count']) for", "= fields.Many2one('account.analytic.account', string=\"Analytic Account\", copy=False, ondelete='set null', domain=\"['|', ('company_id', '=',", "if recurrence_update == 'subsequent': for task in self: recurrence_domain =", "filters\"\"\" action = self.env['ir.actions.act_window']._for_xml_id('project.rating_rating_action_view_project_rating') action['name'] = _('Ratings of %s') %", "{} action_context.update(self._context) action_context['search_default_parent_res_name'] = self.name action_context.pop('group_by', None) return dict(action, context=action_context)", "order=order, limit=1).id # ------------------------------------------------ # CRUD overrides # ------------------------------------------------ @api.model", "'email_from': msg.get('from'), 'planned_hours': 0.0, 'partner_id': msg.get('author_id') } defaults.update(custom_values) task =", "tasks. 
Please, disable the recurrence first.')) # stage change: update", "for task in self: if task.kanban_state == 'normal': task.kanban_state_label =", "('mon', 'Monday'), ('tue', 'Tuesday'), ('wed', 'Wednesday'), ('thu', 'Thursday'), ('fri', 'Friday'),", "to hide the project without removing it.\") sequence = fields.Integer(default=10,", "by the mailgateway through message_process. This override updates the document", "@api.constrains('parent_id') def _check_parent_id(self): if not self._check_recursion(): raise ValidationError(_('Error! You cannot", "followed project and tasks.\\n\" \"- All internal users: employees may", "Internal Users\", default=lambda self: self.env.user, domain=[('share', '=', False)]) allowed_portal_user_ids =", "vals: vals.pop('is_favorite') self._fields['is_favorite'].determine_inverse(self) res = super(Project, self).write(vals) if vals else", "set up the mail templates on the stages for which", "= fields.Datetime.now() if 'parent_id' in vals and vals['parent_id'] in self.ids:", "type_ids = fields.Many2many('project.task.type', 'project_task_type_rel', 'project_id', 'type_id', string='Tasks Stages') task_count =", "= fields.Integer(default=1) project_ids = fields.Many2many('project.project', 'project_task_type_rel', 'type_id', 'project_id', string='Projects', default=_get_default_project_ids)", "to 'Visible by following customers' in order to make it", "'kanban_state') def _compute_kanban_state_label(self): for task in self: if task.kanban_state ==", "custom_values=defaults) email_list = task.email_split(msg) partner_ids = [p.id for p in", "fields.Integer(string='Sequence', index=True, default=10, help=\"Gives the sequence order when displaying a", "task.date_assign: dt_date_assign = fields.Datetime.from_string(task.date_assign) duration_data = task.project_id.resource_calendar_id.get_work_duration_data(dt_create_date, dt_date_assign, compute_leaves=True) task.working_hours_open", "doc_count = 
fields.Integer(compute='_compute_attached_docs_count', string=\"Number of documents attached\") date_start = fields.Date(string='Start", "return super(ProjectTaskType, self).write(vals) @api.depends('project_ids', 'project_ids.rating_active') def _compute_disabled_rating_warning(self): for stage in", "current_objects.insert(0, 'project.project-%s, ' % self.project_id.id) headers['X-Odoo-Objects'] = ','.join(current_objects) if self.tag_ids:", "= fields.Many2many( 'res.users', 'project_favorite_user_rel', 'project_id', 'user_id', default=_get_default_favorite_user_ids, string='Members') is_favorite =", "} # --------------------------------------------------- # Rating business # --------------------------------------------------- def _send_task_rating_mail(self,", "task.project_id not in task.stage_id.project_ids: task.stage_id = task.stage_find(task.project_id.id, [ ('fold', '=',", "rec_values = {rec_field: vals[rec_field] for rec_field in rec_fields} rec_values['next_recurrence_date'] =", "True, }) project.write({'analytic_account_id': analytic_account.id}) # --------------------------------------------------- # Rating business #", "= project.id if project.privacy_visibility == 'portal' and project.partner_id.user_ids: project.allowed_user_ids |=", "access button to portal users and portal customers. 
If they", "changes): res = super(Task, self)._track_template(changes) test_task = self[0] if 'stage_id'", "ctx.update({ 'default_name': self.env.context.get('name', self.name) + ':', 'default_parent_id': self.id, # will", "task.repeat_on_month, task.repeat_on_year, task._get_weekdays(WEEKS.get(task.repeat_week)), task.repeat_day, task.repeat_week, task.repeat_month, count=number_occurrences) date_format = self.env['res.lang']._lang_get(self.env.user.lang).date_format", "action_view_account_analytic_line(self): \"\"\" return the action to see all the analytic", "# we consider that posting a message with a specified", "Task'), 'view_mode': 'form', 'res_model': 'project.task', 'res_id': self.parent_id.id, 'type': 'ir.actions.act_window', 'context':", "self.write({'user_id': self.env.user.id}) # If depth == 1, return only direct", "\"\"\" email_list = self.email_split(msg) partner_ids = [p.id for p in", "it was created through the chatter using # suggested recipients.", "if project_id: name = self.env['project.project'].browse(project_id).label_tasks if name: tname = name.lower()", "projects with a least 1 task in that stage #", "# Prevent double project creation self = self.with_context(mail_create_nosubscribe=True) project =", "cannot create recursive hierarchy of task(s).')) @api.model def get_empty_list_help(self, help):", "on your project.\") favorite_user_ids = fields.Many2many( 'res.users', 'project_favorite_user_rel', 'project_id', 'user_id',", "def _compute_company_id(self): for task in self.filtered(lambda task: task.project_id): task.company_id =", "False), ('stage_id', '=', False)]) color = fields.Integer(string='Color Index') user_id =", "elif will_write_phone: task.ribbon_message = _('By saving this change, the customer", "users = project.allowed_internal_user_ids | project.allowed_portal_user_ids project.allowed_user_ids = users def _inverse_allowed_user(self):", "vals.get('active') and any(self.mapped('recurrence_id')): # TODO: show a dialog to stop", 
"partner_id changes, the task partner_id is automatically changed also. 2)", "tasks_count = {c.get('recurrence_id')[0]: c.get('recurrence_id_count') for c in count} for task", "action_subtask(self): action = self.env[\"ir.actions.actions\"]._for_xml_id(\"project.project_task_action_sub_task\") # display all subtasks of current", "fields.Selection([ ('followers', 'Invited internal users'), ('employees', 'All internal users'), ('portal',", "store=True, readonly=False, copy=False) project_privacy_visibility = fields.Selection(related='project_id.privacy_visibility', string=\"Project Visibility\") # Computed", "or group_name == 'portal_customer' and not portal_privacy: group_data['has_button_access'] = False", "in default_fields for d in days): vals[days[week_start]] = True if", "# retrieves all the projects with a least 1 task", "self.env['project.task'].search(recurrence_domain) result = super(Task, tasks).write(vals) # rating on stage if", "and vals.get('stage_id'): self.filtered(lambda x: x.project_id.rating_active and x.project_id.rating_status == 'stage')._send_task_rating_mail(force_send=True) return", "False)]) allowed_portal_user_ids = fields.Many2many('res.users', 'project_allowed_portal_users_rel', string=\"Allowed Portal Users\", domain=[('share', '=',", "a default stage; if not set, stages must be default", "= fields.Integer(default=10, help=\"Gives the sequence order when displaying a list", "def unlink(self): # Check project is empty for project in", "related=\"project_id.subtask_project_id\", string='Sub-task Project', readonly=True) allow_subtasks = fields.Boolean(string=\"Allow Sub-tasks\", related=\"project_id.allow_subtasks\", readonly=True)", "'project.task', 'view_mode': 'tree,form', 'domain': [('recurrence_id', 'in', self.recurrence_id.ids)], } # ---------------------------------------------------", "0 and\\ (not self.repeat_show_dow or self._get_weekdays()) and\\ (self.repeat_type != 'after'", "['portal.mixin', 'mail.thread.cc', 
'mail.activity.mixin', 'rating.mixin'] _mail_post_access = 'read' _order = \"priority", "store=True, readonly=False, required=True, copy=True, default=_default_company_id) color = fields.Integer(string='Color Index') user_email", "of tasks: %(tasks_count)s</em></p>') % {'tasks_count': len(recurring_dates)} def _is_recurrence_valid(self): self.ensure_one() return", "task.subtask_planned_hours = sum(child_task.planned_hours + child_task.subtask_planned_hours for child_task in task.child_ids) @api.depends('child_ids')", "allowed_portal_user_ids = fields.Many2many('res.users', 'project_allowed_portal_users_rel', string=\"Allowed Portal Users\", domain=[('share', '=', True)])", "This override updates the document according to the email. \"\"\"", "\\ .env.ref('project.act_project_project_2_project_task_all') \\ .sudo().read()[0] action['display_name'] = self.name return action def", "stage.') legend_normal = fields.Char( 'Grey Kanban Label', default=lambda s: _('In", "order to make it accessible by the recipient(s).\") @api.depends('rating_status', 'rating_status_period')", "action def action_view_all_rating(self): \"\"\" return the action to see all", "'form')], 'type': 'ir.actions.act_window', 'res_id': wizard.id, 'target': 'new', 'context': self.env.context, }", "self.with_context(mail_create_nosubscribe=True) project = super(Project, self).create(vals) if not vals.get('subtask_project_id'): project.subtask_project_id =", "= _(\"task\") project_id = self.env.context.get('default_project_id', False) if project_id: name =", "help=\"Whether this project should be displayed on your dashboard.\") label_tasks", "date = fields.Date(string='Expiration Date', index=True, tracking=True) subtask_project_id = fields.Many2one('project.project', string='Sub-task", "sat = fields.Boolean(string=\"Sat\", compute='_compute_repeat', readonly=False) sun = fields.Boolean(string=\"Sun\", compute='_compute_repeat', readonly=False)", "= fields.Date(string='Expiration Date', index=True, 
tracking=True) subtask_project_id = fields.Many2one('project.project', string='Sub-task Project',", "= all_users - portal_users self.allowed_portal_user_ids |= portal_users self.allowed_internal_user_ids |= internal_users", "to achieve this task (including its sub-tasks).', tracking=True) subtask_planned_hours =", "in self: will_write_email = task.partner_id and task.partner_email != task.partner_id.email will_write_phone", "if not self.alias_enabled: self.alias_name = False def _compute_alias_enabled(self): for project", "first.')) return super().unlink() # --------------------------------------------------- # Subtasks # --------------------------------------------------- @api.depends('parent_id.partner_id',", "through the mail gateway. Indeed we # do not want", "|= project else: not_fav_projects |= project # Project User has", "= _( \"The task cannot be shared with the recipient(s)", "t._is_recurrence_valid()): date = fields.Date.today() number_occurrences = min(5, task.repeat_number if task.repeat_type", "'=', False), ('stage_id', '=', False)]) color = fields.Integer(string='Color Index') user_id", "search_domain += list(domain) # perform search, return the first found", "not in default_stage: default_stage[project_id] = self.with_context( default_project_id=project_id ).default_get(['stage_id']).get('stage_id') vals[\"stage_id\"] =", "fields.Many2one(related='partner_id.commercial_partner_id') partner_email = fields.Char( compute='_compute_partner_email', inverse='_inverse_partner_email', string='Email', readonly=False, store=True, copy=False)", "'=', False), ('company_id', '=', company_id)]\") partner_email = fields.Char( compute='_compute_partner_email', inverse='_inverse_partner_email',", "return self.stage_find(project_id, [('fold', '=', False), ('is_closed', '=', False)]) @api.model def", "if not default.get('name'): default['name'] = _(\"%s (copy)\", self.name) if self.recurrence_id:", "elif task.email_from: 
task._message_add_suggested_recipient(recipients, email=task.email_from, reason=_('Customer Email')) return recipients def _notify_email_header_dict(self):", "internal users: employees may see all project and tasks.\\n\" \"-", "Case management # ---------------------------------------- def stage_find(self, section_id, domain=[], order='sequence'): \"\"\"", "_date_name = \"date_assign\" _inherit = ['portal.mixin', 'mail.thread.cc', 'mail.activity.mixin', 'rating.mixin'] _mail_post_access", "or len(recurring_dates) > 5: task.recurrence_message += '<li>...</li>' task.recurrence_message += '</ul>'", "= stage_view return { 'name': _('Delete Stage'), 'view_mode': 'form', 'res_model':", "task partner_id. Once the task partner_id has been set: 1)", "self.with_context( empty_list_help_id=self.env.context.get('default_project_id'), empty_list_help_model='project.project', empty_list_help_document_name=tname, ) return super(Task, self).get_empty_list_help(help) def message_subscribe(self,", "fields.Date.today() + timedelta(days=7) if 'repeat_weekday' in default_fields: vals['repeat_weekday'] = self._fields.get('repeat_weekday').selection[week_start][0]", "email will also be updated.') elif will_write_phone: task.ribbon_message = _('By", "not set, stages must be default stages \"\"\" # collect", "'repeat_week', 'repeat_month', 'repeat_weekday') def _compute_recurrence_message(self): self.recurrence_message = False for task", "# suggested recipients. 
This heuristic allows to avoid ugly hacks", "vals.get('recurring_task') is True: rec_values = {rec_field: vals[rec_field] for rec_field in", "--------------------------------------------------- # Business Methods # --------------------------------------------------- @api.model def _create_analytic_account_from_values(self, values):", "= ctx return action def action_recurring_tasks(self): return { 'name': 'Tasks", "def _get_default_favorite_user_ids(self): return [(6, 0, [self.env.uid])] name = fields.Char(\"Name\", index=True,", "'normal': task.kanban_state_label = task.legend_normal elif task.kanban_state == 'blocked': task.kanban_state_label =", "('october', 'October'), ('november', 'November'), ('december', 'December'), ], compute='_compute_repeat', readonly=False) repeat_show_dow", "self.parent_id.id, 'type': 'ir.actions.act_window', 'context': dict(self._context, create=False) } def action_subtask(self): action", "self.env['project.task'].read_group([('recurrence_id', 'in', recurring_tasks.recurrence_id.ids)], ['id'], 'recurrence_id') tasks_count = {c.get('recurrence_id')[0]: c.get('recurrence_id_count') for", "= message.attachment_ids.filtered(lambda a: a.mimetype == 'image') if image_attachments: self.displayed_image_id =", "repeat_number = fields.Integer(string=\"Repetitions\", default=1, compute='_compute_repeat', readonly=False) repeat_on_month = fields.Selection([ ('date',", "users and portal customers. 
If they are notified they should", "project.partner_id.user_ids return res def action_unlink(self): wizard = self.env['project.delete.wizard'].create({ 'project_ids': self.ids", "self.name return action def action_view_account_analytic_line(self): \"\"\" return the action to", "fields.Many2many('res.users', 'project_allowed_portal_users_rel', string=\"Allowed Portal Users\", domain=[('share', '=', True)]) doc_count =", "== 'month' and task.repeat_on_month == 'date') or (task.repeat_unit == 'year'", "['project_id'], ['project_id']) project_ids = list(set([project['project_id'][0] for project in readgroup] +", "'Weekly'), ('bimonthly', 'Twice a Month'), ('monthly', 'Once a Month'), ('quarterly',", "\\ .sudo().read()[0] action['display_name'] = self.name return action def action_view_account_analytic_line(self): \"\"\"", "groups if self.project_id.privacy_visibility == 'portal': allowed_user_ids = self.project_id.allowed_portal_user_ids.partner_id.ids groups.insert(0, (", "res_id displayed_image_id = fields.Many2one('ir.attachment', domain=\"[('res_model', '=', 'project.task'), ('res_id', '=', id),", "default=_get_default_color) _sql_constraints = [ ('name_uniq', 'unique (name)', \"Tag name already", "the active field is set to False, it will allow", "', '.join(portal_users[:10].mapped('name')) raise ValidationError(_(\"The project visibility setting doesn't allow portal", "company_id = fields.Many2one( 'res.company', string='Company', compute='_compute_company_id', store=True, readonly=False, required=True, copy=True,", "task_linked_to_calendar = self.filtered( lambda task: task.project_id.resource_calendar_id and task.create_date ) for", "- 1][0] if 'repeat_until' in default_fields: vals['repeat_until'] = fields.Date.today() +", "is set to False, it will allow you to hide", "the task or issue is in that stage.') mail_template_id =", "the analytic lines of the project's analytic account \"\"\" action", "the stage search taken from the lead: - section_id: if", 
"_('Ready'), translate=True, required=True, help='Override the default value displayed for the", "self.env.uid)]}) def _get_default_favorite_user_ids(self): return [(6, 0, [self.env.uid])] name = fields.Char(\"Name\",", "subtype_ids or task_subtypes: self.mapped('tasks').message_subscribe( partner_ids=partner_ids, channel_ids=channel_ids, subtype_ids=task_subtypes) if partner_ids: all_users", "if project.privacy_visibility == 'portal' and project.partner_id.user_ids: project.allowed_user_ids |= project.partner_id.user_ids return", "= self._notify_get_action_link('assign', **local_msg_vals) project_actions = [{'url': take_action, 'title': _('I take", "'repeat_weekday' in default_fields: vals['repeat_weekday'] = self._fields.get('repeat_weekday').selection[week_start][0] return vals @api.model_create_multi def", "'repeat_unit', 'repeat_type', 'repeat_until', 'repeat_number', 'repeat_on_month', 'repeat_on_year', 'mon', 'tue', 'wed', 'thu',", "vals.get('allow_recurring_tasks'): self.env['project.task'].search([('project_id', 'in', self.ids), ('recurring_task', '=', True)]).write({'recurring_task': False}) if 'active'", "date_assign if vals.get('user_id') and 'date_assign' not in vals: vals['date_assign'] =", "self.filtered(lambda x: x.project_id.rating_active and x.project_id.rating_status == 'stage')._send_task_rating_mail(force_send=True) return result def", "== 'forever' or len(recurring_dates) > 5: task.recurrence_message += '<li>...</li>' task.recurrence_message", "('recurrence_id', '=', task.recurrence_id.id), ('create_date', '>=', task.create_date)]]) else: recurrence_domain = [('recurrence_id',", "list(DAYS.keys()) week_start = fields.Datetime.today().weekday() if all(d in default_fields for d", "Time', related='company_id.resource_calendar_id') type_ids = fields.Many2many('project.task.type', 'project_task_type_rel', 'project_id', 'type_id', string='Tasks Stages')", "+= '<li>%s</li>' % date.strftime(date_format) if task.repeat_type == 'after' and 
task.repeat_number", "in order to make it accessible by the recipient(s).\") @api.depends('rating_status',", "label_tasks = fields.Char(string='Use Tasks as', default='Tasks', help=\"Label used for the", "fields.Many2one('project.task.recurrence', copy=False) recurrence_update = fields.Selection([ ('this', 'This task'), ('subsequent', 'This", "the Month'), ], default='date', compute='_compute_repeat', readonly=False) repeat_on_year = fields.Selection([ ('date',", "in self.env['project.task'].browse(task_ids): # preserve task name and stage, normally altered", "readonly=True, related_sudo=False) legend_done = fields.Char(related='stage_id.legend_done', string='Kanban Valid Explanation', readonly=True, related_sudo=False)", "string='Stage', compute='_compute_stage_id', store=True, readonly=False, ondelete='restrict', tracking=True, index=True, default=_get_default_stage_id, group_expand='_read_group_stage_ids', domain=\"[('project_ids',", "project in self: if project.partner_id and project.partner_phone != project.partner_id.phone: project.partner_phone", "project.allowed_user_ids |= project.partner_id.user_ids return res def action_unlink(self): wizard = self.env['project.delete.wizard'].create({", "x in email_list if x.split('@')[0] not in aliases] @api.model def", "subtask_project_id = fields.Many2one('project.project', related=\"project_id.subtask_project_id\", string='Sub-task Project', readonly=True) allow_subtasks = fields.Boolean(string=\"Allow", "string='Project Manager', related='project_id.user_id', readonly=True) company_id = fields.Many2one( 'res.company', string='Company', compute='_compute_company_id',", "default['recurrence_id'] = self.recurrence_id.copy().id return super(Task, self).copy(default) @api.constrains('parent_id') def _check_parent_id(self): for", "will update the kanban state to 'ready for the new", "if task.kanban_state == 'normal': task.kanban_state_label = task.legend_normal elif task.kanban_state ==", "group_operator=\"avg\") 
working_days_open = fields.Float(compute='_compute_elapsed', string='Working days to assign', store=True, group_operator=\"avg\")", "or task.parent_id) and task.email_from) or task.parent_id.email_from @api.depends('parent_id.project_id.subtask_project_id') def _compute_project_id(self): for", "will allow you to hide the project without removing it.\")", "Part of Odoo. See LICENSE file for full copyright and", "if not res and self.project_id.partner_id: return self.project_id.partner_id return res def", "portal_users self.allowed_portal_user_ids |= portal_users self.allowed_internal_user_ids |= internal_users return res def", "0.0 task.working_days_open = 0.0 if task.date_end: dt_date_end = fields.Datetime.from_string(task.date_end) duration_data", "project.analytic_account_id and not project.analytic_account_id.line_ids: analytic_accounts_to_delete |= project.analytic_account_id result = super(Project,", "domain=[], order='sequence'): \"\"\" Override of the base.stage method Parameter of", "active_ids=self.ids) \\ .env.ref('project.act_project_project_2_project_task_all') \\ .sudo().read()[0] action['display_name'] = self.name return action", "required=True, help='Override the default value displayed for the blocked state", "True def _get_default_stage_id(self): \"\"\" Gives default stage_id \"\"\" project_id =", "'project_ids.rating_active') def _compute_disabled_rating_warning(self): for stage in self: disabled_projects = stage.project_ids.filtered(lambda", "\"\"\" Override to set alias of tasks to their project", "start-date must be lower than project end-date.') ] @api.depends('partner_id.email') def", "dict(self.env.context) ctx = {k: v for k, v in ctx.items()", "== 'blocked': return self.env.ref('project.mt_task_blocked') elif 'kanban_state_label' in init_values and self.kanban_state", "on the stages for which you want to get the", "task.stage_find(task.project_id.id, [ ('fold', '=', False), ('is_closed', '=', False)]) else: task.stage_id", "default stage_id 
\"\"\" project_id = self.env.context.get('default_project_id') if not project_id: return", "= tasks_count.get(task.recurrence_id.id, 0) @api.depends('partner_id.email') def _compute_partner_email(self): for task in self:", "Planned Hours\", compute='_compute_subtask_planned_hours', help=\"Sum of the time planned of all", "_compute_partner_email(self): for task in self: if task.partner_id and task.partner_id.email !=", "time planned of this task.\") user_id = fields.Many2one('res.users', string='Assigned to',", "'quarterly': 90, 'yearly': 365} for project in self: project.rating_request_deadline =", "= {'default_account_id': self.analytic_account_id.id} action['domain'] = [('account_id', '=', self.analytic_account_id.id)] return action", "to this task. Usually less or equal to the initially", "= fields.Datetime.from_string(task.date_assign) duration_data = task.project_id.resource_calendar_id.get_work_duration_data(dt_create_date, dt_date_assign, compute_leaves=True) task.working_hours_open = duration_data['hours']", "and tasks.\\n\" \"- Invited portal and all internal users: employees", "'stage')._send_task_rating_mail(force_send=True) return result def update_date_end(self, stage_id): project_task_type = self.env['project.task.type'].browse(stage_id) if", "= task.partner_email @api.depends('partner_id.phone') def _compute_partner_phone(self): for task in self: if", "compute='_compute_repeat', readonly=False) tue = fields.Boolean(string=\"Tue\", compute='_compute_repeat', readonly=False) wed = fields.Boolean(string=\"Wed\",", "to display.') rating_template_id = fields.Many2one( 'mail.template', string='Rating Email Template', domain=[('model',", "5: task.recurrence_message += '<li>...</li>' task.recurrence_message += '</ul>' if task.repeat_type ==", "alias aliases = self.mapped('project_id.alias_name') return [x for x in email_list", "pdata['type'] == 'portal' and pdata['id'] in allowed_user_ids, {} )) portal_privacy", "'view_mode': 'tree,form', 'domain': 
[('recurrence_id', 'in', self.recurrence_id.ids)], } # --------------------------------------------------- #", "do not propagate an active_test context key task_ids = self.env['project.task'].with_context(active_test=False).search([('project_id',", "= '/my/project/%s' % project.id def _compute_access_warning(self): super(Project, self)._compute_access_warning() for project", "copy=False) company_id = fields.Many2one('res.company', string='Company', required=True, default=lambda self: self.env.company) currency_id", "recurrence first.')) # stage change: update date_last_stage_update if 'stage_id' in", "take_action = self._notify_get_action_link('assign', **local_msg_vals) project_actions = [{'url': take_action, 'title': _('I", "return { 'stage_id': task.stage_id.id, 'name': task.name, 'company_id': project.company_id.id, } def", "email and phone number will also be updated.') elif will_write_email:", "of the stage search taken from the lead: - section_id:", "raise ValidationError(_('Error! 
You cannot create recursive hierarchy of task(s).')) @api.model", "compute='_compute_email_from', store=\"True\", readonly=False) allowed_user_ids = fields.Many2many('res.users', string=\"Visible to\", groups='project.group_project_manager', compute='_compute_allowed_user_ids',", "store=True, readonly=False, index=True, tracking=True, check_company=True, change_default=True) planned_hours = fields.Float(\"Initially Planned", "[] if section_id: section_ids.append(section_id) section_ids.extend(self.mapped('project_id').ids) search_domain = [] if section_ids:", "project.\") favorite_user_ids = fields.Many2many( 'res.users', 'project_favorite_user_rel', 'project_id', 'user_id', default=_get_default_favorite_user_ids, string='Members')", "_stage_find), if project_id not in default_stage: default_stage[project_id] = self.with_context( default_project_id=project_id", "self.env['project.project'].browse(self._context['default_project_id']).company_id return self.env.company @api.model def _read_group_stage_ids(self, stages, domain, order): search_domain", "and self.kanban_state == 'done': return self.env.ref('project.mt_task_ready') elif 'stage_id' in init_values:", "fields.Char( 'Grey Kanban Label', default=lambda s: _('In Progress'), translate=True, required=True,", "access_rights_uid=SUPERUSER_ID) return stages.browse(stage_ids) active = fields.Boolean(default=True) name = fields.Char(string='Title', tracking=True,", "--------------------------------------------------- def _send_task_rating_mail(self, force_send=False): for task in self: rating_template =", "recurrence.id if 'recurring_task' in vals and not vals.get('recurring_task'): self.recurrence_id.unlink() tasks", "fields.Date.today()) @api.depends('recurrence_id') def _compute_recurring_count(self): self.recurring_count = 0 recurring_tasks = self.filtered(lambda", "partner_phone = fields.Char( compute='_compute_partner_phone', inverse='_inverse_partner_phone', string=\"Phone\", readonly=False, store=True, 
copy=False) company_id", "= ['|', ('project_ids', '=', self.env.context['default_project_id'])] + search_domain stage_ids = stages._search(search_domain,", "domain of displayed_image_id, we couln't use attachment_ids because a one2many", "default=None, records=None, company=None, doc_names=None): \"\"\" Override to set alias of", "restricted. Set the privacy to 'Visible by following customers' in", "super(Task, self).message_update(msg, update_vals=update_vals) def _message_get_suggested_recipients(self): recipients = super(Task, self)._message_get_suggested_recipients() for", "'project_allowed_internal_users_rel', string=\"Allowed Internal Users\", default=lambda self: self.env.user, domain=[('share', '=', False)])", "Year'), ], default='date', compute='_compute_repeat', readonly=False) mon = fields.Boolean(string=\"Mon\", compute='_compute_repeat', readonly=False)", "if self.env.user in project.favorite_user_ids: favorite_projects |= project else: not_fav_projects |=", "'=', False), ('stage_id.fold', '=', False), ('stage_id', '=', False)], ['project_id'], ['project_id'])", "return recipients def _notify_email_header_dict(self): headers = super(Task, self)._notify_email_header_dict() if self.project_id:", "def _inverse_partner_phone(self): for task in self: if task.partner_id and task.partner_phone", "('subsequent', 'This and following tasks'), ('all', 'All tasks'), ], default='this',", "name = fields.Char(\"Name\", index=True, required=True, tracking=True) description = fields.Html() active", "return result def message_subscribe(self, partner_ids=None, channel_ids=None, subtype_ids=None): \"\"\" Subscribe to", "'name': _('Parent Task'), 'view_mode': 'form', 'res_model': 'project.task', 'res_id': self.parent_id.id, 'type':", "= False def _get_weekdays(self, n=1): self.ensure_one() if self.repeat_unit == 'week':", "('rating_request_deadline', '<=', fields.Datetime.now()) ]) for project in projects: project.task_ids._send_task_rating_mail() 
project._compute_rating_request_deadline()", "all the analytic lines of the project's analytic account \"\"\"", "= super(Task, self)._notify_get_groups(msg_vals=msg_vals) local_msg_vals = dict(msg_vals or {}) self.ensure_one() project_user_group_id", "and project_user_group_id in pdata['groups'] and pdata['id'] in allowed_user_ids new_group =", "help=\"Label used for the tasks of the project.\", translate=True) tasks", "task in self: portal_users = task.allowed_user_ids.filtered('share') internal_users = task.allowed_user_ids -", "fields.One2many('project.task', 'project_id', string=\"Task Activities\") resource_calendar_id = fields.Many2one( 'resource.calendar', string='Working Time',", "else: default_project = self.project_id.subtask_project_id or self.project_id ctx = dict(self.env.context) ctx", "for day, fn in DAYS.items() if self[day]] return [DAYS.get(self.repeat_weekday)(n)] @api.depends(", "fields.Datetime.now()} return {'date_end': False} def unlink(self): if any(self.mapped('recurrence_id')): # TODO:", "# If depth <= 0, return all children without depth", "= str([ '|', '&', ('res_model', '=', 'project.project'), ('res_id', 'in', self.ids),", "self.env.uid)]}) def action_view_tasks(self): action = self.with_context(active_id=self.id, active_ids=self.ids) \\ .env.ref('project.act_project_project_2_project_task_all') \\", "compute_leaves=True) task.working_hours_open = duration_data['hours'] task.working_days_open = duration_data['days'] else: task.working_hours_open =", "OR from .project_task_recurrence import DAYS, WEEKS class ProjectTaskType(models.Model): _name =", "be responsible if no other responsible is # found. 
create_context", "subtask_planned_hours = fields.Float(\"Sub-tasks Planned Hours\", compute='_compute_subtask_planned_hours', help=\"Sum of the time", "# display all subtasks of current task action['domain'] = [('id',", "# Delete the empty related analytic account analytic_accounts_to_delete = self.env['account.analytic.account']", "_read_group_stage_ids(self, stages, domain, order): search_domain = [('id', 'in', stages.ids)] if", "= fields.Many2one('project.task', string='Parent Task', index=True) child_ids = fields.One2many('project.task', 'parent_id', string=\"Sub-tasks\",", "self.repeat_until and self.repeat_until > fields.Date.today()) @api.depends('recurrence_id') def _compute_recurring_count(self): self.recurring_count =", "customer will update the kanban state to 'ready for the", "task.create_date ) for task in task_linked_to_calendar: dt_create_date = fields.Datetime.from_string(task.create_date) if", "= fields.Char( compute='_compute_partner_email', inverse='_inverse_partner_email', string='Email', readonly=False, store=True, copy=False) partner_phone =", "@api.model def _create_analytic_account_from_values(self, values): analytic_account = self.env['account.analytic.account'].create({ 'name': values.get('name', _('Unknown", "= fields.Char(string='Email From', help=\"These people will receive email.\", index=True, compute='_compute_email_from',", "default=lambda self: self.env.user, domain=[('share', '=', False)]) allowed_portal_user_ids = fields.Many2many('res.users', 'project_allowed_portal_users_rel',", "repeat_interval = fields.Integer(string='Repeat Every', default=1, compute='_compute_repeat', readonly=False) repeat_unit = fields.Selection([", "'in', self.ids)], ['project_id'], ['project_id']) project_ids = list(set([project['project_id'][0] for project in", "self.env['project.task'] if depth == 1: return children return children +", "task.allowed_user_ids.filtered('share') internal_users = task.allowed_user_ids - portal_users if 
task.project_id.privacy_visibility == 'followers':", "sequence = fields.Integer(default=1) project_ids = fields.Many2many('project.project', 'project_task_type_rel', 'type_id', 'project_id', string='Projects',", "= self.env['project.project'].browse(project_id).label_tasks if name: tname = name.lower() self = self.with_context(", "an email will be sent when a task is pulled", "rec_fields} for task in self: if task.recurrence_id: task.recurrence_id.write(rec_values) elif vals.get('recurring_task'):", "state to 'ready for the new stage' (green bullet).\\n\" \"", "to assign', store=True, group_operator=\"avg\") working_hours_close = fields.Float(compute='_compute_elapsed', string='Working hours to", "accessible by the recipient(s).\") @api.depends('child_ids.planned_hours') def _compute_subtask_planned_hours(self): for task in", "message_attachment_ids = task.mapped('message_ids.attachment_ids').ids # from mail_thread task.attachment_ids = [(6, 0,", "project in self: project.rating_request_deadline = fields.datetime.now() + timedelta(days=periods.get(project.rating_status_period, 0)) @api.model", "\"\"\" # remove default author when going through the mail", "stage.project_ids.filtered(lambda p: not p.rating_active) if disabled_projects: stage.disabled_rating_warning = '\\n'.join('- %s'", "self.id), ('id', '!=', self.id)] # update context, with all default", "def _inverse_is_favorite(self): favorite_projects = not_fav_projects = self.env['project.project'].sudo() for project in", "default=lambda self: self.env.user.has_group('project.group_project_rating')) rating_status = fields.Selection( [('stage', 'Rating when changing", "fields.Many2one( 'resource.calendar', string='Working Time', related='company_id.resource_calendar_id') type_ids = fields.Many2many('project.task.type', 'project_task_type_rel', 'project_id',", "result = super(Project, self).unlink() analytic_accounts_to_delete.unlink() return result def message_subscribe(self, partner_ids=None,", "index=True, 
copy=False, readonly=True) project_id = fields.Many2one('project.project', string='Project', compute='_compute_project_id', store=True, readonly=False,", "account analytic_accounts_to_delete = self.env['account.analytic.account'] for project in self: if project.analytic_account_id", "business # --------------------------------------------------- def _send_task_rating_mail(self, force_send=False): for task in self:", "return self.env['project.project'].browse(self._context['default_project_id']).company_id return self.env.company @api.model def _read_group_stage_ids(self, stages, domain, order):", "working_days_close = fields.Float(compute='_compute_elapsed', string='Working days to close', store=True, group_operator=\"avg\") #", "Date', index=True, copy=False) date_assign = fields.Datetime(string='Assigning Date', index=True, copy=False, readonly=True)", "= False if custom_values is None: custom_values = {} defaults", "super(Task, self).rating_get_partner_id() if not res and self.project_id.partner_id: return self.project_id.partner_id return", "'form')], 'type': 'ir.actions.act_window', 'res_id': wizard.id, 'target': 'new', 'context': context, }", "= self.with_context(active_id=self.id, active_ids=self.ids) \\ .env.ref('project.act_project_project_2_project_task_all') \\ .sudo().read()[0] action['display_name'] = self.name", "], compute='_compute_repeat', readonly=False) repeat_show_dow = fields.Boolean(compute='_compute_repeat_visibility') repeat_show_day = fields.Boolean(compute='_compute_repeat_visibility') repeat_show_week", "new_partner.id}) return super(Task, self)._message_post_after_hook(message, msg_vals) def action_assign_to_me(self): self.write({'user_id': self.env.user.id}) #", "sub: sub.internal or sub.default)).ids if project_subtypes else None if not", "if the Issue Tracker module is installed).\") privacy_visibility = fields.Selection([", "permission_removed.filtered('share') 
project.message_unsubscribe(allowed_portal_users_removed.partner_id.commercial_partner_id.ids) for task in project.task_ids: task.allowed_user_ids -= permission_removed if", "force_create=False) if p] self.message_subscribe(partner_ids) return super(Task, self).message_update(msg, update_vals=update_vals) def _message_get_suggested_recipients(self):", "and\\ (self.repeat_type != 'until' or self.repeat_until and self.repeat_until > fields.Date.today())", "project end-date.') ] @api.depends('partner_id.email') def _compute_partner_email(self): for project in self:", "action_view_all_rating(self): \"\"\" return the action to see all the rating", "'allowed_internal_user_ids' in vals if allowed_users_changed: allowed_users = {project: project.allowed_user_ids for", "+ timedelta(days=7) if 'repeat_weekday' in default_fields: vals['repeat_weekday'] = self._fields.get('repeat_weekday').selection[week_start][0] return", "compute='_compute_repeat', readonly=False) wed = fields.Boolean(string=\"Wed\", compute='_compute_repeat', readonly=False) thu = fields.Boolean(string=\"Thu\",", "which this project is linked for financial management. 
\" \"Use", "'until': task.recurrence_message += _('<p><em>Number of tasks: %(tasks_count)s</em></p>') % {'tasks_count': len(recurring_dates)}", "project in self: if project.analytic_account_id and not project.analytic_account_id.line_ids: analytic_accounts_to_delete |=", "users'), ('portal', 'Invited portal users and all internal users'), ],", "and \"stage_id\" not in vals: # 1) Allows keeping the", "readonly=True) analytic_account_id = fields.Many2one('account.analytic.account', string=\"Analytic Account\", copy=False, ondelete='set null', domain=\"['|',", "- portal_users if task.project_id.privacy_visibility == 'followers': task.allowed_user_ids |= task.project_id.allowed_internal_user_ids task.allowed_user_ids", "task.allowed_user_ids |= task.project_id.allowed_internal_user_ids task.allowed_user_ids -= portal_users elif task.project_id.privacy_visibility == 'portal':", "\"\"\" return { 'stage_id': task.stage_id.id, 'name': task.name, 'company_id': project.company_id.id, }", "project. not_fav_projects.write({'favorite_user_ids': [(4, self.env.uid)]}) favorite_projects.write({'favorite_user_ids': [(3, self.env.uid)]}) def _get_default_favorite_user_ids(self): return", "compute='_compute_ribbon_message') partner_city = fields.Char(related='partner_id.city', readonly=False) manager_id = fields.Many2one('res.users', string='Project Manager',", "task. 
Usually less or equal to the initially time planned", "set an email will be sent to the customer when", "'type_id', string='Tasks Stages') task_count = fields.Integer(compute='_compute_task_count', string=\"Task Count\") task_ids =", "'monthly': 30, 'quarterly': 90, 'yearly': 365} for project in self:", "% date.strftime(date_format) if task.repeat_type == 'after' and task.repeat_number > 5", "project users and managers recipients that can assign tasks and", "task in self} leftover = self.filtered(lambda rec: not rec.project_id) if", "= fields.Char('Name', required=True) color = fields.Integer(string='Color', default=_get_default_color) _sql_constraints = [", "partner_email = fields.Char( compute='_compute_partner_email', inverse='_inverse_partner_email', string='Email', readonly=False, store=True, copy=False) partner_phone", "default='Tasks', help=\"Label used for the tasks of the project.\", translate=True)", "= tools.email_split((msg.get('to') or '') + ',' + (msg.get('cc') or ''))", "Mail gateway # --------------------------------------------------- def _track_template(self, changes): res = super(Task,", "if depth == 1: return children return children + children._get_all_subtasks(depth", "if task.recurrence_id: task[f] = task.recurrence_id[f] else: if task.recurring_task: task[f] =", "all the rating of the project and activate default filters\"\"\"", "and task.partner_email != task.partner_id.email will_write_phone = task.partner_id and task.partner_phone !=", "the empty related analytic account analytic_accounts_to_delete = self.env['account.analytic.account'] for project", "_compute_allowed_user_ids(self): for task in self: portal_users = task.allowed_user_ids.filtered('share') internal_users =", "'repeat_on_month', 'repeat_on_year', 'mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun', 'repeat_day',", "0, list(set(attachment_ids) - set(message_attachment_ids)))] @api.depends('project_id.allowed_user_ids', 'project_id.privacy_visibility') def 
_compute_allowed_user_ids(self): for task", "fields.Boolean('Customer Ratings', default=lambda self: self.env.user.has_group('project.group_project_rating')) rating_status = fields.Selection( [('stage', 'Rating", "be displayed on your dashboard.\") label_tasks = fields.Char(string='Use Tasks as',", "= message.partner_ids.filtered(lambda partner: partner.email == self.email_from) if new_partner: self.search([ ('partner_id',", "'All internal users'), ('portal', 'Invited portal users and all internal", "check_company=True, help=\"Analytic account to which this project is linked for", "'is_favorite' in vals: vals.pop('is_favorite') self._fields['is_favorite'].determine_inverse(self) res = super(Project, self).write(vals) if", "manager_id = fields.Many2one('res.users', string='Project Manager', related='project_id.user_id', readonly=True) company_id = fields.Many2one(", "+ children._get_all_subtasks(depth - 1) def action_open_parent_task(self): return { 'name': _('Parent", "project.allowed_internal_user_ids = allowed_users - project.allowed_portal_user_ids def _compute_access_url(self): super(Project, self)._compute_access_url() for", "analytic_account = self.env['account.analytic.account'].create({ 'name': values.get('name', _('Unknown Analytic Account')), 'company_id': values.get('company_id')", "string='Customer', auto_join=True, tracking=True, domain=\"['|', ('company_id', '=', False), ('company_id', '=', company_id)]\")", "store=True, group_operator=\"avg\") working_days_open = fields.Float(compute='_compute_elapsed', string='Working days to assign', store=True,", "alias of tasks to their project if any. 
\"\"\" aliases", "and not \"company_id\" in vals: vals[\"company_id\"] = self.env[\"project.project\"].browse( project_id ).company_id.id", "You cannot create recursive hierarchy of task(s).')) @api.model def get_empty_list_help(self,", "(instead of _get_default_stage_id or _stage_find), if project_id not in default_stage:", "to False; however we do not # want the gateway", "if 'repeat_month' in default_fields: vals['repeat_month'] = self._fields.get('repeat_month').selection[fields.Datetime.today().month - 1][0] if", "'Months'), ('year', 'Years'), ], default='week', compute='_compute_repeat', readonly=False) repeat_type = fields.Selection([", "not_fav_projects |= project # Project User has no write access", "'repeat_day' in default_fields: vals['repeat_day'] = str(fields.Datetime.today().day) if 'repeat_month' in default_fields:", "if not set, stages must be default stages \"\"\" #", "number will also be updated.') elif will_write_email: task.ribbon_message = _('By", "depth <= 0, return all children without depth limit def", "help='Override the default value displayed for the done state for", "the users subscribed to allowed portal users \"\"\" res =", "= all_users.filtered('share') internal_users = all_users - portal_users self.allowed_portal_user_ids |= portal_users", "return the first found return self.env['project.task.type'].search(search_domain, order=order, limit=1).id # ------------------------------------------------", "for the done state for kanban selection, when the task", "and activate default filters\"\"\" action = self.env['ir.actions.act_window']._for_xml_id('project.rating_rating_action_view_project_rating') action['name'] = _('Ratings", "children.active) if not children: return self.env['project.task'] if depth == 1:", "self.with_context(active_test=False).mapped('tasks').write({'active': vals['active']}) if vals.get('partner_id') or vals.get('privacy_visibility'): for project in self.filtered(lambda", "All internal users: employees may see all 
project and tasks.\\n\"", "_get_default_project_ids(self): default_project_id = self.env.context.get('default_project_id') return [default_project_id] if default_project_id else None", "for task in tasks: if task.project_id.privacy_visibility == 'portal': task._portal_ensure_token() return", "'tasks' not in default: self.map_tasks(project.id) return project @api.model def create(self,", "WEEKS class ProjectTaskType(models.Model): _name = 'project.task.type' _description = 'Task Stage'", "self.env[\"ir.actions.actions\"]._for_xml_id(\"analytic.account_analytic_line_action\") action['context'] = {'default_account_id': self.analytic_account_id.id} action['domain'] = [('account_id', '=', self.analytic_account_id.id)]", "task.kanban_state_label = task.legend_blocked else: task.kanban_state_label = task.legend_done def _compute_access_url(self): super(Task,", "fields.Boolean(string=\"Fri\", compute='_compute_repeat', readonly=False) sat = fields.Boolean(string=\"Sat\", compute='_compute_repeat', readonly=False) sun =", "get (instead of _get_default_stage_id or _stage_find), if project_id not in", "compute='_compute_partner_phone', inverse='_inverse_partner_phone', string=\"Phone\", readonly=False, store=True, copy=False) ribbon_message = fields.Char('Ribbon message',", "% self.project_id.id) headers['X-Odoo-Objects'] = ','.join(current_objects) if self.tag_ids: headers['X-Odoo-Tags'] = ','.join(self.tag_ids.mapped('name'))", "'Tuesday'), ('wed', 'Wednesday'), ('thu', 'Thursday'), ('fri', 'Friday'), ('sat', 'Saturday'), ('sun',", "duration_data['days'] else: task.working_hours_close = 0.0 task.working_days_close = 0.0 (self -", "the customer's feedbacks.\") rating_status_period = fields.Selection([ ('daily', 'Daily'), ('weekly', 'Weekly'),", "if default_project else self.env.company.id, }) action['context'] = ctx return action", "the project without removing it.\") sequence = fields.Integer(default=10, help=\"Gives the", "task.project_id.privacy_visibility == 'followers': 
task.allowed_user_ids |= task.project_id.allowed_internal_user_ids task.allowed_user_ids -= portal_users elif", "users: employees may only see the followed project and tasks.\\n\"", "may see project and tasks followed by\\n\" \" them or", "project.allowed_user_ids for project in self} # directly compute is_favorite to", "attachment_ids = fields.One2many('ir.attachment', compute='_compute_attachment_ids', string=\"Main Attachments\", help=\"Attachment that don't come", "are notified they should probably have access to the document.", "or self._get_weekdays()) and\\ (self.repeat_type != 'after' or self.repeat_number) and\\ (self.repeat_type", "in self: if not task._check_recursion(): raise ValidationError(_('Error! You cannot create", "and task.repeat_unit == 'week' task.repeat_show_month = task.recurring_task and task.repeat_unit ==", "[(6, 0, [self.env.uid])] name = fields.Char(\"Name\", index=True, required=True, tracking=True) description", "'=', False)]) @api.model def _default_company_id(self): if self._context.get('default_project_id'): return self.env['project.project'].browse(self._context['default_project_id']).company_id return", "'repeat_until' in default_fields: vals['repeat_until'] = fields.Date.today() + timedelta(days=7) if 'repeat_weekday'", "in self: analytic_account = self.env['account.analytic.account'].create({ 'name': project.name, 'company_id': project.company_id.id, 'partner_id':", "vals: # archiving/unarchiving a project does it on its tasks,", "the defaults are correct (and computed once by project), #", "found return self.env['project.task.type'].search(search_domain, order=order, limit=1).id # ------------------------------------------------ # CRUD overrides", "task.project_id.privacy_visibility == 'portal': task.allowed_user_ids |= task.project_id.allowed_portal_user_ids if task.project_id.privacy_visibility != 'portal':", "project in self.filtered(lambda x: x.privacy_visibility != 'portal'): project.access_warning = _(", 
"message.attachment_ids.filtered(lambda a: a.mimetype == 'image') if image_attachments: self.displayed_image_id = image_attachments[0]", "= super(Project, self).create(vals) if not vals.get('subtask_project_id'): project.subtask_project_id = project.id if", "or task.repeat_type == 'forever' or len(recurring_dates) > 5: task.recurrence_message +=", "unlink(self): if any(self.mapped('recurrence_id')): # TODO: show a dialog to stop", "update the kanban state to 'ready for the new stage'", "fields.Char(related='partner_id.city', readonly=False) manager_id = fields.Many2one('res.users', string='Project Manager', related='project_id.user_id', readonly=True) company_id", "'partner_id': msg.get('author_id') } defaults.update(custom_values) task = super(Task, self.with_context(create_context)).message_new(msg, custom_values=defaults) email_list", "string='Alias', ondelete=\"restrict\", required=True, help=\"Internal email associated with this project. Incoming", "super(Project, self)._alias_get_creation_values() values['alias_model_id'] = self.env['ir.model']._get('project.task').id if self.id: values['alias_defaults'] = defaults", "('res_id', 'in', self.task_ids.ids) ]) action['context'] = \"{'default_res_model': '%s','default_res_id': %d}\" %", "False), ('stage_id.fold', '=', False), ('stage_id', '=', False)], ['project_id'], ['project_id']) result", "assign', store=True, group_operator=\"avg\") working_days_close = fields.Float(compute='_compute_elapsed', string='Working days to close',", "--------------------------------------------------- # Mail gateway # --------------------------------------------------- def _track_template(self, changes): res", "project_id not in default_stage: default_stage[project_id] = self.with_context( default_project_id=project_id ).default_get(['stage_id']).get('stage_id') vals[\"stage_id\"]", "Year'), ('day', 'Day of the Year'), ], default='date', compute='_compute_repeat', readonly=False)", "or self.env.context.get('default_project_id') if 
project_id and not \"company_id\" in vals: vals[\"company_id\"]", "'portal': task._portal_ensure_token() return tasks def write(self, vals): now = fields.Datetime.now()", "rec_fields: rec_values = {rec_field: vals[rec_field] for rec_field in rec_fields} for", "task in self: if task.partner_id and task.partner_email != task.partner_id.email: task.partner_id.email", "Invited portal and all internal users: employees may see everything.\"", "# Stage change: Update date_end if folded stage and date_last_stage_update", "tasks_count.get(task.recurrence_id.id, 0) @api.depends('partner_id.email') def _compute_partner_email(self): for task in self: if", "project_user_group_id in pdata['groups'] if self.project_id.privacy_visibility == 'followers': allowed_user_ids = self.project_id.allowed_internal_user_ids.partner_id.ids", "default=_get_default_stage_id, group_expand='_read_group_stage_ids', domain=\"[('project_ids', '=', project_id)]\", copy=False) tag_ids = fields.Many2many('project.tags', string='Tags')", "= fields.Char( 'Red Kanban Label', default=lambda s: _('Blocked'), translate=True, required=True,", "self._get_recurrence_fields() if rec_fields and vals.get('recurring_task') is True: rec_values = {rec_field:", "any(self.mapped('recurrence_id')): # TODO: show a dialog to stop the recurrence", "project_id and not \"company_id\" in vals: vals[\"company_id\"] = self.env[\"project.project\"].browse( project_id", "= fields.Datetime(\"Created On\", readonly=True, index=True) write_date = fields.Datetime(\"Last Updated On\",", "datetime from random import randint from odoo import api, fields,", "self: project.alias_enabled = project.alias_domain and project.alias_id.alias_name @api.depends('allowed_internal_user_ids', 'allowed_portal_user_ids') def _compute_allowed_users(self):", "= vals.keys() & self._get_recurrence_fields() if rec_fields and vals.get('recurring_task') is True:", "be a default stage; if not set, stages must be", 
"fields.Datetime.from_string(task.create_date) if task.date_assign: dt_date_assign = fields.Datetime.from_string(task.date_assign) duration_data = task.project_id.resource_calendar_id.get_work_duration_data(dt_create_date, dt_date_assign,", "project.partner_id.phone = project.partner_phone @api.onchange('alias_enabled') def _onchange_alias_name(self): if not self.alias_enabled: self.alias_name", "come from message.\") # In the domain of displayed_image_id, we", "default='monthly') _sql_constraints = [ ('project_date_greater', 'check(date >= date_start)', 'Error! project", "_notify_get_groups(self, msg_vals=None): \"\"\" Handle project users and managers recipients that", "# If depth == 3, return children to third generation", "fields.Integer(string='Color Index') user_email = fields.Char(related='user_id.email', string='User Email', readonly=True, related_sudo=False) attachment_ids", "the project is too restricted. Set the privacy of the", "vals.get('recurring_task'): rec_values['next_recurrence_date'] = fields.Datetime.today() recurrence = self.env['project.task.recurrence'].create(rec_values) task.recurrence_id = recurrence.id", "# recurrence rec_fields = vals.keys() & self._get_recurrence_fields() if rec_fields and", "currency_id = fields.Many2one('res.currency', related=\"company_id.currency_id\", string=\"Currency\", readonly=True) analytic_account_id = fields.Many2one('account.analytic.account', string=\"Analytic", "= task.recurring_task and (task.repeat_unit == 'month' and task.repeat_on_month == 'date')", "_get_default_stage_id(self): \"\"\" Gives default stage_id \"\"\" project_id = self.env.context.get('default_project_id') if", "_('I take it')}] new_group[2]['actions'] = project_actions groups = [new_group] +", "+ child_task.subtask_planned_hours for child_task in task.child_ids) @api.depends('child_ids') def _compute_subtask_count(self): for", "} def action_subtask(self): action = 
self.env[\"ir.actions.actions\"]._for_xml_id(\"project.project_task_action_sub_task\") # display all subtasks", "from notification emails. Also give access button to portal users", "delete all of its tasks.')) # Delete the empty related", "Kanban Label', default=lambda s: _('Ready'), translate=True, required=True, help='Override the default", "readonly=False, required=True, copy=True, default=_default_company_id) color = fields.Integer(string='Color Index') user_email =", "project.favorite_user_ids def _inverse_is_favorite(self): favorite_projects = not_fav_projects = self.env['project.project'].sudo() for project", "new_task.id tasks += new_task return project.write({'tasks': [(6, 0, tasks.ids)]}) @api.returns('self',", "task.partner_id.phone = task.partner_phone @api.depends('partner_email', 'partner_phone', 'partner_id') def _compute_ribbon_message(self): for task", "allowed portal users \"\"\" res = super(Task, self).message_subscribe(partner_ids=partner_ids, channel_ids=channel_ids, subtype_ids=subtype_ids)", "\"\"\" groups = super(Task, self)._notify_get_groups(msg_vals=msg_vals) local_msg_vals = dict(msg_vals or {})", "= \"{'default_res_model': '%s','default_res_id': %d}\" % (self._name, self.id) return action def", "project.privacy_visibility == 'portal'): project.allowed_user_ids |= project.partner_id.user_ids return res def action_unlink(self):", "installed).\") privacy_visibility = fields.Selection([ ('followers', 'Invited internal users'), ('employees', 'All", "used res_model & res_id displayed_image_id = fields.Many2one('ir.attachment', domain=\"[('res_model', '=', 'project.task'),", "('day', 'Days'), ('week', 'Weeks'), ('month', 'Months'), ('year', 'Years'), ], default='week',", "it will allow you to hide the project without removing", "analytic_accounts_to_delete.unlink() return result def message_subscribe(self, partner_ids=None, channel_ids=None, subtype_ids=None): \"\"\" Subscribe", "'=', 'project.task')], help=\"If set an email will be sent to", 
"default_project_id=project_id ).default_get(['stage_id']).get('stage_id') vals[\"stage_id\"] = default_stage[project_id] # user_id change: update date_assign", "company=None, doc_names=None): \"\"\" Override to set alias of tasks to", "'name': task.name, 'company_id': project.company_id.id, } def map_tasks(self, new_project_id): \"\"\" copy", "= self.with_context(active_test=False).env['project.task'].read_group([('stage_id', 'in', self.ids)], ['project_id'], ['project_id']) project_ids = list(set([project['project_id'][0] for", "return children to third generation # If depth <= 0,", "related analytic account analytic_accounts_to_delete = self.env['account.analytic.account'] for project in self:", "rec_fields = self._get_recurrence_fields() defaults = self.default_get(rec_fields) for task in self:", "randint from odoo import api, fields, models, tools, SUPERUSER_ID, _", "stage if 'stage_id' in vals and vals.get('stage_id'): self.filtered(lambda x: x.project_id.rating_active", "- project.allowed_user_ids allowed_portal_users_removed = permission_removed.filtered('share') project.message_unsubscribe(allowed_portal_users_removed.partner_id.commercial_partner_id.ids) for task in project.task_ids:", "% {'tasks_count': len(recurring_dates)} def _is_recurrence_valid(self): self.ensure_one() return self.repeat_interval > 0", "project in self: analytic_account = self.env['account.analytic.account'].create({ 'name': project.name, 'company_id': project.company_id.id,", "('sat', 'Saturday'), ('sun', 'Sunday'), ], string='Day Of The Week', compute='_compute_repeat',", "not children: return self.env['project.task'] if depth == 1: return children", "Actions # --------------------------------------------------- def toggle_favorite(self): favorite_projects = not_fav_projects = self.env['project.project'].sudo()", "= fields.Date(string='Deadline', index=True, copy=False, tracking=True) date_last_stage_update = fields.Datetime(string='Last Stage Update',", "to which this project is 
linked for financial management. \"", "group_func = lambda pdata: pdata['type'] == 'user' and project_user_group_id in", "fields.Many2one('ir.attachment', domain=\"[('res_model', '=', 'project.task'), ('res_id', '=', id), ('mimetype', 'ilike', 'image')]\",", "task.legend_normal elif task.kanban_state == 'blocked': task.kanban_state_label = task.legend_blocked else: task.kanban_state_label", "return super(Task, self).copy(default) @api.constrains('parent_id') def _check_parent_id(self): for task in self:", "as a list of commands so we used res_model &", "self: self.env.user, tracking=True) alias_enabled = fields.Boolean(string='Use email alias', compute='_compute_alias_enabled', readonly=False)", "'=', company_id)]\", check_company=True, help=\"Analytic account to which this project is", "task.ribbon_message = False @api.constrains('parent_id') def _check_parent_id(self): if not self._check_recursion(): raise", "self.env['ir.attachment'] for project in self: project.doc_count = Attachment.search_count([ '|', '&',", "[h for h in headers.get('X-Odoo-Objects', '').split(',') if h] current_objects.insert(0, 'project.project-%s,", "def write(self, vals): now = fields.Datetime.now() if 'parent_id' in vals", "related_sudo=False) parent_id = fields.Many2one('project.task', string='Parent Task', index=True) child_ids = fields.One2many('project.task',", "self.env['ir.actions.act_window']._for_xml_id('base.action_attachment') action['domain'] = str([ '|', '&', ('res_model', '=', 'project.project'), ('res_id',", "allowed_users_changed: for project in self: permission_removed = allowed_users.get(project) - project.allowed_user_ids", "in self: project.is_favorite = self.env.user in project.favorite_user_ids def _inverse_is_favorite(self): favorite_projects", "task._get_weekdays(WEEKS.get(task.repeat_week)), task.repeat_day, task.repeat_week, task.repeat_month, count=number_occurrences) date_format = self.env['res.lang']._lang_get(self.env.user.lang).date_format 
task.recurrence_message =", "week_start = fields.Datetime.today().weekday() if all(d in default_fields for d in", "not_fav_projects = self.env['project.project'].sudo() for project in self: if self.env.user in", "task.project_id.resource_calendar_id.get_work_duration_data(dt_create_date, dt_date_end, compute_leaves=True) task.working_hours_close = duration_data['hours'] task.working_days_close = duration_data['days'] else:", "your dashboard.\") label_tasks = fields.Char(string='Use Tasks as', default='Tasks', help=\"Label used", "stage change: update date_last_stage_update if 'stage_id' in vals: vals.update(self.update_date_end(vals['stage_id'])) vals['date_last_stage_update']", "'Invited portal users and all internal users'), ], string='Visibility', required=True,", "internal_users @api.depends('create_date', 'date_end', 'date_assign') def _compute_elapsed(self): task_linked_to_calendar = self.filtered( lambda", "'</ul>' if task.repeat_type == 'until': task.recurrence_message += _('<p><em>Number of tasks:", "project.allowed_user_ids project.allowed_portal_user_ids = allowed_users.filtered('share') project.allowed_internal_user_ids = allowed_users - project.allowed_portal_user_ids def", "format_date, get_lang from odoo.osv.expression import OR from .project_task_recurrence import DAYS,", "recurrence_update == 'subsequent': for task in self: recurrence_domain = OR([recurrence_domain,", "if not task._check_recursion(): raise ValidationError(_('Error! 
You cannot create recursive hierarchy", "task in self: if task.partner_id: reason = _('Customer Email') if", "def _compute_repeat(self): rec_fields = self._get_recurrence_fields() defaults = self.default_get(rec_fields) for task", "\"\"\" get the default value for the copied task on", "in disabled_projects) else: stage.disabled_rating_warning = False class Project(models.Model): _name =", "without customer means that it was created through the chatter", "not in aliases] @api.model def message_new(self, msg, custom_values=None): \"\"\" Overrides", "readonly=False) repeat_week = fields.Selection([ ('first', 'First'), ('second', 'Second'), ('third', 'Third'),", "'project_id' class ProjectTags(models.Model): \"\"\" Tags of project's tasks \"\"\" _name", "not res and self.project_id.partner_id: return self.project_id.partner_id return res def rating_apply(self,", "count\", compute='_compute_subtask_count') email_from = fields.Char(string='Email From', help=\"These people will receive", "if not subtype_ids or task_subtypes: self.mapped('tasks').message_subscribe( partner_ids=partner_ids, channel_ids=channel_ids, subtype_ids=task_subtypes) if", "# Check project is empty for project in self.with_context(active_test=False): if", "legend_blocked = fields.Char(related='stage_id.legend_blocked', string='Kanban Blocked Explanation', readonly=True, related_sudo=False) legend_done =", "task in task_linked_to_calendar: dt_create_date = fields.Datetime.from_string(task.create_date) if task.date_assign: dt_date_assign =", "Sub-tasks\", related=\"project_id.allow_subtasks\", readonly=True) subtask_count = fields.Integer(\"Sub-task count\", compute='_compute_subtask_count') email_from =", "self.filtered(lambda rec: not rec.project_id) if leftover: res.update(super(Task, leftover)._notify_get_reply_to(default=default, records=None, company=company,", "project.rating_request_deadline = fields.datetime.now() + timedelta(days=periods.get(project.rating_status_period, 0)) @api.model def 
_map_tasks_default_valeus(self, task,", "going through the mail gateway. Indeed we # do not", "Projects.\") partner_id = fields.Many2one('res.partner', string='Customer', auto_join=True, tracking=True, domain=\"['|', ('company_id', '=',", "self).message_update(msg, update_vals=update_vals) def _message_get_suggested_recipients(self): recipients = super(Task, self)._message_get_suggested_recipients() for task", "Attachment.search_count([ '|', '&', ('res_model', '=', 'project.project'), ('res_id', '=', project.id), '&',", "index=True) write_date = fields.Datetime(\"Last Updated On\", readonly=True, index=True) date_end =", "'First'), ('second', 'Second'), ('third', 'Third'), ('last', 'Last'), ], default='first', compute='_compute_repeat',", "be updated.') else: task.ribbon_message = False @api.constrains('parent_id') def _check_parent_id(self): if", "], default='first', compute='_compute_repeat', readonly=False) repeat_weekday = fields.Selection([ ('mon', 'Monday'), ('tue',", "partner.email == self.email_from) if new_partner: self.search([ ('partner_id', '=', False), ('email_from',", "copy=False, default='normal', required=True) kanban_state_label = fields.Char(compute='_compute_kanban_state_label', string='Kanban State Label', tracking=True)", "p.name for p in disabled_projects) else: stage.disabled_rating_warning = False class", "task.copy(defaults) old_to_new_tasks[task.id] = new_task.id tasks += new_task return project.write({'tasks': [(6,", "= ast.literal_eval(self.alias_defaults or \"{}\") defaults['project_id'] = self.id return values #", "or task.parent_id.partner_id @api.depends('partner_id.email', 'parent_id.email_from') def _compute_email_from(self): for task in self:", "self.env['project.project'].browse(project_id).label_tasks if name: tname = name.lower() self = self.with_context( empty_list_help_id=self.env.context.get('default_project_id'),", "{}) create_context['default_user_id'] = False if custom_values is None: custom_values =", "32) ], 
compute='_compute_repeat', readonly=False) repeat_week = fields.Selection([ ('first', 'First'), ('second',", "self.env['mail.thread']._mail_find_partner_from_emails(email_list, records=self, force_create=False) if p] self.message_subscribe(partner_ids) return super(Task, self).message_update(msg, update_vals=update_vals)", "}) project.write({'analytic_account_id': analytic_account.id}) # --------------------------------------------------- # Rating business # ---------------------------------------------------", "= [('recurrence_id', 'in', self.recurrence_id.ids)] tasks |= self.env['project.task'].search(recurrence_domain) result = super(Task,", "project.alias_id.alias_name @api.depends('allowed_internal_user_ids', 'allowed_portal_user_ids') def _compute_allowed_users(self): for project in self: users", "value for the copied task on project duplication \"\"\" return", "project_id: name = self.env['project.project'].browse(project_id).label_tasks if name: tname = name.lower() self", "'sun', 'repeat_day', 'repeat_week', 'repeat_month', 'repeat_weekday') def _compute_recurrence_message(self): self.recurrence_message = False", "= self.mapped('child_ids').filtered(lambda children: children.active) if not children: return self.env['project.task'] if", "\" them or by someone of their company.\") allowed_user_ids =", "= dict(self.env.context or {}) create_context['default_user_id'] = False if custom_values is", "that posting a message with a specified recipient (not a", "msg.get('subject') or _(\"No Subject\"), 'email_from': msg.get('from'), 'planned_hours': 0.0, 'partner_id': msg.get('author_id')", "return groups def _notify_get_reply_to(self, default=None, records=None, company=None, doc_names=None): \"\"\" Override", "False class Project(models.Model): _name = \"project.project\" _description = \"Project\" _inherit", "if rec_fields and vals.get('recurring_task') is True: rec_values = {rec_field: vals[rec_field]", "'in', stages.ids)] if 'default_project_id' in 
self.env.context: search_domain = ['|', ('project_ids',", "allowed_users.filtered('share') project.allowed_internal_user_ids = allowed_users - project.allowed_portal_user_ids def _compute_access_url(self): super(Project, self)._compute_access_url()", "Check project is empty for project in self.with_context(active_test=False): if project.tasks:", "stages must be default stages \"\"\" # collect all section_ids", "a list of tasks.\") stage_id = fields.Many2one('project.task.type', string='Stage', compute='_compute_stage_id', store=True,", "Odoo. See LICENSE file for full copyright and licensing details.", "task.project_id = task.parent_id.project_id.subtask_project_id # --------------------------------------------------- # Mail gateway # ---------------------------------------------------", "normal state for kanban selection, when the task or issue", "Kanban Label', default=lambda s: _('Blocked'), translate=True, required=True, help='Override the default", "self.filtered(lambda l: l.recurrence_id) count = self.env['project.task'].read_group([('recurrence_id', 'in', recurring_tasks.recurrence_id.ids)], ['id'], 'recurrence_id')", "string='Email', readonly=False, store=True, copy=False) partner_phone = fields.Char( compute='_compute_partner_phone', inverse='_inverse_partner_phone', string=\"Phone\",", "be the current project itself.\") allow_subtasks = fields.Boolean('Sub-tasks', default=lambda self:", "+= '<li>...</li>' task.recurrence_message += '</ul>' if task.repeat_type == 'until': task.recurrence_message", "_onchange_task_company(self): if self.project_id.company_id != self.company_id: self.project_id = False @api.depends('project_id.company_id') def", "change, the customer phone number will also be updated.') else:", "stage.\\n\" \" * A good feedback from the customer will", "'project.project-%s, ' % self.project_id.id) headers['X-Odoo-Objects'] = ','.join(current_objects) if self.tag_ids: headers['X-Odoo-Tags']", "can be in a stage even if the project is", "if 
'tasks' not in default: self.map_tasks(project.id) return project @api.model def", "@api.depends('parent_id.project_id.subtask_project_id') def _compute_project_id(self): for task in self: if not task.project_id:", "aliases.get(task.project_id.id) for task in self} leftover = self.filtered(lambda rec: not", "of the tasks of the project:\\n\" \"- Invited internal users:", "\"\"\" Gives default stage_id \"\"\" project_id = self.env.context.get('default_project_id') if not", "section_id in section_ids: search_domain.append(('project_ids', '=', section_id)) search_domain += list(domain) #", "all_users - portal_users self.allowed_portal_user_ids |= portal_users self.allowed_internal_user_ids |= internal_users return", "'context': self.env.context, } def unlink(self): # Check project is empty", "return action def action_view_account_analytic_line(self): \"\"\" return the action to see", "readonly=False) repeat_month = fields.Selection([ ('january', 'January'), ('february', 'February'), ('march', 'March'),", "'=', 'project.task')]).ids message_attachment_ids = task.mapped('message_ids.attachment_ids').ids # from mail_thread task.attachment_ids =", "of Projects.\") partner_id = fields.Many2one('res.partner', string='Customer', auto_join=True, tracking=True, domain=\"['|', ('company_id',", "for f in rec_fields: if task.recurrence_id: task[f] = task.recurrence_id[f] else:", "readonly=False) allowed_user_ids = fields.Many2many('res.users', string=\"Visible to\", groups='project.group_project_manager', compute='_compute_allowed_user_ids', store=True, readonly=False,", "def _compute_project_id(self): for task in self: if not task.project_id: task.project_id", "ast.literal_eval(action['context']) if action['context'] else {} action_context.update(self._context) action_context['search_default_parent_res_name'] = self.name action_context.pop('group_by',", "partner_id, use the project partner_id if any, or else the", "readonly=True, index=True) date_end = 
fields.Datetime(string='Ending Date', index=True, copy=False) date_assign =", "in self: if project.partner_id and project.partner_phone != project.partner_id.phone: project.partner_phone =", "readonly=True, related_sudo=False) parent_id = fields.Many2one('project.task', string='Parent Task', index=True) child_ids =", "set alias of tasks to their project if any. \"\"\"", "if vals.get('partner_id') or vals.get('privacy_visibility'): for project in self.filtered(lambda project: project.privacy_visibility", "p] task.message_subscribe(partner_ids) return task def message_update(self, msg, update_vals=None): \"\"\" Override", "'repeat_month', 'repeat_weekday'] @api.depends('recurring_task', 'repeat_unit', 'repeat_on_month', 'repeat_on_year') def _compute_repeat_visibility(self): for task", "all internal users'), ], string='Visibility', required=True, default='portal', help=\"Defines the visibility", "Template', domain=[('model', '=', 'project.task')], help=\"If set and if the project's", "for project in self} # directly compute is_favorite to dodge", "when the task or issue is in that stage.') legend_done", "('rating_status', '=', 'periodic'), ('rating_request_deadline', '<=', fields.Datetime.now()) ]) for project in", "required=True, default=lambda self: self.env.company) currency_id = fields.Many2one('res.currency', related=\"company_id.currency_id\", string=\"Currency\", readonly=True)", "this task.\") user_id = fields.Many2one('res.users', string='Assigned to', default=lambda self: self.env.uid,", "if task.partner_id.email else _('Customer') task._message_add_suggested_recipient(recipients, partner=task.partner_id, reason=reason) elif task.email_from: task._message_add_suggested_recipient(recipients,", "task_subtypes = (project_subtypes.mapped('parent_id') | project_subtypes.filtered(lambda sub: sub.internal or sub.default)).ids if", "field in `default_get` 'default_company_id': default_project.company_id.id if default_project else self.env.company.id, })", "self.id 
return values # --------------------------------------------------- # Actions # --------------------------------------------------- def", "repeat_on_month = fields.Selection([ ('date', 'Date of the Month'), ('day', 'Day", "= task.parent_id.project_id.subtask_project_id # --------------------------------------------------- # Mail gateway # --------------------------------------------------- def", "fields.Many2many('project.task.type', 'project_task_type_rel', 'project_id', 'type_id', string='Tasks Stages') task_count = fields.Integer(compute='_compute_task_count', string=\"Task", "fields.Many2one('res.users', string='Project Manager', default=lambda self: self.env.user, tracking=True) alias_enabled = fields.Boolean(string='Use", "displaying a list of Projects.\") partner_id = fields.Many2one('res.partner', string='Customer', auto_join=True,", "'=', False), ('email_from', '=', new_partner.email), ('stage_id.fold', '=', False)]).write({'partner_id': new_partner.id}) return", "default_stage[project_id] = self.with_context( default_project_id=project_id ).default_get(['stage_id']).get('stage_id') vals[\"stage_id\"] = default_stage[project_id] # user_id", "task in self: task.subtask_planned_hours = sum(child_task.planned_hours + child_task.subtask_planned_hours for child_task", "string=\"Project Visibility\") # Computed field about working time elapsed between", "the document according to the email. 
\"\"\" # remove default", "'context': context, } def write(self, vals): if 'active' in vals", "fields allow_recurring_tasks = fields.Boolean(related='project_id.allow_recurring_tasks') recurring_task = fields.Boolean(string=\"Recurrent\") recurring_count = fields.Integer(string=\"Tasks", "for task in self.env['project.task'].browse(task_ids): # preserve task name and stage,", "self: if project.analytic_account_id and not project.analytic_account_id.line_ids: analytic_accounts_to_delete |= project.analytic_account_id result", "| project.allowed_portal_user_ids project.allowed_user_ids = users def _inverse_allowed_user(self): for project in", "the project's rating configuration is 'Rating when changing stage', then", "disabled_projects: stage.disabled_rating_warning = '\\n'.join('- %s' % p.name for p in", "compute='_compute_recurrence_message') repeat_interval = fields.Integer(string='Repeat Every', default=1, compute='_compute_repeat', readonly=False) repeat_unit =", "to copy archived task, but do not propagate an active_test", "task.parent_id: # set the parent to the duplicated task defaults['parent_id']", "(len(section_ids) - 1) for section_id in section_ids: search_domain.append(('project_ids', '=', section_id))", "'date_assign' not in vals: vals['date_assign'] = now # recurrence fields", "Overrides mail_thread message_new that is called by the mailgateway through", "the default value for the copied task on project duplication", "odoo.tools.misc import format_date, get_lang from odoo.osv.expression import OR from .project_task_recurrence", "from the lead: - section_id: if set, stages must belong", "string=\"Closing Stage\", readonly=True, related_sudo=False) parent_id = fields.Many2one('project.task', string='Parent Task', index=True)", "if task.project_id.privacy_visibility == 'followers': task.allowed_user_ids |= task.project_id.allowed_internal_user_ids task.allowed_user_ids -= portal_users", "will be sent when a task is pulled in another", 
"message_unsubscribe(self, partner_ids=None, channel_ids=None): \"\"\" Unsubscribe from all tasks when unsubscribing", "'Green Kanban Label', default=lambda s: _('Ready'), translate=True, required=True, help='Override the", "task.project_id.company_id @api.depends('project_id') def _compute_stage_id(self): for task in self: if task.project_id:", "self: if not task._check_recursion(): raise ValidationError(_('Error! You cannot create recursive", "resource_calendar_id = fields.Many2one( 'resource.calendar', string='Working Time', related='company_id.resource_calendar_id') type_ids = fields.Many2many('project.task.type',", "and following tasks'), ('all', 'All tasks'), ], default='this', store=False) recurrence_message", "self: for f in rec_fields: if task.recurrence_id: task[f] = task.recurrence_id[f]", "headers.get('X-Odoo-Objects', '').split(',') if h] current_objects.insert(0, 'project.project-%s, ' % self.project_id.id) headers['X-Odoo-Objects']", "def _compute_partner_phone(self): for project in self: if project.partner_id and project.partner_phone", "= {rec_field: vals[rec_field] for rec_field in rec_fields} for task in", "to the feedback for this stage.\\n\" \" * A good", "\"- All internal users: employees may see all project and", "if will_write_email and will_write_phone: task.ribbon_message = _('By saving this change,", "for task in self.filtered(lambda t: t.recurring_task and t._is_recurrence_valid()): date =", "(including its sub-tasks).', tracking=True) subtask_planned_hours = fields.Float(\"Sub-tasks Planned Hours\", compute='_compute_subtask_planned_hours',", "search, return the first found return self.env['project.task.type'].search(search_domain, order=order, limit=1).id #", "'portal_customer' and not portal_privacy: group_data['has_button_access'] = False elif group_name ==", "= (project_subtypes.mapped('parent_id') | project_subtypes.filtered(lambda sub: sub.internal or sub.default)).ids if project_subtypes", "store=True, copy=False) ribbon_message 
= fields.Char('Ribbon message', compute='_compute_ribbon_message') partner_city = fields.Char(related='partner_id.city',", "make it accessible by the recipient(s).\") @api.depends('rating_status', 'rating_status_period') def _compute_rating_request_deadline(self):", "unlink_wizard(self, stage_view=False): self = self.with_context(active_test=False) # retrieves all the projects", "search taken from the lead: - section_id: if set, stages", "dict(action, context=action_context) # --------------------------------------------------- # Business Methods # --------------------------------------------------- @api.model", "get the customer's feedbacks.\") rating_status_period = fields.Selection([ ('daily', 'Daily'), ('weekly',", "project.partner_id.email def _inverse_partner_email(self): for project in self: if project.partner_id and", "for project in self: if project.partner_id and project.partner_email != project.partner_id.email:", "self.with_context(create_context)).message_new(msg, custom_values=defaults) email_list = task.email_split(msg) partner_ids = [p.id for p", "employees may only see the followed project and tasks.\\n\" \"-", "AccessError, ValidationError, RedirectWarning from odoo.tools.misc import format_date, get_lang from odoo.osv.expression", "be shared with the recipient(s) because the privacy of the", "def _compute_allowed_user_ids(self): for task in self: portal_users = task.allowed_user_ids.filtered('share') internal_users", "= fields.Float(compute='_compute_elapsed', string='Working days to assign', store=True, group_operator=\"avg\") working_days_close =", "(self.repeat_type != 'after' or self.repeat_number) and\\ (self.repeat_type != 'until' or", "'year' and task.repeat_on_year == 'date') task.repeat_show_week = task.recurring_task and (task.repeat_unit", "use attachment_ids because a one2many is represented as a list", "from datetime import timedelta, datetime from random import randint from", "False)]).write({'partner_id': new_partner.id}) return 
super(Task, self)._message_post_after_hook(message, msg_vals) def action_assign_to_me(self): self.write({'user_id': self.env.user.id})", "def action_view_all_rating(self): \"\"\" return the action to see all the", "self._check_recursion(): raise ValidationError(_('Error! You cannot create recursive hierarchy of tasks.'))", "task.recurring_task and (task.repeat_unit == 'month' and task.repeat_on_month == 'day') or", "fields.Char('Name', required=True) color = fields.Integer(string='Color', default=_get_default_color) _sql_constraints = [ ('name_uniq',", "= default_stage[project_id] # user_id change: update date_assign if vals.get('user_id'): vals['date_assign']", "fields.Float(\"Sub-tasks Planned Hours\", compute='_compute_subtask_planned_hours', help=\"Sum of the time planned of", "project.id), '&', ('res_model', '=', 'project.task'), ('res_id', 'in', project.task_ids.ids) ]) def", "customer replies to the feedback for this stage.\\n\" \" *", "'rating_status_period') def _compute_rating_request_deadline(self): periods = {'daily': 1, 'weekly': 7, 'bimonthly':", "in vals if allowed_users_changed: allowed_users = {project: project.allowed_user_ids for project", "for task in self: task.subtask_planned_hours = sum(child_task.planned_hours + child_task.subtask_planned_hours for", "when going through the mail gateway. 
Indeed we # do", "custom_values = {} defaults = { 'name': msg.get('subject') or _(\"No", "string=\"Phone\", readonly=False, store=True, copy=False) ribbon_message = fields.Char('Ribbon message', compute='_compute_ribbon_message') partner_city", "def update_date_end(self, stage_id): project_task_type = self.env['project.task.type'].browse(stage_id) if project_task_type.fold or project_task_type.is_closed:", "tasks.ids)]}) @api.returns('self', lambda value: value.id) def copy(self, default=None): if default", "project.allowed_portal_user_ids def _compute_access_url(self): super(Project, self)._compute_access_url() for project in self: project.access_url", "_inherit = ['portal.mixin', 'mail.thread.cc', 'mail.activity.mixin', 'rating.mixin'] _mail_post_access = 'read' _order", "':', 'default_parent_id': self.id, # will give default subtask field in", "def _compute_rating_request_deadline(self): periods = {'daily': 1, 'weekly': 7, 'bimonthly': 15,", "task.email_from) or task.parent_id.email_from @api.depends('parent_id.project_id.subtask_project_id') def _compute_project_id(self): for task in self:", "update the task according to the email. \"\"\" email_list =", "to explicitly set user_id to False; however we do not", "bullet).\\n\" \" * A medium or a bad feedback will", "'May'), ('june', 'June'), ('july', 'July'), ('august', 'August'), ('september', 'September'), ('october',", "'child_of', self.id), ('id', '!=', self.id)] # update context, with all", "'project_id.privacy_visibility') def _compute_allowed_user_ids(self): for task in self: portal_users = task.allowed_user_ids.filtered('share')", "n=1): self.ensure_one() if self.repeat_unit == 'week': return [fn(n) for day,", "chatter using # suggested recipients. 
This heuristic allows to avoid", "sequence order when displaying a list of Projects.\") partner_id =", "dashboard.\") label_tasks = fields.Char(string='Use Tasks as', default='Tasks', help=\"Label used for", "not_fav_projects.write({'favorite_user_ids': [(4, self.env.uid)]}) favorite_projects.write({'favorite_user_ids': [(3, self.env.uid)]}) def action_view_tasks(self): action =", "= lambda pdata: pdata['type'] == 'user' and project_user_group_id in pdata['groups']", "kanban selection, when the task or issue is in that", "compute='_compute_company_id', store=True, readonly=False, required=True, copy=True, default=_default_company_id) color = fields.Integer(string='Color Index')", "& res_id displayed_image_id = fields.Many2one('ir.attachment', domain=\"[('res_model', '=', 'project.task'), ('res_id', '=',", "project partner_id if any, or else the parent task partner_id.", "'stage_id' in changes and test_task.stage_id.mail_template_id: res['stage_id'] = (test_task.stage_id.mail_template_id, { 'auto_delete_message':", "'user' and project_user_group_id in pdata['groups'] and pdata['id'] in allowed_user_ids new_group", "wizard.id, 'target': 'new', 'context': self.env.context, } def unlink(self): # Check", "make it accessible by the recipient(s).\") @api.depends('child_ids.planned_hours') def _compute_subtask_planned_hours(self): for", "= super(Project, self)._alias_get_creation_values() values['alias_model_id'] = self.env['ir.model']._get('project.task').id if self.id: values['alias_defaults'] =", "use the project partner_id if any, or else the parent", "import DAYS, WEEKS class ProjectTaskType(models.Model): _name = 'project.task.type' _description =", "[('recurrence_id', 'in', self.recurrence_id.ids)] tasks |= self.env['project.task'].search(recurrence_domain) result = super(Task, tasks).write(vals)", "== 'portal') tasks.sudo().write({'allowed_user_ids': [(4, user.id) for user in new_allowed_users]}) return", "wed = fields.Boolean(string=\"Wed\", 
compute='_compute_repeat', readonly=False) thu = fields.Boolean(string=\"Thu\", compute='_compute_repeat', readonly=False)", "dict(self._context, create=False) } def action_subtask(self): action = self.env[\"ir.actions.actions\"]._for_xml_id(\"project.project_task_action_sub_task\") # display", "to their project if any. \"\"\" aliases = self.sudo().mapped('project_id')._notify_get_reply_to(default=default, records=None,", "_compute_access_warning(self): super(Task, self)._compute_access_warning() for task in self.filtered(lambda x: x.project_id.privacy_visibility !=", "Periodical Rating: email will be sent periodically.\\n\\n\" \"Don't forget to", "'\\n'.join('- %s' % p.name for p in disabled_projects) else: stage.disabled_rating_warning", "'repeat_month' in default_fields: vals['repeat_month'] = self._fields.get('repeat_month').selection[fields.Datetime.today().month - 1][0] if 'repeat_until'", "= self.env['project.task'] # We want to copy archived task, but", "= permission_removed.filtered('share') project.message_unsubscribe(allowed_portal_users_removed.partner_id.commercial_partner_id.ids) for task in project.task_ids: task.allowed_user_ids -= permission_removed", "shared with the recipient(s) because the privacy of the project", "if not k.startswith('search_default_')} ctx.update({ 'default_name': self.env.context.get('name', self.name) + ':', 'default_parent_id':", "for task in self: if task.project_id: if task.project_id not in", "False # takes all existing ratings _check_company_auto = True def", "all tasks when unsubscribing from a project \"\"\" self.mapped('tasks').message_unsubscribe(partner_ids=partner_ids, channel_ids=channel_ids)", "copy=False) tag_ids = fields.Many2many('project.tags', string='Tags') kanban_state = fields.Selection([ ('normal', 'In", "= task.partner_id.email or ((task.partner_id or task.parent_id) and task.email_from) or task.parent_id.email_from", "_order = \"priority desc, sequence, id desc\" _check_company_auto = True", 
"unsubscribing from a project \"\"\" self.mapped('tasks').message_unsubscribe(partner_ids=partner_ids, channel_ids=channel_ids) return super(Project, self).message_unsubscribe(partner_ids=partner_ids,", "= fields.Integer(compute='_compute_attached_docs_count', string=\"Number of documents attached\") date_start = fields.Date(string='Start Date')", "Status', default=\"stage\", required=True, help=\"How to get customer feedback?\\n\" \"- Rating", "the action to see all the analytic lines of the", "False}) subtask_project_id = fields.Many2one('project.project', related=\"project_id.subtask_project_id\", string='Sub-task Project', readonly=True) allow_subtasks =", ") for task in task_linked_to_calendar: dt_create_date = fields.Datetime.from_string(task.create_date) if task.date_assign:", "_compute_subtask_count(self): for task in self: task.subtask_count = len(task._get_all_subtasks()) @api.onchange('company_id') def", "If depth <= 0, return all children without depth limit", "double project creation self = self.with_context(mail_create_nosubscribe=True) project = super(Project, self).create(vals)", "task.repeat_unit == 'week' task.repeat_show_month = task.recurring_task and task.repeat_unit == 'year'", "('res_id', 'in', self.ids), '&', ('res_model', '=', 'project.task'), ('res_id', 'in', self.task_ids.ids)", "False), ('company_id', '=', company_id)]\") partner_is_company = fields.Boolean(related='partner_id.is_company', readonly=True) commercial_partner_id =", "self.allowed_portal_user_ids |= portal_users self.allowed_internal_user_ids |= internal_users return res def message_unsubscribe(self,", "= self.name return action def action_view_account_analytic_line(self): \"\"\" return the action", "project start-date must be lower than project end-date.') ] @api.depends('partner_id.email')", "self.mapped('child_ids').filtered(lambda children: children.active) if not children: return self.env['project.task'] if depth", "all children without depth limit def 
_get_all_subtasks(self, depth=0): children =", "sequence order when displaying a list of tasks.\") stage_id =", "same. \"\"\" for task in self: if task.partner_id: if task.project_id.partner_id:", "return 'project_id' class ProjectTags(models.Model): \"\"\" Tags of project's tasks \"\"\"", "\"\"\" Override of the base.stage method Parameter of the stage", "('recurring_task', '=', True)]).write({'recurring_task': False}) if 'active' in vals: # archiving/unarchiving", "vals[rec_field] for rec_field in rec_fields} rec_values['next_recurrence_date'] = fields.Datetime.today() recurrence =", "take_action, 'title': _('I take it')}] new_group[2]['actions'] = project_actions groups =", "_compute_allowed_users(self): for project in self: users = project.allowed_internal_user_ids | project.allowed_portal_user_ids", "recursive hierarchy of task(s).')) @api.model def get_empty_list_help(self, help): tname =", "recurring_dates[:5]: task.recurrence_message += '<li>%s</li>' % date.strftime(date_format) if task.repeat_type == 'after'", "rating_template_id = fields.Many2one( 'mail.template', string='Rating Email Template', domain=[('model', '=', 'project.task')],", "old to new project \"\"\" project = self.browse(new_project_id) tasks =", "in self.filtered(lambda x: x.privacy_visibility != 'portal'): project.access_warning = _( \"The", "stage_id): project_task_type = self.env['project.task.type'].browse(stage_id) if project_task_type.fold or project_task_type.is_closed: return {'date_end':", "in DAYS.items() if self[day]] return [DAYS.get(self.repeat_weekday)(n)] @api.depends( 'recurring_task', 'repeat_interval', 'repeat_unit',", "def get_empty_list_help(self, help): tname = _(\"task\") project_id = self.env.context.get('default_project_id', False)", "def _compute_access_warning(self): super(Task, self)._compute_access_warning() for task in self.filtered(lambda x: x.project_id.privacy_visibility", "= fields.Many2one('project.project', string='Sub-task Project', 
ondelete=\"restrict\", help=\"Project in which sub-tasks of", "project_subtypes else None if not subtype_ids or task_subtypes: self.mapped('tasks').message_subscribe( partner_ids=partner_ids,", "!= task.partner_id.phone: task.partner_id.phone = task.partner_phone @api.depends('partner_email', 'partner_phone', 'partner_id') def _compute_ribbon_message(self):", "= self.env['res.partner'].browse(partner_ids).user_ids.filtered('share') tasks = self.filtered(lambda task: task.project_id.privacy_visibility == 'portal') tasks.sudo().write({'allowed_user_ids':", "or self.repeat_until and self.repeat_until > fields.Date.today()) @api.depends('recurrence_id') def _compute_recurring_count(self): self.recurring_count", "vals and not vals.get('active') and any(self.mapped('recurrence_id')): # TODO: show a", "Ensure the defaults are correct (and computed once by project),", "delta = task.repeat_interval if task.repeat_unit == 'day' else 1 recurring_dates", "the tasks of the project:\\n\" \"- Invited internal users: employees", "of the Month'), ('day', 'Day of the Month'), ], default='date',", "partner_id is automatically changed also. 2) if the parent task", "== 'day' else 1 recurring_dates = self.env['project.task.recurrence']._get_next_recurring_dates( date + timedelta(days=delta),", "= True return groups def _notify_get_reply_to(self, default=None, records=None, company=None, doc_names=None):", "'view_mode': 'form', 'res_model': 'project.delete.wizard', 'views': [(self.env.ref('project.project_delete_wizard_form').id, 'form')], 'type': 'ir.actions.act_window', 'res_id':", "raise ValidationError(_(\"The project visibility setting doesn't allow portal users to", "task.company_id = task.project_id.company_id @api.depends('project_id') def _compute_stage_id(self): for task in self:", "raise UserError(_('You cannot delete a project containing tasks. 
You can", "else 5) delta = task.repeat_interval if task.repeat_unit == 'day' else", "company_id)]\") partner_is_company = fields.Boolean(related='partner_id.is_company', readonly=True) commercial_partner_id = fields.Many2one(related='partner_id.commercial_partner_id') partner_email =", "super(Task, self).message_subscribe(partner_ids=partner_ids, channel_ids=channel_ids, subtype_ids=subtype_ids) if partner_ids: new_allowed_users = self.env['res.partner'].browse(partner_ids).user_ids.filtered('share') tasks", "compute='_compute_repeat', readonly=False) sat = fields.Boolean(string=\"Sat\", compute='_compute_repeat', readonly=False) sun = fields.Boolean(string=\"Sun\",", "if task.partner_id: if task.project_id.partner_id: task.partner_id = task.project_id.partner_id else: task.partner_id =", "in pdata['groups'] if self.project_id.privacy_visibility == 'followers': allowed_user_ids = self.project_id.allowed_internal_user_ids.partner_id.ids group_func", "[(6, 0, list(set(attachment_ids) - set(message_attachment_ids)))] @api.depends('project_id.allowed_user_ids', 'project_id.privacy_visibility') def _compute_allowed_user_ids(self): for", "msg, custom_values=None): \"\"\" Overrides mail_thread message_new that is called by", "= _(\"%s (copy)\") % (self.name) project = super(Project, self).copy(default) if", "inverse='_inverse_partner_email', string='Email', readonly=False, store=True, copy=False) partner_phone = fields.Char( compute='_compute_partner_phone', inverse='_inverse_partner_phone',", "probably have access to the document. 
\"\"\" groups = super(Task,", "comment and incoming emails in communication history website_message_ids = fields.One2many(domain=lambda", "and task.repeat_on_month == 'date') or (task.repeat_unit == 'year' and task.repeat_on_year", "but do not propagate an active_test context key task_ids =", "task.project_id.resource_calendar_id and task.create_date ) for task in task_linked_to_calendar: dt_create_date =", "project_subtypes = self.env['mail.message.subtype'].browse(subtype_ids) if subtype_ids else None task_subtypes = (project_subtypes.mapped('parent_id')", "is called by the mailgateway through message_process. This override updates", "= [('|')] * (len(section_ids) - 1) for section_id in section_ids:", "a task as its parent task.\")) if 'active' in vals", "kanban state to 'ready for the new stage' (green bullet).\\n\"", "fields.Many2one('project.task.type', string='Stage', compute='_compute_stage_id', store=True, readonly=False, ondelete='restrict', tracking=True, index=True, default=_get_default_stage_id, group_expand='_read_group_stage_ids',", "allowed_user_ids = fields.Many2many('res.users', string=\"Visible to\", groups='project.group_project_manager', compute='_compute_allowed_user_ids', store=True, readonly=False, copy=False)", "== 'portal': task._portal_ensure_token() return tasks def write(self, vals): now =", "see the followed project and tasks.\\n\" \"- All internal users:", "self.ids: raise UserError(_(\"Sorry. 
You can't set a task as its", "are automatically synchronized \" \"with Tasks (or optionally Issues if", "self: if project.partner_id and project.partner_id.email != project.partner_email: project.partner_email = project.partner_id.email", "default='date', compute='_compute_repeat', readonly=False) mon = fields.Boolean(string=\"Mon\", compute='_compute_repeat', readonly=False) tue =", "# --------------------------------------------------- @api.depends('parent_id.partner_id', 'project_id.partner_id') def _compute_partner_id(self): \"\"\" If a task", "def _check_no_portal_allowed(self): for task in self.filtered(lambda t: t.project_id.privacy_visibility != 'portal'):", "self.project_ids.ids)) wizard = self.with_context(project_ids=project_ids).env['project.task.type.delete.wizard'].create({ 'project_ids': project_ids, 'stage_ids': self.ids }) context", "# found. create_context = dict(self.env.context or {}) create_context['default_user_id'] = False", "True if 'repeat_day' in default_fields: vals['repeat_day'] = str(fields.Datetime.today().day) if 'repeat_month'", "default_project_id = self.env.context.get('default_project_id') return [default_project_id] if default_project_id else None active", "context=action_context) # --------------------------------------------------- # Business Methods # --------------------------------------------------- @api.model def", "management # ---------------------------------------- def stage_find(self, section_id, domain=[], order='sequence'): \"\"\" Override", "list of tasks.\") stage_id = fields.Many2one('project.task.type', string='Stage', compute='_compute_stage_id', store=True, readonly=False,", "task.partner_phone != task.partner_id.phone: task.partner_phone = task.partner_id.phone def _inverse_partner_phone(self): for task", "\"project.tags\" _description = \"Project Tags\" def _get_default_color(self): return randint(1, 11)", "Parameter of the stage search taken from the lead: -", "# 1) Allows keeping the batch creation of 
tasks #", "fields.Boolean(string='Use email alias', compute='_compute_alias_enabled', readonly=False) alias_id = fields.Many2one('mail.alias', string='Alias', ondelete=\"restrict\",", "@api.depends('recurring_task') def _compute_repeat(self): rec_fields = self._get_recurrence_fields() defaults = self.default_get(rec_fields) for", "first delete all of its tasks.')) # Delete the empty", "task or issue is in that stage.') legend_done = fields.Char(", "required=True, help=\"Internal email associated with this project. Incoming emails are", "empty related analytic account analytic_accounts_to_delete = self.env['account.analytic.account'] for project in", "when the task or issue is in that stage.') legend_normal", "'=', task.id), ('res_model', '=', 'project.task')]).ids message_attachment_ids = task.mapped('message_ids.attachment_ids').ids # from", "'project_id', string='Tasks', domain=['|', ('stage_id.fold', '=', False), ('stage_id', '=', False)]) color", "'project.task.type.delete.wizard', 'views': [(self.env.ref('project.view_project_task_type_delete_wizard').id, 'form')], 'type': 'ir.actions.act_window', 'res_id': wizard.id, 'target': 'new',", "kanban_state_label = fields.Char(compute='_compute_kanban_state_label', string='Kanban State Label', tracking=True) create_date = fields.Datetime(\"Created", "feedback=None, subtype_xmlid=None): return super(Task, self).rating_apply(rate, token=token, feedback=feedback, subtype_xmlid=\"project.mt_task_rating\") def _rating_get_parent_field_name(self):", "to set up the mail templates on the stages for", "portal_users = task.allowed_user_ids.filtered('share') internal_users = task.allowed_user_ids - portal_users if task.project_id.privacy_visibility", "updated.') elif will_write_phone: task.ribbon_message = _('By saving this change, the", "_order = \"sequence, name, id\" _rating_satisfaction_days = False # takes", "self.env['res.partner'].browse(partner_ids).user_ids.filtered('share') tasks = self.filtered(lambda task: 
task.project_id.privacy_visibility == 'portal') tasks.sudo().write({'allowed_user_ids': [(4,", "= super(Task, self).message_subscribe(partner_ids=partner_ids, channel_ids=channel_ids, subtype_ids=subtype_ids) if partner_ids: new_allowed_users = self.env['res.partner'].browse(partner_ids).user_ids.filtered('share')", "= [] if section_id: section_ids.append(section_id) section_ids.extend(self.mapped('project_id').ids) search_domain = [] if", "emails are automatically synchronized \" \"with Tasks (or optionally Issues", "if not task.project_id: task.project_id = task.parent_id.project_id.subtask_project_id # --------------------------------------------------- # Mail", "task, but do not propagate an active_test context key task_ids", "pulled in another stage.\\n\" \"- Periodical Rating: email will be", "task.recurrence_message = '<ul>' for date in recurring_dates[:5]: task.recurrence_message += '<li>%s</li>'", "'repeat_on_year') def _compute_repeat_visibility(self): for task in self: task.repeat_show_day = task.recurring_task", "stage.disabled_rating_warning = '\\n'.join('- %s' % p.name for p in disabled_projects)", "the customer replies to the feedback for this stage.\\n\" \"", "tasks: if task.project_id.privacy_visibility == 'portal': task._portal_ensure_token() return tasks def write(self,", "0, [self.env.uid])] name = fields.Char(\"Name\", index=True, required=True, tracking=True) description =", "Explanation', readonly=True, related_sudo=False) is_closed = fields.Boolean(related=\"stage_id.is_closed\", string=\"Closing Stage\", readonly=True, related_sudo=False)", "self.env['project.task.recurrence'].create(rec_values) task.recurrence_id = recurrence.id if 'recurring_task' in vals and not", "task in recurring_tasks: task.recurring_count = tasks_count.get(task.recurrence_id.id, 0) @api.depends('partner_id.email') def _compute_partner_email(self):", "super(Task, self)._track_template(changes) test_task = self[0] if 'stage_id' in changes and", "field is set to False, 
it will allow you to", "readonly=False) thu = fields.Boolean(string=\"Thu\", compute='_compute_repeat', readonly=False) fri = fields.Boolean(string=\"Fri\", compute='_compute_repeat',", "super(ProjectTaskType, self).write(vals) @api.depends('project_ids', 'project_ids.rating_active') def _compute_disabled_rating_warning(self): for stage in self:", "fields.Boolean(compute='_compute_repeat_visibility') repeat_show_week = fields.Boolean(compute='_compute_repeat_visibility') repeat_show_month = fields.Boolean(compute='_compute_repeat_visibility') @api.model def _get_recurrence_fields(self):", "in task.stage_id.project_ids: task.stage_id = task.stage_find(task.project_id.id, [ ('fold', '=', False), ('is_closed',", "any. \"\"\" aliases = self.sudo().mapped('project_id')._notify_get_reply_to(default=default, records=None, company=company, doc_names=None) res =", "to the initially time planned of this task.\") user_id =", "allowed_user_ids = fields.Many2many('res.users', compute='_compute_allowed_users', inverse='_inverse_allowed_user') allowed_internal_user_ids = fields.Many2many('res.users', 'project_allowed_internal_users_rel', string=\"Allowed", "everything.\" \" Portal users may see project and tasks followed", "lines of the project's analytic account \"\"\" action = self.env[\"ir.actions.actions\"]._for_xml_id(\"analytic.account_analytic_line_action\")", "super(Task, self).rating_apply(rate, token=token, feedback=feedback, subtype_xmlid=\"project.mt_task_rating\") def _rating_get_parent_field_name(self): return 'project_id' class", "task.project_id: if task.project_id not in task.stage_id.project_ids: task.stage_id = task.stage_find(task.project_id.id, [", "for project in self: if project.analytic_account_id and not project.analytic_account_id.line_ids: analytic_accounts_to_delete", "'repeat_weekday') def _compute_recurrence_message(self): self.recurrence_message = False for task in self.filtered(lambda", "return the action to see all the rating of the", 
"self._fields.get('repeat_weekday').selection[week_start][0] return vals @api.model_create_multi def create(self, vals_list): default_stage = dict()", "project_ids = fields.Many2many('project.project', 'project_task_type_rel', 'type_id', 'project_id', string='Projects', default=_get_default_project_ids) legend_blocked =", "def message_subscribe(self, partner_ids=None, channel_ids=None, subtype_ids=None): \"\"\" Add the users subscribed", "changing stage if 'kanban_state' not in vals: vals['kanban_state'] = 'normal'", "# reset kanban state when changing stage if 'kanban_state' not", "add the portal user subscribed to allowed portal users \"\"\"", "vals[\"stage_id\"] = default_stage[project_id] # user_id change: update date_assign if vals.get('user_id'):", "self)._compute_access_url() for task in self: task.access_url = '/my/task/%s' % task.id", "alias_id = fields.Many2one('mail.alias', string='Alias', ondelete=\"restrict\", required=True, help=\"Internal email associated with", "(green bullet).\\n\" \" * A medium or a bad feedback", "active field is set to False, it will allow you", "project.task_ids: task.allowed_user_ids -= permission_removed if 'allow_recurring_tasks' in vals and not", "recurrence = self.env['project.task.recurrence'].create(rec_values) vals['recurrence_id'] = recurrence.id tasks = super().create(vals_list) for", "def default_get(self, default_fields): vals = super(Task, self).default_get(default_fields) days = list(DAYS.keys())", "dashboard', help=\"Whether this project should be displayed on your dashboard.\")", "string='Kanban Ongoing Explanation', readonly=True, related_sudo=False) is_closed = fields.Boolean(related=\"stage_id.is_closed\", string=\"Closing Stage\",", "reason = _('Customer Email') if task.partner_id.email else _('Customer') task._message_add_suggested_recipient(recipients, partner=task.partner_id,", "saving this change, the customer phone number will also be", "self.filtered(lambda x: x.project_id.privacy_visibility != 
'portal'): task.access_warning = _( \"The task", "children: return self.env['project.task'] if depth == 1: return children return", "'=', False), ('stage_id', '=', False)], ['project_id'], ['project_id']) result = dict((data['project_id'][0],", "in ctx.items() if not k.startswith('search_default_')} ctx.update({ 'default_name': self.env.context.get('name', self.name) +", "def _compute_subtask_count(self): for task in self: task.subtask_count = len(task._get_all_subtasks()) @api.onchange('company_id')", "of displayed_image_id, we couln't use attachment_ids because a one2many is", "_rating_satisfaction_days = False # takes all existing ratings _check_company_auto =", "= dict() for vals in vals_list: project_id = vals.get('project_id') or", "search_domain = [] if section_ids: search_domain = [('|')] * (len(section_ids)", "super(Project, self)._compute_access_url() for project in self: project.access_url = '/my/project/%s' %", "if allowed_users_changed: allowed_users = {project: project.allowed_user_ids for project in self}", "False @api.returns('self', lambda value: value.id) def copy(self, default=None): if default", "a list of commands so we used res_model & res_id", "= self.env['project.task'].read_group([('recurrence_id', 'in', recurring_tasks.recurrence_id.ids)], ['id'], 'recurrence_id') tasks_count = {c.get('recurrence_id')[0]: c.get('recurrence_id_count')", "in vals: vals.pop('is_favorite') self._fields['is_favorite'].determine_inverse(self) res = super(Project, self).write(vals) if vals", "cannot delete recurring tasks. Please, disable the recurrence first.')) return", "mailgateway through message_process. 
This override updates the document according to", "= _('By saving this change, the customer email will also", "self.project_id.partner_id: return self.project_id.partner_id return res def rating_apply(self, rate, token=None, feedback=None,", "'=', project_id)]\", copy=False) tag_ids = fields.Many2many('project.tags', string='Tags') kanban_state = fields.Selection([", "count = self.env['project.task'].read_group([('recurrence_id', 'in', recurring_tasks.recurrence_id.ids)], ['id'], 'recurrence_id') tasks_count = {c.get('recurrence_id')[0]:", "and (task.repeat_unit == 'month' and task.repeat_on_month == 'date') or (task.repeat_unit", "task in self: task.email_from = task.partner_id.email or ((task.partner_id or task.parent_id)", "return stages.browse(stage_ids) active = fields.Boolean(default=True) name = fields.Char(string='Title', tracking=True, required=True,", "help='Override the default value displayed for the normal state for", "('rating_active', '=', True), ('rating_status', '=', 'periodic'), ('rating_request_deadline', '<=', fields.Datetime.now()) ])", "('april', 'April'), ('may', 'May'), ('june', 'June'), ('july', 'July'), ('august', 'August'),", "\"\"\" Add the users subscribed to allowed portal users \"\"\"", "'in', self.task_ids.ids) ]) action['context'] = \"{'default_res_model': '%s','default_res_id': %d}\" % (self._name,", "tracking=True) create_date = fields.Datetime(\"Created On\", readonly=True, index=True) write_date = fields.Datetime(\"Last", "current task action['domain'] = [('id', 'child_of', self.id), ('id', '!=', self.id)]", "If depth == 3, return children to third generation #", "randint(1, 11) name = fields.Char('Name', required=True) color = fields.Integer(string='Color', default=_get_default_color)", "help=\"Sum of the time planned of all the sub-tasks linked", "the Year'), ('day', 'Day of the Year'), ], default='date', compute='_compute_repeat',", "== 'portal_customer' and not portal_privacy: group_data['has_button_access'] = False elif 
group_name", "considered as closed.\") disabled_rating_warning = fields.Text(compute='_compute_disabled_rating_warning') def unlink_wizard(self, stage_view=False): self", "odoo import api, fields, models, tools, SUPERUSER_ID, _ from odoo.exceptions", "# TODO: show a dialog to stop the recurrence raise", "'!=', self.id)] # update context, with all default values as", "a follower, a specific one) # on a document without", "(self.name,) action_context = ast.literal_eval(action['context']) if action['context'] else {} action_context.update(self._context) action_context['search_default_parent_res_name']", "@api.depends('rating_status', 'rating_status_period') def _compute_rating_request_deadline(self): periods = {'daily': 1, 'weekly': 7,", "project.partner_id.email: project.partner_id.email = project.partner_email @api.depends('partner_id.phone') def _compute_partner_phone(self): for project in", "= {c.get('recurrence_id')[0]: c.get('recurrence_id_count') for c in count} for task in", "+ timedelta(days=delta), task.repeat_interval, task.repeat_unit, task.repeat_type, task.repeat_until, task.repeat_on_month, task.repeat_on_year, task._get_weekdays(WEEKS.get(task.repeat_week)), task.repeat_day,", "'ilike', 'image')]\", string='Cover Image') legend_blocked = fields.Char(related='stage_id.legend_blocked', string='Kanban Blocked Explanation',", "doc_names=None): \"\"\" Override to set alias of tasks to their", "task.recurring_task and (task.repeat_unit == 'month' and task.repeat_on_month == 'date') or", "('followers', 'Invited internal users'), ('employees', 'All internal users'), ('portal', 'Invited", "False def _get_weekdays(self, n=1): self.ensure_one() if self.repeat_unit == 'week': return", "\"{'default_res_model': '%s','default_res_id': %d}\" % (self._name, self.id) return action def _compute_is_favorite(self):", "tasks'), ('all', 'All tasks'), ], default='this', store=False) recurrence_message = fields.Char(string='Next", "leftover: res.update(super(Task, 
leftover)._notify_get_reply_to(default=default, records=None, company=company, doc_names=doc_names)) return res def email_split(self,", "def _compute_kanban_state_label(self): for task in self: if task.kanban_state == 'normal':", "self).write(vals) @api.depends('project_ids', 'project_ids.rating_active') def _compute_disabled_rating_warning(self): for stage in self: disabled_projects", "gateway user to be responsible if no other responsible is", "periods = {'daily': 1, 'weekly': 7, 'bimonthly': 15, 'monthly': 30,", "sub-tasks of the current project will be created. It can", "empty for project in self.with_context(active_test=False): if project.tasks: raise UserError(_('You cannot", "index=True, string=\"Priority\") sequence = fields.Integer(string='Sequence', index=True, default=10, help=\"Gives the sequence", "related='company_id.resource_calendar_id') type_ids = fields.Many2many('project.task.type', 'project_task_type_rel', 'project_id', 'type_id', string='Tasks Stages') task_count", "by the recipient(s).\") @api.depends('rating_status', 'rating_status_period') def _compute_rating_request_deadline(self): periods = {'daily':", "in groups: if group_name in ('customer', 'user') or group_name ==", "= fields.Boolean(default=True) name = fields.Char(string='Title', tracking=True, required=True, index=True) description =", "vals.get('user_id') and 'date_assign' not in vals: vals['date_assign'] = now #", "updated.') else: task.ribbon_message = False @api.constrains('parent_id') def _check_parent_id(self): if not", "_('Customer') task._message_add_suggested_recipient(recipients, partner=task.partner_id, reason=reason) elif task.email_from: task._message_add_suggested_recipient(recipients, email=task.email_from, reason=_('Customer Email'))", "legend_done = fields.Char( 'Green Kanban Label', default=lambda s: _('Ready'), translate=True,", "auto_join=True, tracking=True, domain=\"['|', ('company_id', '=', False), ('company_id', '=', company_id)]\") partner_email", "project 
in projects: project.task_ids._send_task_rating_mail() project._compute_rating_request_deadline() self.env.cr.commit() class Task(models.Model): _name =", "t.project_id.privacy_visibility != 'portal'): portal_users = task.allowed_user_ids.filtered('share') if portal_users: user_names =", "or self.env.company.id, 'partner_id': values.get('partner_id'), 'active': True, }) return analytic_account def", "order when displaying a list of tasks.\") stage_id = fields.Many2one('project.task.type',", "sent when a task is pulled in another stage.\\n\" \"-", "'=', company_id)]\") partner_is_company = fields.Boolean(related='partner_id.is_company', readonly=True) commercial_partner_id = fields.Many2one(related='partner_id.commercial_partner_id') partner_email", "# --------------------------------------------------- # Rating business # --------------------------------------------------- def _send_task_rating_mail(self, force_send=False):", "in self.env['mail.thread']._mail_find_partner_from_emails(email_list, records=task, force_create=False) if p] task.message_subscribe(partner_ids) return task def", "the task according to the email. \"\"\" email_list = self.email_split(msg)", "the recipient(s).\") @api.depends('rating_status', 'rating_status_period') def _compute_rating_request_deadline(self): periods = {'daily': 1,", "store=True, group_operator=\"avg\") working_hours_close = fields.Float(compute='_compute_elapsed', string='Working hours to close', store=True,", "task as its parent task.\")) if 'active' in vals and", "- 1) def action_open_parent_task(self): return { 'name': _('Parent Task'), 'view_mode':", "or else the parent task partner_id. 
Once the task partner_id", "user.id) for user in new_allowed_users]}) return res # ---------------------------------------- #", "channel_ids=channel_ids, subtype_ids=subtype_ids) if partner_ids: new_allowed_users = self.env['res.partner'].browse(partner_ids).user_ids.filtered('share') tasks = self.filtered(lambda", "= duration_data['days'] else: task.working_hours_close = 0.0 task.working_days_close = 0.0 (self", "and tasks followed by\\n\" \" them or by someone of", "and not self.partner_id: # we consider that posting a message", "'project.task')], help=\"If set an email will be sent to the", "Month'), ], default='date', compute='_compute_repeat', readonly=False) repeat_on_year = fields.Selection([ ('date', 'Date", "readonly=True) subtask_count = fields.Integer(\"Sub-task count\", compute='_compute_subtask_count') email_from = fields.Char(string='Email From',", "# recurrence fields allow_recurring_tasks = fields.Boolean(related='project_id.allow_recurring_tasks') recurring_task = fields.Boolean(string=\"Recurrent\") recurring_count", "users \"\"\" res = super(Project, self).message_subscribe(partner_ids=partner_ids, channel_ids=channel_ids, subtype_ids=subtype_ids) project_subtypes =", "will_write_phone: task.ribbon_message = _('By saving this change, the customer email", "recipient(s).\") @api.depends('child_ids.planned_hours') def _compute_subtask_planned_hours(self): for task in self: task.subtask_planned_hours =", "compute='_compute_subtask_count') email_from = fields.Char(string='Email From', help=\"These people will receive email.\",", "x.split('@')[0] not in aliases] @api.model def message_new(self, msg, custom_values=None): \"\"\"", "recurrence fields rec_fields = vals.keys() & self._get_recurrence_fields() if rec_fields: rec_values", "leftover)._notify_get_reply_to(default=default, records=None, company=company, doc_names=doc_names)) return res def email_split(self, msg): email_list", "company=company, doc_names=doc_names)) return res def 
email_split(self, msg): email_list = tools.email_split((msg.get('to')", "project itself.\") allow_subtasks = fields.Boolean('Sub-tasks', default=lambda self: self.env.user.has_group('project.group_subtask_project')) allow_recurring_tasks =", "string='Customer', compute='_compute_partner_id', store=True, readonly=False, domain=\"['|', ('company_id', '=', False), ('company_id', '=',", "else: task[f] = False def _get_weekdays(self, n=1): self.ensure_one() if self.repeat_unit", "self.name action_context.pop('group_by', None) return dict(action, context=action_context) # --------------------------------------------------- # Business", "return self.project_id.partner_id return res def rating_apply(self, rate, token=None, feedback=None, subtype_xmlid=None):", "be sent periodically.\\n\\n\" \"Don't forget to set up the mail", "Name', required=True, translate=True) description = fields.Text(translate=True) sequence = fields.Integer(default=1) project_ids", "Update', index=True, copy=False, readonly=True) project_id = fields.Many2one('project.project', string='Project', compute='_compute_project_id', store=True,", "{ 'name': _('Confirmation'), 'view_mode': 'form', 'res_model': 'project.delete.wizard', 'views': [(self.env.ref('project.project_delete_wizard_form').id, 'form')],", "= fields.Selection([ ('day', 'Days'), ('week', 'Weeks'), ('month', 'Months'), ('year', 'Years'),", "self.env.context.get('default_project_id') if project_id and not \"company_id\" in vals: vals[\"company_id\"] =", "self.search([ ('partner_id', '=', False), ('email_from', '=', new_partner.email), ('stage_id.fold', '=', False)]).write({'partner_id':", "children._get_all_subtasks(depth - 1) def action_open_parent_task(self): return { 'name': _('Parent Task'),", "= super(Task, self)._message_get_suggested_recipients() for task in self: if task.partner_id: reason", "def _create_analytic_account_from_values(self, values): analytic_account = self.env['account.analytic.account'].create({ 'name': 
values.get('name', _('Unknown Analytic", "= self.env['project.delete.wizard'].create({ 'project_ids': self.ids }) return { 'name': _('Confirmation'), 'view_mode':", "readonly=False) repeat_type = fields.Selection([ ('forever', 'Forever'), ('until', 'End Date'), ('after',", "privacy to 'Visible by following customers' in order to make", "'default_parent_id': self.id, # will give default subtask field in `default_get`", "portal_privacy = self.project_id.privacy_visibility == 'portal' for group_name, group_method, group_data in", "'new', 'context': self.env.context, } def unlink(self): # Check project is", "task.repeat_unit == 'year' @api.depends('recurring_task') def _compute_repeat(self): rec_fields = self._get_recurrence_fields() defaults", "== self.email_from) if new_partner: self.search([ ('partner_id', '=', False), ('email_from', '=',", "|= project.analytic_account_id result = super(Project, self).unlink() analytic_accounts_to_delete.unlink() return result def", "as 'quick_create' does not contains all field in its view", "any, or else the parent task partner_id. 
Once the task", "readonly=False, ondelete='restrict', tracking=True, index=True, default=_get_default_stage_id, group_expand='_read_group_stage_ids', domain=\"[('project_ids', '=', project_id)]\", copy=False)", "fields.Datetime(string='Ending Date', index=True, copy=False) date_assign = fields.Datetime(string='Assigning Date', index=True, copy=False,", "self.env.context.get('default_project_id') if not project_id: return False return self.stage_find(project_id, [('fold', '=',", "in self.env.context: search_domain = ['|', ('project_ids', '=', self.env.context['default_project_id'])] + search_domain", "new_partner: self.search([ ('partner_id', '=', False), ('email_from', '=', new_partner.email), ('stage_id.fold', '=',", "in self: if task.kanban_state == 'normal': task.kanban_state_label = task.legend_normal elif", "analytic lines of the project's analytic account \"\"\" action =", "Rating when changing stage: an email will be sent when", "True), ('rating_status', '=', 'periodic'), ('rating_request_deadline', '<=', fields.Datetime.now()) ]) for project", "readonly=False) alias_id = fields.Many2one('mail.alias', string='Alias', ondelete=\"restrict\", required=True, help=\"Internal email associated", "string='Tags') kanban_state = fields.Selection([ ('normal', 'In Progress'), ('done', 'Ready'), ('blocked',", "'Quarterly'), ('yearly', 'Yearly')], 'Rating Frequency', required=True, default='monthly') _sql_constraints = [", "and not vals['active']: self.env['project.task'].search([('stage_id', 'in', self.ids)]).write({'active': False}) return super(ProjectTaskType, self).write(vals)", "toggle_favorite(self): favorite_projects = not_fav_projects = self.env['project.project'].sudo() for project in self:", "'Monday'), ('tue', 'Tuesday'), ('wed', 'Wednesday'), ('thu', 'Thursday'), ('fri', 'Friday'), ('sat',", "ondelete='restrict', tracking=True, index=True, default=_get_default_stage_id, group_expand='_read_group_stage_ids', domain=\"[('project_ids', '=', project_id)]\", 
copy=False) tag_ids", "'repeat_weekday'] @api.depends('recurring_task', 'repeat_unit', 'repeat_on_month', 'repeat_on_year') def _compute_repeat_visibility(self): for task in", "'project_task_type_rel', 'project_id', 'type_id', string='Tasks Stages') task_count = fields.Integer(compute='_compute_task_count', string=\"Task Count\")", "in vals or 'allowed_internal_user_ids' in vals if allowed_users_changed: allowed_users =", "task is pulled in another stage.\\n\" \"- Periodical Rating: email", "' % self.project_id.id) headers['X-Odoo-Objects'] = ','.join(current_objects) if self.tag_ids: headers['X-Odoo-Tags'] =", "tasks = self.filtered(lambda task: task.project_id.privacy_visibility == 'portal') tasks.sudo().write({'allowed_user_ids': [(4, user.id)", "and task.partner_phone != task.partner_id.phone: task.partner_phone = task.partner_id.phone def _inverse_partner_phone(self): for", "('res_id', '=', id), ('mimetype', 'ilike', 'image')]\", string='Cover Image') legend_blocked =", "('email_from', '=', new_partner.email), ('stage_id.fold', '=', False)]).write({'partner_id': new_partner.id}) return super(Task, self)._message_post_after_hook(message,", "project.subtask_project_id = project for follower in self.message_follower_ids: project.message_subscribe(partner_ids=follower.partner_id.ids, subtype_ids=follower.subtype_ids.ids) if", "else: recurrence_domain = [('recurrence_id', 'in', self.recurrence_id.ids)] tasks |= self.env['project.task'].search(recurrence_domain) result", "for task in self.filtered(lambda t: t.project_id.privacy_visibility != 'portal'): portal_users =", "new_group = ('group_project_user', group_func, {}) if not self.user_id and not", "'=', False), ('is_closed', '=', False)]) @api.model def _default_company_id(self): if self._context.get('default_project_id'):", "field in its view if self._context.get('default_project_id'): default_project = self.env['project.project'].browse(self.env.context['default_project_id']) else:", "# Project User has no write 
access for project. not_fav_projects.write({'favorite_user_ids':", "project.write({'tasks': [(6, 0, tasks.ids)]}) @api.returns('self', lambda value: value.id) def copy(self,", "child_ids = fields.One2many('project.task', 'parent_id', string=\"Sub-tasks\", context={'active_test': False}) subtask_project_id = fields.Many2one('project.project',", "self.project_id.subtask_project_id or self.project_id ctx = dict(self.env.context) ctx = {k: v", "internal_users = all_users - portal_users self.allowed_portal_user_ids |= portal_users self.allowed_internal_user_ids |=", "allowed_users_changed = 'allowed_portal_user_ids' in vals or 'allowed_internal_user_ids' in vals if", "= fields.One2many('ir.attachment', compute='_compute_attachment_ids', string=\"Main Attachments\", help=\"Attachment that don't come from", "on stage if 'stage_id' in vals and vals.get('stage_id'): self.filtered(lambda x:", "Month'), ('day', 'Day of the Month'), ], default='date', compute='_compute_repeat', readonly=False)", "to avoid ugly hacks in JS. new_partner = message.partner_ids.filtered(lambda partner:", "# -*- coding: utf-8 -*- # Part of Odoo. 
See", "action['context'] = ctx return action def action_recurring_tasks(self): return { 'name':", "forget to set up the mail templates on the stages", "fields.Boolean(string=\"Allow Sub-tasks\", related=\"project_id.allow_subtasks\", readonly=True) subtask_count = fields.Integer(\"Sub-task count\", compute='_compute_subtask_count') email_from", "unlink(self): # Check project is empty for project in self.with_context(active_test=False):", "% p.name for p in disabled_projects) else: stage.disabled_rating_warning = False", "on its tasks, too self.with_context(active_test=False).mapped('tasks').write({'active': vals['active']}) if vals.get('partner_id') or vals.get('privacy_visibility'):", "status', default=False, help=\"Automatically modify the kanban state when the customer", "'mail.mail_notification_light' }) return res def _creation_subtype(self): return self.env.ref('project.mt_task_new') def _track_subtype(self,", "projects = self.search([ ('rating_active', '=', True), ('rating_status', '=', 'periodic'), ('rating_request_deadline',", "'&', ('res_model', '=', 'project.task'), ('res_id', 'in', project.task_ids.ids) ]) def _compute_task_count(self):", "get_lang from odoo.osv.expression import OR from .project_task_recurrence import DAYS, WEEKS", "image_attachments: self.displayed_image_id = image_attachments[0] if self.email_from and not self.partner_id: #", "task.repeat_show_week = task.recurring_task and (task.repeat_unit == 'month' and task.repeat_on_month ==", "self: recurrence_domain = OR([recurrence_domain, ['&', ('recurrence_id', '=', task.recurrence_id.id), ('create_date', '>=',", "= fields.Boolean(related='partner_id.is_company', readonly=True) commercial_partner_id = fields.Many2one(related='partner_id.commercial_partner_id') partner_email = fields.Char( compute='_compute_partner_email',", "there are no records in that stage to display.') rating_template_id", "fields.Selection([ ('forever', 'Forever'), ('until', 'End Date'), ('after', 'Number of 
Repetitions'),", "if allowed_users_changed: for project in self: permission_removed = allowed_users.get(project) -", "self: if task.partner_id: if task.project_id.partner_id: task.partner_id = task.project_id.partner_id else: task.partner_id", "self.project_id.privacy_visibility == 'portal' for group_name, group_method, group_data in groups: if", "init_values and self.kanban_state == 'blocked': return self.env.ref('project.mt_task_blocked') elif 'kanban_state_label' in", "Users\", default=lambda self: self.env.user, domain=[('share', '=', False)]) allowed_portal_user_ids = fields.Many2many('res.users',", "allowed_users = project.allowed_user_ids project.allowed_portal_user_ids = allowed_users.filtered('share') project.allowed_internal_user_ids = allowed_users -", "None: custom_values = {} defaults = { 'name': msg.get('subject') or", "for task in self: if task.partner_id and task.partner_id.email != task.partner_email:", "'in', self.ids), '|', '&', ('stage_id.is_closed', '=', False), ('stage_id.fold', '=', False),", "], 'Customer Ratings Status', default=\"stage\", required=True, help=\"How to get customer", "disable the recurrence first.')) return super().unlink() # --------------------------------------------------- # Subtasks", "return self.env.ref('project.mt_task_blocked') elif 'kanban_state_label' in init_values and self.kanban_state == 'done':", "in self.filtered(lambda x: x.project_id.privacy_visibility != 'portal'): task.access_warning = _( \"The", "are considered as closed.\") disabled_rating_warning = fields.Text(compute='_compute_disabled_rating_warning') def unlink_wizard(self, stage_view=False):", "= \"Task\" _date_name = \"date_assign\" _inherit = ['portal.mixin', 'mail.thread.cc', 'mail.activity.mixin',", "2) Ensure the defaults are correct (and computed once by", "compute='_compute_subtask_planned_hours', help=\"Sum of the time planned of all the sub-tasks", "task.stage_id = False @api.returns('self', lambda value: value.id) def copy(self, 
default=None):", "task.partner_id and task.partner_phone != task.partner_id.phone if will_write_email and will_write_phone: task.ribbon_message", "in a stage even if the project is not assigned", "self.env['mail.message.subtype'].browse(subtype_ids) if subtype_ids else None task_subtypes = (project_subtypes.mapped('parent_id') | project_subtypes.filtered(lambda", "project.partner_phone != project.partner_id.phone: project.partner_id.phone = project.partner_phone @api.onchange('alias_enabled') def _onchange_alias_name(self): if", "default_stage: default_stage[project_id] = self.with_context( default_project_id=project_id ).default_get(['stage_id']).get('stage_id') vals[\"stage_id\"] = default_stage[project_id] #", "'Day of the Month'), ], default='date', compute='_compute_repeat', readonly=False) repeat_on_year =", "visibility of the tasks of the project:\\n\" \"- Invited internal", "\"with Tasks (or optionally Issues if the Issue Tracker module", "_compute_ribbon_message(self): for task in self: will_write_email = task.partner_id and task.partner_email", "from odoo.exceptions import UserError, AccessError, ValidationError, RedirectWarning from odoo.tools.misc import", "direct children # If depth == 3, return children to", "task on project duplication \"\"\" return { 'stage_id': task.stage_id.id, 'name':", "project cannot be shared with the recipient(s) because the privacy", "(%s)\", user_names)) def _compute_attachment_ids(self): for task in self: attachment_ids =", "_('Customer Email') if task.partner_id.email else _('Customer') task._message_add_suggested_recipient(recipients, partner=task.partner_id, reason=reason) elif", "defaults = self._map_tasks_default_valeus(task, project) if task.parent_id: # set the parent", "dialog to stop the recurrence raise UserError(_('You cannot archive recurring", "('stage_id.is_closed', '=', False), ('stage_id.fold', '=', False), ('stage_id', '=', False)], ['project_id'],", "in self: if project.analytic_account_id and not 
project.analytic_account_id.line_ids: analytic_accounts_to_delete |= project.analytic_account_id", "existing ratings _check_company_auto = True def _compute_attached_docs_count(self): Attachment = self.env['ir.attachment']", "the Year'), ], default='date', compute='_compute_repeat', readonly=False) mon = fields.Boolean(string=\"Mon\", compute='_compute_repeat',", "return { 'name': _('Delete Stage'), 'view_mode': 'form', 'res_model': 'project.task.type.delete.wizard', 'views':", "= '\\n'.join('- %s' % p.name for p in disabled_projects) else:", "parent_id = fields.Many2one('project.task', string='Parent Task', index=True) child_ids = fields.One2many('project.task', 'parent_id',", "Analytic Account')), 'company_id': values.get('company_id') or self.env.company.id, 'partner_id': values.get('partner_id'), 'active': True,", "string='Project Manager', default=lambda self: self.env.user, tracking=True) alias_enabled = fields.Boolean(string='Use email", "task: task.project_id.privacy_visibility == 'portal') tasks.sudo().write({'allowed_user_ids': [(4, user.id) for user in", "else: stage.disabled_rating_warning = False class Project(models.Model): _name = \"project.project\" _description", "task.kanban_state_label = task.legend_normal elif task.kanban_state == 'blocked': task.kanban_state_label = task.legend_blocked", "tasks followed by\\n\" \" them or by someone of their", "analytic_account def _create_analytic_account(self): for project in self: analytic_account = self.env['account.analytic.account'].create({", "project.partner_id and project.partner_id.email != project.partner_email: project.partner_email = project.partner_id.email def _inverse_partner_email(self):", "= fields.Char(string='Title', tracking=True, required=True, index=True) description = fields.Html(string='Description') priority =", "string='Assigned to', default=lambda self: self.env.uid, index=True, tracking=True) partner_id = fields.Many2one('res.partner',", "res['stage_id'] = 
(test_task.stage_id.mail_template_id, { 'auto_delete_message': True, 'subtype_id': self.env['ir.model.data'].xmlid_to_res_id('mail.mt_note'), 'email_layout_xmlid': 'mail.mail_notification_light'", "favorite_projects = not_fav_projects = self.env['project.project'].sudo() for project in self: if", "= list(set([project['project_id'][0] for project in readgroup] + self.project_ids.ids)) wizard =", "(copy)\", self.name) if self.recurrence_id: default['recurrence_id'] = self.recurrence_id.copy().id return super(Task, self).copy(default)", "'stage_id' in vals: vals.update(self.update_date_end(vals['stage_id'])) vals['date_last_stage_update'] = now # reset kanban", "by someone of their company.\") allowed_user_ids = fields.Many2many('res.users', compute='_compute_allowed_users', inverse='_inverse_allowed_user')", "True, 'subtype_id': self.env['ir.model.data'].xmlid_to_res_id('mail.mt_note'), 'email_layout_xmlid': 'mail.mail_notification_light' }) return res def _creation_subtype(self):", "_message_post_after_hook(self, message, msg_vals): if message.attachment_ids and not self.displayed_image_id: image_attachments =", "'type': 'ir.actions.act_window', 'context': dict(self._context, create=False) } def action_subtask(self): action =", "management. 
\" \"Use an analytic account to record cost and", "Stage'), 'view_mode': 'form', 'res_model': 'project.task.type.delete.wizard', 'views': [(self.env.ref('project.view_project_task_type_delete_wizard').id, 'form')], 'type': 'ir.actions.act_window',", "['working_hours_open', 'working_hours_close', 'working_days_open', 'working_days_close'], 0.0)) @api.depends('stage_id', 'kanban_state') def _compute_kanban_state_label(self): for", "customer's feedbacks.\") rating_status_period = fields.Selection([ ('daily', 'Daily'), ('weekly', 'Weekly'), ('bimonthly',", "task.partner_id = task.project_id.partner_id or task.parent_id.partner_id @api.depends('partner_id.email', 'parent_id.email_from') def _compute_email_from(self): for", "current project itself.\") allow_subtasks = fields.Boolean('Sub-tasks', default=lambda self: self.env.user.has_group('project.group_subtask_project')) allow_recurring_tasks", "default filters\"\"\" action = self.env['ir.actions.act_window']._for_xml_id('project.rating_rating_action_view_project_rating') action['name'] = _('Ratings of %s')", "if disabled_projects: stage.disabled_rating_warning = '\\n'.join('- %s' % p.name for p", "displayed on your dashboard.\") label_tasks = fields.Char(string='Use Tasks as', default='Tasks',", "and all internal users'), ], string='Visibility', required=True, default='portal', help=\"Defines the", "= project.allowed_internal_user_ids | project.allowed_portal_user_ids project.allowed_user_ids = users def _inverse_allowed_user(self): for", "project.allowed_user_ids allowed_portal_users_removed = permission_removed.filtered('share') project.message_unsubscribe(allowed_portal_users_removed.partner_id.commercial_partner_id.ids) for task in project.task_ids: task.allowed_user_ids", "project.allowed_user_ids = users def _inverse_allowed_user(self): for project in self: allowed_users", "'tree,form', 'domain': [('recurrence_id', 'in', self.recurrence_id.ids)], } # --------------------------------------------------- # Rating", 
"'Rating when changing stage'), ('periodic', 'Periodical Rating') ], 'Customer Ratings", "help=\"These people will receive email.\", index=True, compute='_compute_email_from', store=\"True\", readonly=False) allowed_user_ids", "--------------------------------------------------- # Subtasks # --------------------------------------------------- @api.depends('parent_id.partner_id', 'project_id.partner_id') def _compute_partner_id(self): \"\"\"", "copy=False) date_assign = fields.Datetime(string='Assigning Date', index=True, copy=False, readonly=True) date_deadline =", "tasks of the project.\", translate=True) tasks = fields.One2many('project.task', 'project_id', string=\"Task", "or vals.get('privacy_visibility'): for project in self.filtered(lambda project: project.privacy_visibility == 'portal'):", "('july', 'July'), ('august', 'August'), ('september', 'September'), ('october', 'October'), ('november', 'November'),", "_description = \"Task\" _date_name = \"date_assign\" _inherit = ['portal.mixin', 'mail.thread.cc',", "return self.env.ref('project.mt_task_stage') return super(Task, self)._track_subtype(init_values) def _notify_get_groups(self, msg_vals=None): \"\"\" Handle", "customer portal: include comment and incoming emails in communication history", "'in', self.ids)]).write({'active': False}) return super(ProjectTaskType, self).write(vals) @api.depends('project_ids', 'project_ids.rating_active') def _compute_disabled_rating_warning(self):", "domain=[('model', '=', 'project.task')], help=\"If set an email will be sent", "fields.Boolean('Closing Stage', help=\"Tasks in this stage are considered as closed.\")", "('company_id', '=', company_id)]\", check_company=True, help=\"Analytic account to which this project", "force_send=force_send) def rating_get_partner_id(self): res = super(Task, self).rating_get_partner_id() if not res", "\"\"\" return the action to see all the rating of", "compute='_compute_repeat', readonly=False) repeat_on_month = fields.Selection([ 
('date', 'Date of the Month'),", "fields.Boolean(related='partner_id.is_company', readonly=True) commercial_partner_id = fields.Many2one(related='partner_id.commercial_partner_id') partner_email = fields.Char( compute='_compute_partner_email', inverse='_inverse_partner_email',", "Computed field about working time elapsed between record creation and", "batch creation of tasks # 2) Ensure the defaults are", "'=', 'project.task'), ('res_id', 'in', project.task_ids.ids) ]) def _compute_task_count(self): task_data =", "create_context['default_user_id'] = False if custom_values is None: custom_values = {}", "children = self.mapped('child_ids').filtered(lambda children: children.active) if not children: return self.env['project.task']", "self: if task.project_id: if task.project_id not in task.stage_id.project_ids: task.stage_id =", "= fields.Integer(string='Color Index') user_id = fields.Many2one('res.users', string='Project Manager', default=lambda self:", "has no write access for project. not_fav_projects.write({'favorite_user_ids': [(4, self.env.uid)]}) favorite_projects.write({'favorite_user_ids':", "this section or be a default stage; if not set,", "update_vals=update_vals) def _message_get_suggested_recipients(self): recipients = super(Task, self)._message_get_suggested_recipients() for task in", "return children return children + children._get_all_subtasks(depth - 1) def action_open_parent_task(self):", "default['name'] = _(\"%s (copy)\", self.name) if self.recurrence_id: default['recurrence_id'] = self.recurrence_id.copy().id", "|= portal_users self.allowed_internal_user_ids |= internal_users return res def message_unsubscribe(self, partner_ids=None,", "of its tasks.')) # Delete the empty related analytic account", "recurrence_id = fields.Many2one('project.task.recurrence', copy=False) recurrence_update = fields.Selection([ ('this', 'This task'),", "task[f] = task.recurrence_id[f] else: if task.recurring_task: task[f] = defaults.get(f) else:", "thu = 
fields.Boolean(string=\"Thu\", compute='_compute_repeat', readonly=False) fri = fields.Boolean(string=\"Fri\", compute='_compute_repeat', readonly=False)", "may see all project and tasks.\\n\" \"- Invited portal and", "self: project.is_favorite = self.env.user in project.favorite_user_ids def _inverse_is_favorite(self): favorite_projects =", "'res_model': 'project.delete.wizard', 'views': [(self.env.ref('project.project_delete_wizard_form').id, 'form')], 'type': 'ir.actions.act_window', 'res_id': wizard.id, 'target':", "one directly from notification emails. Also give access button to", "disable the recurrence first.')) # stage change: update date_last_stage_update if", "'project.task')]).ids message_attachment_ids = task.mapped('message_ids.attachment_ids').ids # from mail_thread task.attachment_ids = [(6,", "if 'repeat_weekday' in default_fields: vals['repeat_weekday'] = self._fields.get('repeat_weekday').selection[week_start][0] return vals @api.model_create_multi", "tools.email_split((msg.get('to') or '') + ',' + (msg.get('cc') or '')) #", "We want to copy archived task, but do not propagate", "in vals: # archiving/unarchiving a project does it on its", "return super(Task, self)._track_subtype(init_values) def _notify_get_groups(self, msg_vals=None): \"\"\" Handle project users", "the project's tasks. 
(%s)\", user_names)) def _compute_attachment_ids(self): for task in", "@api.model def message_new(self, msg, custom_values=None): \"\"\" Overrides mail_thread message_new that", "p: not p.rating_active) if disabled_projects: stage.disabled_rating_warning = '\\n'.join('- %s' %", "'normal' # user_id change: update date_assign if vals.get('user_id') and 'date_assign'", "channel_ids=channel_ids, subtype_ids=task_subtypes) if partner_ids: all_users = self.env['res.partner'].browse(partner_ids).user_ids portal_users = all_users.filtered('share')", "project.privacy_visibility == 'portal' and project.partner_id.user_ids: project.allowed_user_ids |= project.partner_id.user_ids return project", "self._context.get('default_project_id'): return self.env['project.project'].browse(self._context['default_project_id']).company_id return self.env.company @api.model def _read_group_stage_ids(self, stages, domain,", "'bimonthly': 15, 'monthly': 30, 'quarterly': 90, 'yearly': 365} for project", "self.mapped('tasks').message_subscribe( partner_ids=partner_ids, channel_ids=channel_ids, subtype_ids=task_subtypes) if partner_ids: all_users = self.env['res.partner'].browse(partner_ids).user_ids portal_users", "recipients that can assign tasks and create new one directly", "of tasks # 2) Ensure the defaults are correct (and", "== 'done': return self.env.ref('project.mt_task_ready') elif 'stage_id' in init_values: return self.env.ref('project.mt_task_stage')", "_( \"The project cannot be shared with the recipient(s) because", "help=\"Automatically modify the kanban state when the customer replies to", "default=_get_default_favorite_user_ids, string='Members') is_favorite = fields.Boolean(compute='_compute_is_favorite', inverse='_inverse_is_favorite', string='Show Project on dashboard',", "fields.Integer(string='Color', default=_get_default_color) _sql_constraints = [ ('name_uniq', 'unique (name)', \"Tag name", "set, stages must belong to this section or be a", "if partner_ids: 
new_allowed_users = self.env['res.partner'].browse(partner_ids).user_ids.filtered('share') tasks = self.filtered(lambda task: task.project_id.privacy_visibility", "not_fav_projects.write({'favorite_user_ids': [(4, self.env.uid)]}) favorite_projects.write({'favorite_user_ids': [(3, self.env.uid)]}) def _get_default_favorite_user_ids(self): return [(6,", "If they are notified they should probably have access to", "to 'blocked' (red bullet).\\n\") is_closed = fields.Boolean('Closing Stage', help=\"Tasks in", "= self.default_get(rec_fields) for task in self: for f in rec_fields:", "if task.parent_id: # set the parent to the duplicated task", "_onchange_alias_name(self): if not self.alias_enabled: self.alias_name = False def _compute_alias_enabled(self): for", "'sun', 'repeat_day', 'repeat_week', 'repeat_month', 'repeat_weekday'] @api.depends('recurring_task', 'repeat_unit', 'repeat_on_month', 'repeat_on_year') def", "ValidationError(_(\"The project visibility setting doesn't allow portal users to see", "or issue is in that stage.') mail_template_id = fields.Many2one( 'mail.template',", "return vals @api.model_create_multi def create(self, vals_list): default_stage = dict() for", "task (including its sub-tasks).', tracking=True) subtask_planned_hours = fields.Float(\"Sub-tasks Planned Hours\",", "self.id, # will give default subtask field in `default_get` 'default_company_id':", "def _map_tasks_default_valeus(self, task, project): \"\"\" get the default value for", "access right if 'is_favorite' in vals: vals.pop('is_favorite') self._fields['is_favorite'].determine_inverse(self) res =", "and will_write_phone: task.ribbon_message = _('By saving this change, the customer", "task.partner_id and task.partner_id.email != task.partner_email: task.partner_email = task.partner_id.email def _inverse_partner_email(self):", "task.repeat_unit == 'day' else 1 recurring_dates = self.env['project.task.recurrence']._get_next_recurring_dates( date +", "= 
self.env.ref('project.group_project_user').id group_func = lambda pdata: pdata['type'] == 'user' and", "= fields.Char(string='Next Recurrencies', compute='_compute_recurrence_message') repeat_interval = fields.Integer(string='Repeat Every', default=1, compute='_compute_repeat',", "state when the customer replies to the feedback for this", "= not_fav_projects = self.env['project.project'].sudo() for project in self: if self.env.user", "for task in self: attachment_ids = self.env['ir.attachment'].search([('res_id', '=', task.id), ('res_model',", "'Date of the Year'), ('day', 'Day of the Year'), ],", "ValidationError(_('Error! You cannot create recursive hierarchy of tasks.')) @api.constrains('allowed_user_ids') def", "to portal users and portal customers. If they are notified", "section_ids section_ids = [] if section_id: section_ids.append(section_id) section_ids.extend(self.mapped('project_id').ids) search_domain =", "or \"{}\") defaults['project_id'] = self.id return values # --------------------------------------------------- #", "include comment and incoming emails in communication history website_message_ids =", "= self._get_recurrence_fields() defaults = self.default_get(rec_fields) for task in self: for", "= _('Customer Email') if task.partner_id.email else _('Customer') task._message_add_suggested_recipient(recipients, partner=task.partner_id, reason=reason)", "dt_date_assign, compute_leaves=True) task.working_hours_open = duration_data['hours'] task.working_days_open = duration_data['days'] else: task.working_hours_open", "allow you to hide the project without removing it.\") sequence", "# --------------------------------------------------- # Subtasks # --------------------------------------------------- @api.depends('parent_id.partner_id', 'project_id.partner_id') def _compute_partner_id(self):", "\"\"\" Unsubscribe from all tasks when unsubscribing from a project", "containing tasks. 
You can either archive it or first delete", "------------------------------------------------ @api.model def default_get(self, default_fields): vals = super(Task, self).default_get(default_fields) days", "'in', self.recurrence_id.ids)], } # --------------------------------------------------- # Rating business # ---------------------------------------------------", "task.partner_phone @api.depends('partner_email', 'partner_phone', 'partner_id') def _compute_ribbon_message(self): for task in self:", "stage is folded in the kanban view when there are", "= fields.Datetime.from_string(task.create_date) if task.date_assign: dt_date_assign = fields.Datetime.from_string(task.date_assign) duration_data = task.project_id.resource_calendar_id.get_work_duration_data(dt_create_date,", "@api.model def _read_group_stage_ids(self, stages, domain, order): search_domain = [('id', 'in',", "if self.project_id.privacy_visibility == 'followers': allowed_user_ids = self.project_id.allowed_internal_user_ids.partner_id.ids group_func = lambda", "def _get_default_project_ids(self): default_project_id = self.env.context.get('default_project_id') return [default_project_id] if default_project_id else", "fields.Integer(string=\"Repetitions\", default=1, compute='_compute_repeat', readonly=False) repeat_on_month = fields.Selection([ ('date', 'Date of", "and date_last_stage_update if vals.get('stage_id'): vals.update(self.update_date_end(vals['stage_id'])) vals['date_last_stage_update'] = fields.Datetime.now() # recurrence", "def action_subtask(self): action = self.env[\"ir.actions.actions\"]._for_xml_id(\"project.project_task_action_sub_task\") # display all subtasks of", "super(Task, tasks).write(vals) # rating on stage if 'stage_id' in vals", "access for project. 
not_fav_projects.write({'favorite_user_ids': [(4, self.env.uid)]}) favorite_projects.write({'favorite_user_ids': [(3, self.env.uid)]}) def", "('res_id', '=', project.id), '&', ('res_model', '=', 'project.task'), ('res_id', 'in', project.task_ids.ids)", "'repeat_day', 'repeat_week', 'repeat_month', 'repeat_weekday') def _compute_recurrence_message(self): self.recurrence_message = False for", "'July'), ('august', 'August'), ('september', 'September'), ('october', 'October'), ('november', 'November'), ('december',", "optionally Issues if the Issue Tracker module is installed).\") privacy_visibility", "Ratings Status', default=\"stage\", required=True, help=\"How to get customer feedback?\\n\" \"-", "self).rating_apply(rate, token=token, feedback=feedback, subtype_xmlid=\"project.mt_task_rating\") def _rating_get_parent_field_name(self): return 'project_id' class ProjectTags(models.Model):", "[ ('project_date_greater', 'check(date >= date_start)', 'Error! project start-date must be", "self.alias_enabled: self.alias_name = False def _compute_alias_enabled(self): for project in self:", "self.analytic_account_id.id)] return action def action_view_all_rating(self): \"\"\" return the action to", "if 'active' in vals and not vals.get('active') and any(self.mapped('recurrence_id')): #", "('portal', 'Invited portal users and all internal users'), ], string='Visibility',", "= fields.Boolean(string='Folded in Kanban', help='This stage is folded in the", "or self.project_id ctx = dict(self.env.context) ctx = {k: v for", "default=lambda self: self.env.user, tracking=True) alias_enabled = fields.Boolean(string='Use email alias', compute='_compute_alias_enabled',", "Valid Explanation', readonly=True, related_sudo=False) legend_normal = fields.Char(related='stage_id.legend_normal', string='Kanban Ongoing Explanation',", "def rating_get_partner_id(self): res = super(Task, self).rating_get_partner_id() if not res and", "name = fields.Char('Name', required=True) color = 
fields.Integer(string='Color', default=_get_default_color) _sql_constraints =", "vals: vals[\"company_id\"] = self.env[\"project.project\"].browse( project_id ).company_id.id or self.env.company.id if project_id", "self.env.user.id}) # If depth == 1, return only direct children", "if project.partner_id and project.partner_phone != project.partner_id.phone: project.partner_id.phone = project.partner_phone @api.onchange('alias_enabled')", "tasks.\\n\" \"- Invited portal and all internal users: employees may", "= users def _inverse_allowed_user(self): for project in self: allowed_users =", "= project_actions groups = [new_group] + groups if self.project_id.privacy_visibility ==", "(self.repeat_type != 'until' or self.repeat_until and self.repeat_until > fields.Date.today()) @api.depends('recurrence_id')", "else: task.ribbon_message = False @api.constrains('parent_id') def _check_parent_id(self): if not self._check_recursion():", "action['domain'] = [('id', 'child_of', self.id), ('id', '!=', self.id)] # update", "set to False, it will allow you to hide the", "False} def unlink(self): if any(self.mapped('recurrence_id')): # TODO: show a dialog", "the project's analytic account \"\"\" action = self.env[\"ir.actions.actions\"]._for_xml_id(\"analytic.account_analytic_line_action\") action['context'] =", "self.displayed_image_id: image_attachments = message.attachment_ids.filtered(lambda a: a.mimetype == 'image') if image_attachments:", "wizard = self.env['project.delete.wizard'].create({ 'project_ids': self.ids }) return { 'name': _('Confirmation'),", "map tasks from old to new project \"\"\" project =", "!= 'portal'): task.access_warning = _( \"The task cannot be shared", "number will also be updated.') else: task.ribbon_message = False @api.constrains('parent_id')", ".env.ref('project.act_project_project_2_project_task_all') \\ .sudo().read()[0] action['display_name'] = self.name return action def action_view_account_analytic_line(self):", "OR([recurrence_domain, ['&', 
('recurrence_id', '=', task.recurrence_id.id), ('create_date', '>=', task.create_date)]]) else: recurrence_domain", "odoo.osv.expression import OR from .project_task_recurrence import DAYS, WEEKS class ProjectTaskType(models.Model):", "('daily', 'Daily'), ('weekly', 'Weekly'), ('bimonthly', 'Twice a Month'), ('monthly', 'Once", "when displaying a list of Projects.\") partner_id = fields.Many2one('res.partner', string='Customer',", "c in count} for task in recurring_tasks: task.recurring_count = tasks_count.get(task.recurrence_id.id,", "# takes all existing ratings _check_company_auto = True def _compute_attached_docs_count(self):", "vals: vals['kanban_state'] = 'normal' # user_id change: update date_assign if", "Email')) return recipients def _notify_email_header_dict(self): headers = super(Task, self)._notify_email_header_dict() if", "headers['X-Odoo-Tags'] = ','.join(self.tag_ids.mapped('name')) return headers def _message_post_after_hook(self, message, msg_vals): if", "('wed', 'Wednesday'), ('thu', 'Thursday'), ('fri', 'Friday'), ('sat', 'Saturday'), ('sun', 'Sunday'),", "task partner_id remains the same. 
\"\"\" for task in self:", "if not default.get('name'): default['name'] = _(\"%s (copy)\") % (self.name) project", "('res_id', 'in', project.task_ids.ids) ]) def _compute_task_count(self): task_data = self.env['project.task'].read_group([('project_id', 'in',", "@api.onchange('alias_enabled') def _onchange_alias_name(self): if not self.alias_enabled: self.alias_name = False def", "copy=False) recurrence_update = fields.Selection([ ('this', 'This task'), ('subsequent', 'This and", "bad feedback will set the kanban state to 'blocked' (red", "('0', 'Normal'), ('1', 'Important'), ], default='0', index=True, string=\"Priority\") sequence =", "for this stage.\\n\" \" * A good feedback from the", "[p.id for p in self.env['mail.thread']._mail_find_partner_from_emails(email_list, records=task, force_create=False) if p] task.message_subscribe(partner_ids)", "('res_model', '=', 'project.task')]).ids message_attachment_ids = task.mapped('message_ids.attachment_ids').ids # from mail_thread task.attachment_ids", "'blocked' (red bullet).\\n\") is_closed = fields.Boolean('Closing Stage', help=\"Tasks in this", "{} defaults = { 'name': msg.get('subject') or _(\"No Subject\"), 'email_from':", "mail_thread message_new that is called by the mailgateway through message_process.", "the sequence order when displaying a list of tasks.\") stage_id", "len(recurring_dates)} def _is_recurrence_valid(self): self.ensure_one() return self.repeat_interval > 0 and\\ (not", "then an email will be sent to the customer when", "{} if not default.get('name'): default['name'] = _(\"%s (copy)\") % (self.name)", "to the customer when the task reaches this step.\") auto_validation_kanban_state", "else None if not subtype_ids or task_subtypes: self.mapped('tasks').message_subscribe( partner_ids=partner_ids, channel_ids=channel_ids,", "return [default_project_id] if default_project_id else None active = fields.Boolean('Active', default=True)", "def _compute_partner_email(self): for project in self: if 
project.partner_id and project.partner_id.email", "if project.partner_id and project.partner_id.email != project.partner_email: project.partner_email = project.partner_id.email def", "'user_id', default=_get_default_favorite_user_ids, string='Members') is_favorite = fields.Boolean(compute='_compute_is_favorite', inverse='_inverse_is_favorite', string='Show Project on", "related_sudo=False) attachment_ids = fields.One2many('ir.attachment', compute='_compute_attachment_ids', string=\"Main Attachments\", help=\"Attachment that don't", "sequence = fields.Integer(default=10, help=\"Gives the sequence order when displaying a", "if recurrence_update != 'this': recurrence_domain = [] if recurrence_update ==", "self.map_tasks(project.id) return project @api.model def create(self, vals): # Prevent double", "\"\"\" Overrides mail_thread message_new that is called by the mailgateway", "task.partner_id.email: task.partner_id.email = task.partner_email @api.depends('partner_id.phone') def _compute_partner_phone(self): for task in", "values['alias_model_id'] = self.env['ir.model']._get('project.task').id if self.id: values['alias_defaults'] = defaults = ast.literal_eval(self.alias_defaults", "= fields.Selection([ ('forever', 'Forever'), ('until', 'End Date'), ('after', 'Number of", "in self: project.doc_count = Attachment.search_count([ '|', '&', ('res_model', '=', 'project.project'),", "_compute_attached_docs_count(self): Attachment = self.env['ir.attachment'] for project in self: project.doc_count =", "this task (including its sub-tasks).', tracking=True) subtask_planned_hours = fields.Float(\"Sub-tasks Planned", "# set the parent to the duplicated task defaults['parent_id'] =", "created through the chatter using # suggested recipients. 
This heuristic", "= task.partner_id and task.partner_phone != task.partner_id.phone if will_write_email and will_write_phone:", "False for task in self.filtered(lambda t: t.recurring_task and t._is_recurrence_valid()): date", "vals['repeat_month'] = self._fields.get('repeat_month').selection[fields.Datetime.today().month - 1][0] if 'repeat_until' in default_fields: vals['repeat_until']", "',' + (msg.get('cc') or '')) # check left-part is not", "analytic_accounts_to_delete |= project.analytic_account_id result = super(Project, self).unlink() analytic_accounts_to_delete.unlink() return result", "domain=[('share', '=', True)]) doc_count = fields.Integer(compute='_compute_attached_docs_count', string=\"Number of documents attached\")", "domain=[('share', '=', False)]) allowed_portal_user_ids = fields.Many2many('res.users', 'project_allowed_portal_users_rel', string=\"Allowed Portal Users\",", "or be a default stage; if not set, stages must", "fields.Datetime(string='Last Stage Update', index=True, copy=False, readonly=True) project_id = fields.Many2one('project.project', string='Project',", "fields.Integer(default=10, help=\"Gives the sequence order when displaying a list of", "fields.Boolean(default=True) name = fields.Char(string='Title', tracking=True, required=True, index=True) description = fields.Html(string='Description')", "hide the project without removing it.\") sequence = fields.Integer(default=10, help=\"Gives", "all of its tasks.')) # Delete the empty related analytic", "('third', 'Third'), ('last', 'Last'), ], default='first', compute='_compute_repeat', readonly=False) repeat_weekday =", "fields.Boolean(string=\"Thu\", compute='_compute_repeat', readonly=False) fri = fields.Boolean(string=\"Fri\", compute='_compute_repeat', readonly=False) sat =", "task.partner_id and task.partner_email != task.partner_id.email: task.partner_id.email = task.partner_email @api.depends('partner_id.phone') def", "displayed for the done state for kanban selection, when the", 
"not rec.project_id) if leftover: res.update(super(Task, leftover)._notify_get_reply_to(default=default, records=None, company=company, doc_names=doc_names)) return", "= super(Task, self.with_context(create_context)).message_new(msg, custom_values=defaults) email_list = task.email_split(msg) partner_ids = [p.id", "== 'portal' and pdata['id'] in allowed_user_ids, {} )) portal_privacy =", "self.env.context, } def unlink(self): # Check project is empty for", "stage; if not set, stages must be default stages \"\"\"", "[(3, self.env.uid)]}) def _get_default_favorite_user_ids(self): return [(6, 0, [self.env.uid])] name =", "'project_id', string=\"Task Activities\") resource_calendar_id = fields.Many2one( 'resource.calendar', string='Working Time', related='company_id.resource_calendar_id')", "updates the document according to the email. \"\"\" # remove", "import OR from .project_task_recurrence import DAYS, WEEKS class ProjectTaskType(models.Model): _name", "project should be displayed on your dashboard.\") label_tasks = fields.Char(string='Use", "is_favorite = fields.Boolean(compute='_compute_is_favorite', inverse='_inverse_is_favorite', string='Show Project on dashboard', help=\"Whether this", "heuristic allows to avoid ugly hacks in JS. 
new_partner =", "task.stage_id.rating_template_id if rating_template: task.rating_send_request(rating_template, lang=task.partner_id.lang, force_send=force_send) def rating_get_partner_id(self): res =", "an email will be sent to the customer when the", "'=', False), ('is_closed', '=', False)]) else: task.stage_id = False @api.returns('self',", "= self.id return values # --------------------------------------------------- # Actions # ---------------------------------------------------", "\"\"\" Handle project users and managers recipients that can assign", "('second', 'Second'), ('third', 'Third'), ('last', 'Last'), ], default='first', compute='_compute_repeat', readonly=False)", "follower in self.message_follower_ids: project.message_subscribe(partner_ids=follower.partner_id.ids, subtype_ids=follower.subtype_ids.ids) if 'tasks' not in default:", "= fields.Float(\"Sub-tasks Planned Hours\", compute='_compute_subtask_planned_hours', help=\"Sum of the time planned", "('mimetype', 'ilike', 'image')]\", string='Cover Image') legend_blocked = fields.Char(related='stage_id.legend_blocked', string='Kanban Blocked", "self.ids), ('recurring_task', '=', True)]).write({'recurring_task': False}) if 'active' in vals: #", "account to which this project is linked for financial management.", "_(\"%s (copy)\", self.name) if self.recurrence_id: default['recurrence_id'] = self.recurrence_id.copy().id return super(Task,", "in allowed_user_ids new_group = ('group_project_user', group_func, {}) if not self.user_id", "action['context'] else {} action_context.update(self._context) action_context['search_default_parent_res_name'] = self.name action_context.pop('group_by', None) return", "_compute_company_id(self): for task in self.filtered(lambda task: task.project_id): task.company_id = task.project_id.company_id", "task.repeat_number > 5 or task.repeat_type == 'forever' or len(recurring_dates) >", "repeat_show_week = fields.Boolean(compute='_compute_repeat_visibility') repeat_show_month = 
fields.Boolean(compute='_compute_repeat_visibility') @api.model def _get_recurrence_fields(self): return", "self: if task.partner_id and task.partner_email != task.partner_id.email: task.partner_id.email = task.partner_email", "project.partner_email @api.depends('partner_id.phone') def _compute_partner_phone(self): for project in self: if project.partner_id", "== 'followers': allowed_user_ids = self.project_id.allowed_internal_user_ids.partner_id.ids group_func = lambda pdata: pdata['type']", "raise UserError(_(\"Sorry. You can't set a task as its parent", "a bad feedback will set the kanban state to 'blocked'", "in that stage to display.') rating_template_id = fields.Many2one( 'mail.template', string='Rating", "inverse='_inverse_partner_phone', string=\"Phone\", readonly=False, store=True, copy=False) ribbon_message = fields.Char('Ribbon message', compute='_compute_ribbon_message')", "if section_id: section_ids.append(section_id) section_ids.extend(self.mapped('project_id').ids) search_domain = [] if section_ids: search_domain", "Recurrence', 'type': 'ir.actions.act_window', 'res_model': 'project.task', 'view_mode': 'tree,form', 'domain': [('recurrence_id', 'in',", "self: analytic_account = self.env['account.analytic.account'].create({ 'name': project.name, 'company_id': project.company_id.id, 'partner_id': project.partner_id.id,", "_notify_email_header_dict(self): headers = super(Task, self)._notify_email_header_dict() if self.project_id: current_objects = [h", "or self.env.company.id if project_id and \"stage_id\" not in vals: #", "default subtask field in `default_get` 'default_company_id': default_project.company_id.id if default_project else", "mail templates on the stages for which you want to", "of tasks.\") stage_id = fields.Many2one('project.task.type', string='Stage', compute='_compute_stage_id', store=True, readonly=False, ondelete='restrict',", "recurrence rec_fields = vals.keys() & self._get_recurrence_fields() if rec_fields and 
vals.get('recurring_task')", "tasks, too self.with_context(active_test=False).mapped('tasks').write({'active': vals['active']}) if vals.get('partner_id') or vals.get('privacy_visibility'): for project", "State Label', tracking=True) create_date = fields.Datetime(\"Created On\", readonly=True, index=True) write_date", "of the project's analytic account \"\"\" action = self.env[\"ir.actions.actions\"]._for_xml_id(\"analytic.account_analytic_line_action\") action['context']", "in init_values and self.kanban_state == 'done': return self.env.ref('project.mt_task_ready') elif 'stage_id'", "-*- # Part of Odoo. See LICENSE file for full", "portal users \"\"\" res = super(Project, self).message_subscribe(partner_ids=partner_ids, channel_ids=channel_ids, subtype_ids=subtype_ids) project_subtypes", "k.startswith('search_default_')} ctx.update({ 'default_name': self.env.context.get('name', self.name) + ':', 'default_parent_id': self.id, #", "= fields.Text(compute='_compute_disabled_rating_warning') def unlink_wizard(self, stage_view=False): self = self.with_context(active_test=False) # retrieves", "kanban state when changing stage if 'kanban_state' not in vals:", "required=True, help='Override the default value displayed for the normal state", "Explanation', readonly=True, related_sudo=False) legend_done = fields.Char(related='stage_id.legend_done', string='Kanban Valid Explanation', readonly=True,", "task.recurring_count = tasks_count.get(task.recurrence_id.id, 0) @api.depends('partner_id.email') def _compute_partner_email(self): for task in", "'default_name': self.env.context.get('name', self.name) + ':', 'default_parent_id': self.id, # will give", "project in readgroup] + self.project_ids.ids)) wizard = self.with_context(project_ids=project_ids).env['project.task.type.delete.wizard'].create({ 'project_ids': project_ids,", "accessible by the recipient(s).\") @api.depends('rating_status', 'rating_status_period') def _compute_rating_request_deadline(self): periods =", 
"rec_field in rec_fields} for task in self: if task.recurrence_id: task.recurrence_id.write(rec_values)", "Project(models.Model): _name = \"project.project\" _description = \"Project\" _inherit = ['portal.mixin',", "when the task or issue is in that stage.') mail_template_id", "= fields.Char(\"Name\", index=True, required=True, tracking=True) description = fields.Html() active =", "if leftover: res.update(super(Task, leftover)._notify_get_reply_to(default=default, records=None, company=company, doc_names=doc_names)) return res def", "task.repeat_show_day = task.recurring_task and (task.repeat_unit == 'month' and task.repeat_on_month ==", "this stage are considered as closed.\") disabled_rating_warning = fields.Text(compute='_compute_disabled_rating_warning') def", "allow_recurring_tasks = fields.Boolean('Recurring Tasks', default=lambda self: self.env.user.has_group('project.group_project_recurring_tasks')) # rating fields", "---------------------------------------- def stage_find(self, section_id, domain=[], order='sequence'): \"\"\" Override of the", "'repeat_interval', 'repeat_unit', 'repeat_type', 'repeat_until', 'repeat_number', 'repeat_on_month', 'repeat_on_year', 'mon', 'tue', 'wed',", "tasks # 2) Ensure the defaults are correct (and computed", "'repeat_on_month', 'repeat_on_year') def _compute_repeat_visibility(self): for task in self: task.repeat_show_day =", "task.repeat_unit, task.repeat_type, task.repeat_until, task.repeat_on_month, task.repeat_on_year, task._get_weekdays(WEEKS.get(task.repeat_week)), task.repeat_day, task.repeat_week, task.repeat_month, count=number_occurrences)", "= fields.Integer(string='Color Index') user_email = fields.Char(related='user_id.email', string='User Email', readonly=True, related_sudo=False)", "repeat_type = fields.Selection([ ('forever', 'Forever'), ('until', 'End Date'), ('after', 'Number", "in email_list if x.split('@')[0] not in aliases] @api.model def message_new(self,", "replies to the feedback for this 
stage.\\n\" \" * A", "without depth limit def _get_all_subtasks(self, depth=0): children = self.mapped('child_ids').filtered(lambda children:", "'this': recurrence_domain = [] if recurrence_update == 'subsequent': for task", "if task.recurring_task: task[f] = defaults.get(f) else: task[f] = False def", "res def email_split(self, msg): email_list = tools.email_split((msg.get('to') or '') +", "changes, the task partner_id remains the same. \"\"\" for task", "not already an alias aliases = self.mapped('project_id.alias_name') return [x for", "project is too restricted. Set the privacy to 'Visible by", "= self.env['project.project'].sudo() for project in self: if self.env.user in project.favorite_user_ids:", "coding: utf-8 -*- # Part of Odoo. See LICENSE file", "'year' and task.repeat_on_year == 'day') task.repeat_show_dow = task.recurring_task and task.repeat_unit", "vals): if 'active' in vals and not vals['active']: self.env['project.task'].search([('stage_id', 'in',", "the task partner_id is automatically changed also. 
2) if the", "partner_id = fields.Many2one('res.partner', string='Customer', auto_join=True, tracking=True, domain=\"['|', ('company_id', '=', False),", "and revenue on your project.\") favorite_user_ids = fields.Many2many( 'res.users', 'project_favorite_user_rel',", "to close', store=True, group_operator=\"avg\") # customer portal: include comment and", "vals[rec_field] for rec_field in rec_fields} for task in self: if", "project_privacy_visibility = fields.Selection(related='project_id.privacy_visibility', string=\"Project Visibility\") # Computed field about working", "compute='_compute_partner_phone', inverse='_inverse_partner_phone', string=\"Phone\", readonly=False, store=True, copy=False) company_id = fields.Many2one('res.company', string='Company',", "task.recurrence_message += '<li>%s</li>' % date.strftime(date_format) if task.repeat_type == 'after' and", "help=\"Defines the visibility of the tasks of the project:\\n\" \"-", "to see all the rating of the project and activate", "fields.Integer(\"Sub-task count\", compute='_compute_subtask_count') email_from = fields.Char(string='Email From', help=\"These people will", "self: if not task.project_id: task.project_id = task.parent_id.project_id.subtask_project_id # --------------------------------------------------- #", "= \"date_assign\" _inherit = ['portal.mixin', 'mail.thread.cc', 'mail.activity.mixin', 'rating.mixin'] _mail_post_access =", "= fields.One2many('project.task', 'project_id', string='Tasks', domain=['|', ('stage_id.fold', '=', False), ('stage_id', '=',", "compute='_compute_allowed_user_ids', store=True, readonly=False, copy=False) project_privacy_visibility = fields.Selection(related='project_id.privacy_visibility', string=\"Project Visibility\") #", "return [(6, 0, [self.env.uid])] name = fields.Char(\"Name\", index=True, required=True, tracking=True)", "init_values: return self.env.ref('project.mt_task_stage') return super(Task, self)._track_subtype(init_values) def _notify_get_groups(self, 
msg_vals=None): \"\"\"", "self.env.ref('project.mt_task_ready') elif 'stage_id' in init_values: return self.env.ref('project.mt_task_stage') return super(Task, self)._track_subtype(init_values)", "task.id def _compute_access_warning(self): super(Task, self)._compute_access_warning() for task in self.filtered(lambda x:", "portal_users elif task.project_id.privacy_visibility == 'portal': task.allowed_user_ids |= task.project_id.allowed_portal_user_ids if task.project_id.privacy_visibility", "its view if self._context.get('default_project_id'): default_project = self.env['project.project'].browse(self.env.context['default_project_id']) else: default_project =", "{rec_field: vals[rec_field] for rec_field in rec_fields} rec_values['next_recurrence_date'] = fields.Datetime.today() recurrence", "for c in count} for task in recurring_tasks: task.recurring_count =", "the task reaches this step.\") auto_validation_kanban_state = fields.Boolean('Automatic kanban status',", "internal users: employees may only see the followed project and", "has no partner_id, use the project partner_id if any, or", "Set the privacy to 'Visible by following customers' in order", "if default is None: default = {} if not default.get('name'):", "self.browse(new_project_id) tasks = self.env['project.task'] # We want to copy archived", "is empty for project in self.with_context(active_test=False): if project.tasks: raise UserError(_('You", "group_data in groups: if group_name in ('customer', 'user') or group_name", "'subsequent': for task in self: recurrence_domain = OR([recurrence_domain, ['&', ('recurrence_id',", "token=token, feedback=feedback, subtype_xmlid=\"project.mt_task_rating\") def _rating_get_parent_field_name(self): return 'project_id' class ProjectTags(models.Model): \"\"\"", "stage' (green bullet).\\n\" \" * A medium or a bad", ").company_id.id or self.env.company.id if project_id and \"stage_id\" not in vals:", "task.partner_email != task.partner_id.email will_write_phone = 
task.partner_id and task.partner_phone != task.partner_id.phone", "store=\"True\", readonly=False) allowed_user_ids = fields.Many2many('res.users', string=\"Visible to\", groups='project.group_project_manager', compute='_compute_allowed_user_ids', store=True,", "'mail.template', string='Rating Email Template', domain=[('model', '=', 'project.task')], help=\"If set and", "'portal' and pdata['id'] in allowed_user_ids, {} )) portal_privacy = self.project_id.privacy_visibility", "_description = \"Project Tags\" def _get_default_color(self): return randint(1, 11) name", "string='Company', compute='_compute_company_id', store=True, readonly=False, required=True, copy=True, default=_default_company_id) color = fields.Integer(string='Color", "], string='Day Of The Week', compute='_compute_repeat', readonly=False) repeat_month = fields.Selection([", "'yearly': 365} for project in self: project.rating_request_deadline = fields.datetime.now() +", "self.env.company.id if project_id and \"stage_id\" not in vals: # 1)", "3, return children to third generation # If depth <=", "the mail templates on the stages for which you want", "rec_fields = vals.keys() & self._get_recurrence_fields() if rec_fields and vals.get('recurring_task') is", "'comment'])]) # recurrence fields allow_recurring_tasks = fields.Boolean(related='project_id.allow_recurring_tasks') recurring_task = fields.Boolean(string=\"Recurrent\")", "default_project = self.project_id.subtask_project_id or self.project_id ctx = dict(self.env.context) ctx =", "when the task or issue reaches this step.\") fold =", "project in self.filtered(lambda project: project.privacy_visibility == 'portal'): project.allowed_user_ids |= project.partner_id.user_ids", "compute='_compute_repeat', readonly=False) repeat_day = fields.Selection([ (str(i), str(i)) for i in", "= _( \"The project cannot be shared with the recipient(s)", "if project_id and \"stage_id\" not in vals: # 1) Allows", "a least 1 task in that stage # a task", "in self: if 
project.partner_id and project.partner_email != project.partner_id.email: project.partner_id.email =", "'Important'), ], default='0', index=True, string=\"Priority\") sequence = fields.Integer(string='Sequence', index=True, default=10,", "= fields.Many2many('project.task.type', 'project_task_type_rel', 'project_id', 'type_id', string='Tasks Stages') task_count = fields.Integer(compute='_compute_task_count',", "in default: self.map_tasks(project.id) return project @api.model def create(self, vals): #", "ribbon_message = fields.Char('Ribbon message', compute='_compute_ribbon_message') partner_city = fields.Char(related='partner_id.city', readonly=False) manager_id", "[(3, self.env.uid)]}) def action_view_tasks(self): action = self.with_context(active_id=self.id, active_ids=self.ids) \\ .env.ref('project.act_project_project_2_project_task_all')", "if task.partner_id: reason = _('Customer Email') if task.partner_id.email else _('Customer')", "ondelete='set null', domain=\"['|', ('company_id', '=', False), ('company_id', '=', company_id)]\", check_company=True,", "'=', 'project.task'), ('res_id', 'in', self.task_ids.ids) ]) action['context'] = \"{'default_res_model': '%s','default_res_id':", "fields.Char(string='Stage Name', required=True, translate=True) description = fields.Text(translate=True) sequence = fields.Integer(default=1)", "couln't use attachment_ids because a one2many is represented as a", "], compute='_compute_repeat', readonly=False) repeat_week = fields.Selection([ ('first', 'First'), ('second', 'Second'),", "of the project:\\n\" \"- Invited internal users: employees may only", "'stage_id' in init_values: return self.env.ref('project.mt_task_stage') return super(Task, self)._track_subtype(init_values) def _notify_get_groups(self,", "\"- Invited internal users: employees may only see the followed", "by the scheduler @api.model def _send_rating_all(self): projects = self.search([ ('rating_active',", "module is installed).\") privacy_visibility = 
fields.Selection([ ('followers', 'Invited internal users'),", "= [ ('project_date_greater', 'check(date >= date_start)', 'Error! project start-date must", "= self.env['ir.attachment'].search([('res_id', '=', task.id), ('res_model', '=', 'project.task')]).ids message_attachment_ids = task.mapped('message_ids.attachment_ids').ids", "duplication \"\"\" return { 'stage_id': task.stage_id.id, 'name': task.name, 'company_id': project.company_id.id,", "modify the kanban state when the customer replies to the", "('may', 'May'), ('june', 'June'), ('july', 'July'), ('august', 'August'), ('september', 'September'),", "CRUD overrides # ------------------------------------------------ @api.model def default_get(self, default_fields): vals =", "'September'), ('october', 'October'), ('november', 'November'), ('december', 'December'), ], compute='_compute_repeat', readonly=False)", "recipients. This heuristic allows to avoid ugly hacks in JS.", "= stages._search(search_domain, order=order, access_rights_uid=SUPERUSER_ID) return stages.browse(stage_ids) active = fields.Boolean(default=True) name", "== 'year' @api.depends('recurring_task') def _compute_repeat(self): rec_fields = self._get_recurrence_fields() defaults =", "('project_ids', '=', self.env.context['default_project_id'])] + search_domain stage_ids = stages._search(search_domain, order=order, access_rights_uid=SUPERUSER_ID)", "self)._alias_get_creation_values() values['alias_model_id'] = self.env['ir.model']._get('project.task').id if self.id: values['alias_defaults'] = defaults =", "repeat_show_day = fields.Boolean(compute='_compute_repeat_visibility') repeat_show_week = fields.Boolean(compute='_compute_repeat_visibility') repeat_show_month = fields.Boolean(compute='_compute_repeat_visibility') @api.model", "inverse='_inverse_partner_phone', string=\"Phone\", readonly=False, store=True, copy=False) company_id = fields.Many2one('res.company', string='Company', required=True,", "== 'day') task.repeat_show_dow = 
task.recurring_task and task.repeat_unit == 'week' task.repeat_show_month", "another stage.\\n\" \"- Periodical Rating: email will be sent periodically.\\n\\n\"", "create(self, vals_list): default_stage = dict() for vals in vals_list: project_id", "update date_assign if vals.get('user_id'): vals['date_assign'] = fields.Datetime.now() # Stage change:", "task.partner_id.email = task.partner_email @api.depends('partner_id.phone') def _compute_partner_phone(self): for task in self:", "task in self: if task.partner_id and task.partner_id.email != task.partner_email: task.partner_email", "favorite_projects.write({'favorite_user_ids': [(3, self.env.uid)]}) def action_view_tasks(self): action = self.with_context(active_id=self.id, active_ids=self.ids) \\", "project for follower in self.message_follower_ids: project.message_subscribe(partner_ids=follower.partner_id.ids, subtype_ids=follower.subtype_ids.ids) if 'tasks' not", "self.env['project.project'].browse(self.env.context['default_project_id']) else: default_project = self.project_id.subtask_project_id or self.project_id ctx = dict(self.env.context)", "recurring tasks. 
Please, disable the recurrence first.')) # stage change:", "once a day by the scheduler @api.model def _send_rating_all(self): projects", "to set alias of tasks to their project if any.", "of commands so we used res_model & res_id displayed_image_id =", "@api.depends('child_ids') def _compute_subtask_count(self): for task in self: task.subtask_count = len(task._get_all_subtasks())", "section or be a default stage; if not set, stages", "This method should be called once a day by the", "it accessible by the recipient(s).\") @api.depends('child_ids.planned_hours') def _compute_subtask_planned_hours(self): for task", "cannot be shared with the recipient(s) because the privacy of", "Account')), 'company_id': values.get('company_id') or self.env.company.id, 'partner_id': values.get('partner_id'), 'active': True, })", "# CRUD overrides # ------------------------------------------------ @api.model def default_get(self, default_fields): vals", "@api.depends('project_ids', 'project_ids.rating_active') def _compute_disabled_rating_warning(self): for stage in self: disabled_projects =", "in vals and not vals.get('allow_recurring_tasks'): self.env['project.task'].search([('project_id', 'in', self.ids), ('recurring_task', '=',", "= fields.Boolean(string=\"Thu\", compute='_compute_repeat', readonly=False) fri = fields.Boolean(string=\"Fri\", compute='_compute_repeat', readonly=False) sat", "partner_id changes, the task partner_id remains the same. 
\"\"\" for", "create_context = dict(self.env.context or {}) create_context['default_user_id'] = False if custom_values", "= fields.Integer(\"Sub-task count\", compute='_compute_subtask_count') email_from = fields.Char(string='Email From', help=\"These people", "= fields.Selection([ ('date', 'Date of the Year'), ('day', 'Day of", "kanban state to 'blocked' (red bullet).\\n\") is_closed = fields.Boolean('Closing Stage',", "\"The project cannot be shared with the recipient(s) because the", "string='Working hours to close', store=True, group_operator=\"avg\") working_days_open = fields.Float(compute='_compute_elapsed', string='Working", "self.recurrence_id.unlink() tasks = self recurrence_update = vals.pop('recurrence_update', 'this') if recurrence_update", "in project.favorite_user_ids def _inverse_is_favorite(self): favorite_projects = not_fav_projects = self.env['project.project'].sudo() for", "task._message_add_suggested_recipient(recipients, partner=task.partner_id, reason=reason) elif task.email_from: task._message_add_suggested_recipient(recipients, email=task.email_from, reason=_('Customer Email')) return", "self: if project.partner_id and project.partner_phone != project.partner_id.phone: project.partner_id.phone = project.partner_phone", "super(Project, self).copy(default) if self.subtask_project_id == self: project.subtask_project_id = project for", "project.doc_count = Attachment.search_count([ '|', '&', ('res_model', '=', 'project.project'), ('res_id', '=',", "responsible if no other responsible is # found. create_context =", "so we used res_model & res_id displayed_image_id = fields.Many2one('ir.attachment', domain=\"[('res_model',", "of tasks.')) @api.constrains('allowed_user_ids') def _check_no_portal_allowed(self): for task in self.filtered(lambda t:", "in JS. 
new_partner = message.partner_ids.filtered(lambda partner: partner.email == self.email_from) if", "Indeed we # do not want to explicitly set user_id", "= [{'url': take_action, 'title': _('I take it')}] new_group[2]['actions'] = project_actions", "recurrence.id tasks = super().create(vals_list) for task in tasks: if task.project_id.privacy_visibility", "= self recurrence_update = vals.pop('recurrence_update', 'this') if recurrence_update != 'this':", "readonly=False) sun = fields.Boolean(string=\"Sun\", compute='_compute_repeat', readonly=False) repeat_day = fields.Selection([ (str(i),", "timedelta(days=7) if 'repeat_weekday' in default_fields: vals['repeat_weekday'] = self._fields.get('repeat_weekday').selection[week_start][0] return vals", "','.join(current_objects) if self.tag_ids: headers['X-Odoo-Tags'] = ','.join(self.tag_ids.mapped('name')) return headers def _message_post_after_hook(self,", "_notify_get_reply_to(self, default=None, records=None, company=None, doc_names=None): \"\"\" Override to set alias", "project.alias_domain and project.alias_id.alias_name @api.depends('allowed_internal_user_ids', 'allowed_portal_user_ids') def _compute_allowed_users(self): for project in", "project_user_group_id in pdata['groups'] and pdata['id'] in allowed_user_ids new_group = ('group_project_user',", "= fields.Float(compute='_compute_elapsed', string='Working hours to close', store=True, group_operator=\"avg\") working_days_open =", "\"\"\" res = super(Task, self).message_subscribe(partner_ids=partner_ids, channel_ids=channel_ids, subtype_ids=subtype_ids) if partner_ids: new_allowed_users", "partner_ids=None, channel_ids=None, subtype_ids=None): \"\"\" Add the users subscribed to allowed", "def _onchange_task_company(self): if self.project_id.company_id != self.company_id: self.project_id = False @api.depends('project_id.company_id')", "allows to avoid ugly hacks in JS. 
new_partner = message.partner_ids.filtered(lambda", "= fields.Boolean('Active', default=True) name = fields.Char(string='Stage Name', required=True, translate=True) description", "taken from the lead: - section_id: if set, stages must", "not project_id: return False return self.stage_find(project_id, [('fold', '=', False), ('is_closed',", "task.repeat_month, count=number_occurrences) date_format = self.env['res.lang']._lang_get(self.env.user.lang).date_format task.recurrence_message = '<ul>' for date", "Account\", copy=False, ondelete='set null', domain=\"['|', ('company_id', '=', False), ('company_id', '=',", "of _get_default_stage_id or _stage_find), if project_id not in default_stage: default_stage[project_id]", "string='Working hours to assign', store=True, group_operator=\"avg\") working_hours_close = fields.Float(compute='_compute_elapsed', string='Working", "task.partner_email = task.partner_id.email def _inverse_partner_email(self): for task in self: if", "fields.Many2one('res.company', string='Company', required=True, default=lambda self: self.env.company) currency_id = fields.Many2one('res.currency', related=\"company_id.currency_id\",", "task.partner_email @api.depends('partner_id.phone') def _compute_partner_phone(self): for task in self: if task.partner_id", "a day by the scheduler @api.model def _send_rating_all(self): projects =", "# Rating business # --------------------------------------------------- def _send_task_rating_mail(self, force_send=False): for task", "the project is too restricted. 
Set the privacy to 'Visible", "= self.env['account.analytic.account'] for project in self: if project.analytic_account_id and not", "for task in self: if task.partner_id and task.partner_phone != task.partner_id.phone:", "self.repeat_until > fields.Date.today()) @api.depends('recurrence_id') def _compute_recurring_count(self): self.recurring_count = 0 recurring_tasks", "@api.model_create_multi def create(self, vals_list): default_stage = dict() for vals in", "fields.Char(string='Email From', help=\"These people will receive email.\", index=True, compute='_compute_email_from', store=\"True\",", "'res.users', 'project_favorite_user_rel', 'project_id', 'user_id', default=_get_default_favorite_user_ids, string='Members') is_favorite = fields.Boolean(compute='_compute_is_favorite', inverse='_inverse_is_favorite',", "must belong to this section or be a default stage;", "from odoo.osv.expression import OR from .project_task_recurrence import DAYS, WEEKS class", "= fields.Many2one('res.users', string='Project Manager', related='project_id.user_id', readonly=True) company_id = fields.Many2one( 'res.company',", "in self: users = project.allowed_internal_user_ids | project.allowed_portal_user_ids project.allowed_user_ids = users", "Project', readonly=True) allow_subtasks = fields.Boolean(string=\"Allow Sub-tasks\", related=\"project_id.allow_subtasks\", readonly=True) subtask_count =", "'project_ids': project_ids, 'stage_ids': self.ids }) context = dict(self.env.context) context['stage_view'] =", "11) name = fields.Char('Name', required=True) color = fields.Integer(string='Color', default=_get_default_color) _sql_constraints", "task._check_recursion(): raise ValidationError(_('Error! 
You cannot create recursive hierarchy of task(s).'))", "update date_assign if vals.get('user_id') and 'date_assign' not in vals: vals['date_assign']", "{c.get('recurrence_id')[0]: c.get('recurrence_id_count') for c in count} for task in recurring_tasks:", "vals = super(Task, self).default_get(default_fields) days = list(DAYS.keys()) week_start = fields.Datetime.today().weekday()", "in self: if task.partner_id and task.partner_phone != task.partner_id.phone: task.partner_id.phone =", "restricted. Set the privacy of the project to 'Visible by", "duplicated task defaults['parent_id'] = old_to_new_tasks.get(task.parent_id.id, False) new_task = task.copy(defaults) old_to_new_tasks[task.id]", "duration_data = task.project_id.resource_calendar_id.get_work_duration_data(dt_create_date, dt_date_assign, compute_leaves=True) task.working_hours_open = duration_data['hours'] task.working_days_open =", "project.partner_email != project.partner_id.email: project.partner_id.email = project.partner_email @api.depends('partner_id.phone') def _compute_partner_phone(self): for", "= fields.Boolean(compute='_compute_is_favorite', inverse='_inverse_is_favorite', string='Show Project on dashboard', help=\"Whether this project", "in Recurrence', 'type': 'ir.actions.act_window', 'res_model': 'project.task', 'view_mode': 'tree,form', 'domain': [('recurrence_id',", "the task partner_id has been set: 1) if the project", "== 'year' and task.repeat_on_year == 'date') task.repeat_show_week = task.recurring_task and", "will be sent to the customer when the task reaches", "fields.Datetime.today().weekday() if all(d in default_fields for d in days): vals[days[week_start]]", "0)) @api.model def _map_tasks_default_valeus(self, task, project): \"\"\" get the default", "task.recurrence_id.id), ('create_date', '>=', task.create_date)]]) else: recurrence_domain = [('recurrence_id', 'in', self.recurrence_id.ids)]", "for p in disabled_projects) else: stage.disabled_rating_warning = False class 
Project(models.Model):", "[('id', 'in', stages.ids)] if 'default_project_id' in self.env.context: search_domain = ['|',", "True, }) return analytic_account def _create_analytic_account(self): for project in self:", "fields.Selection([ ('day', 'Days'), ('week', 'Weeks'), ('month', 'Months'), ('year', 'Years'), ],", "default=1, compute='_compute_repeat', readonly=False) repeat_unit = fields.Selection([ ('day', 'Days'), ('week', 'Weeks'),", "%s' % p.name for p in disabled_projects) else: stage.disabled_rating_warning =", "self.env['project.task'].search([('stage_id', 'in', self.ids)]).write({'active': False}) return super(ProjectTaskType, self).write(vals) @api.depends('project_ids', 'project_ids.rating_active') def", "task.email_split(msg) partner_ids = [p.id for p in self.env['mail.thread']._mail_find_partner_from_emails(email_list, records=task, force_create=False)", "self.stage_id.fold: take_action = self._notify_get_action_link('assign', **local_msg_vals) project_actions = [{'url': take_action, 'title':", "action_recurring_tasks(self): return { 'name': 'Tasks in Recurrence', 'type': 'ir.actions.act_window', 'res_model':", "inverse='_inverse_is_favorite', string='Show Project on dashboard', help=\"Whether this project should be", "= self.env['mail.message.subtype'].browse(subtype_ids) if subtype_ids else None task_subtypes = (project_subtypes.mapped('parent_id') |", "def _compute_recurrence_message(self): self.recurrence_message = False for task in self.filtered(lambda t:", "= task.partner_id and task.partner_email != task.partner_id.email will_write_phone = task.partner_id and", "Stage\", readonly=True, related_sudo=False) parent_id = fields.Many2one('project.task', string='Parent Task', index=True) child_ids", "portal_privacy: group_data['has_button_access'] = True return groups def _notify_get_reply_to(self, default=None, records=None,", "of this task.\") user_id = fields.Many2one('res.users', string='Assigned to', default=lambda self:", "return 
self.repeat_interval > 0 and\\ (not self.repeat_show_dow or self._get_weekdays()) and\\", "allow write access right if 'is_favorite' in vals: vals.pop('is_favorite') self._fields['is_favorite'].determine_inverse(self)", "project in self: project.is_favorite = self.env.user in project.favorite_user_ids def _inverse_is_favorite(self):", "copy=False) ribbon_message = fields.Char('Ribbon message', compute='_compute_ribbon_message') partner_city = fields.Char(related='partner_id.city', readonly=False)", "default: self.map_tasks(project.id) return project @api.model def create(self, vals): # Prevent", "in self: project.alias_enabled = project.alias_domain and project.alias_id.alias_name @api.depends('allowed_internal_user_ids', 'allowed_portal_user_ids') def", "if self.repeat_unit == 'week': return [fn(n) for day, fn in", "project and tasks.\\n\" \"- All internal users: employees may see", "fields.Boolean(related=\"stage_id.is_closed\", string=\"Closing Stage\", readonly=True, related_sudo=False) parent_id = fields.Many2one('project.task', string='Parent Task',", "sun = fields.Boolean(string=\"Sun\", compute='_compute_repeat', readonly=False) repeat_day = fields.Selection([ (str(i), str(i))", "allowed_user_ids = self.project_id.allowed_portal_user_ids.partner_id.ids groups.insert(0, ( 'allowed_portal_users', lambda pdata: pdata['type'] ==", "0.0, 'partner_id': msg.get('author_id') } defaults.update(custom_values) task = super(Task, self.with_context(create_context)).message_new(msg, custom_values=defaults)", "fn in DAYS.items() if self[day]] return [DAYS.get(self.repeat_weekday)(n)] @api.depends( 'recurring_task', 'repeat_interval',", "when the task reaches this step.\") auto_validation_kanban_state = fields.Boolean('Automatic kanban", "_inverse_partner_phone(self): for task in self: if task.partner_id and task.partner_phone !=", "= str(fields.Datetime.today().day) if 'repeat_month' in default_fields: vals['repeat_month'] = 
self._fields.get('repeat_month').selection[fields.Datetime.today().month -", "copy=False, ondelete='set null', domain=\"['|', ('company_id', '=', False), ('company_id', '=', company_id)]\",", "msg_vals=None): \"\"\" Handle project users and managers recipients that can", "task.name, 'company_id': project.company_id.id, } def map_tasks(self, new_project_id): \"\"\" copy and", "will_write_email: task.ribbon_message = _('By saving this change, the customer email", "False) new_task = task.copy(defaults) old_to_new_tasks[task.id] = new_task.id tasks += new_task", "email. \"\"\" email_list = self.email_split(msg) partner_ids = [p.id for p", "fields.Many2one('res.partner', string='Customer', auto_join=True, tracking=True, domain=\"['|', ('company_id', '=', False), ('company_id', '=',", "('fri', 'Friday'), ('sat', 'Saturday'), ('sun', 'Sunday'), ], string='Day Of The", "'working_days_open', 'working_days_close'], 0.0)) @api.depends('stage_id', 'kanban_state') def _compute_kanban_state_label(self): for task in", "= defaults.get(f) else: task[f] = False def _get_weekdays(self, n=1): self.ensure_one()", "will_write_email and will_write_phone: task.ribbon_message = _('By saving this change, the", "day by the scheduler @api.model def _send_rating_all(self): projects = self.search([", "to be responsible if no other responsible is # found.", "compute='_compute_alias_enabled', readonly=False) alias_id = fields.Many2one('mail.alias', string='Alias', ondelete=\"restrict\", required=True, help=\"Internal email", "self: if project.partner_id and project.partner_phone != project.partner_id.phone: project.partner_phone = project.partner_id.phone", "for project. 
not_fav_projects.write({'favorite_user_ids': [(4, self.env.uid)]}) favorite_projects.write({'favorite_user_ids': [(3, self.env.uid)]}) def action_view_tasks(self):", "'repeat_month', 'repeat_weekday') def _compute_recurrence_message(self): self.recurrence_message = False for task in", "which sub-tasks of the current project will be created. It", "task.parent_id.partner_id @api.depends('partner_id.email', 'parent_id.email_from') def _compute_email_from(self): for task in self: task.email_from", "# by using default get (instead of _get_default_stage_id or _stage_find),", "between record creation and assignation/closing. working_hours_open = fields.Float(compute='_compute_elapsed', string='Working hours", "'kanban_state' not in vals: vals['kanban_state'] = 'normal' # user_id change:", "Subscribe to all existing active tasks when subscribing to a", "domain=['|', ('stage_id.fold', '=', False), ('stage_id', '=', False)]) color = fields.Integer(string='Color", "id\" _rating_satisfaction_days = False # takes all existing ratings _check_company_auto", "in allowed_user_ids, {} )) portal_privacy = self.project_id.privacy_visibility == 'portal' for", "project_actions = [{'url': take_action, 'title': _('I take it')}] new_group[2]['actions'] =", "'company_id': values.get('company_id') or self.env.company.id, 'partner_id': values.get('partner_id'), 'active': True, }) return", "= False class Project(models.Model): _name = \"project.project\" _description = \"Project\"", "tasks: %(tasks_count)s</em></p>') % {'tasks_count': len(recurring_dates)} def _is_recurrence_valid(self): self.ensure_one() return self.repeat_interval", "email will be sent periodically.\\n\\n\" \"Don't forget to set up", "string='Kanban Valid Explanation', readonly=True, related_sudo=False) legend_normal = fields.Char(related='stage_id.legend_normal', string='Kanban Ongoing", "'name': msg.get('subject') or _(\"No Subject\"), 'email_from': msg.get('from'), 'planned_hours': 0.0, 'partner_id':", "kanban status', 
default=False, help=\"Automatically modify the kanban state when the", "= allowed_users - project.allowed_portal_user_ids def _compute_access_url(self): super(Project, self)._compute_access_url() for project", "for group_name, group_method, group_data in groups: if group_name in ('customer',", "rating on stage if 'stage_id' in vals and vals.get('stage_id'): self.filtered(lambda", "followed by\\n\" \" them or by someone of their company.\")", "new_group[2]['actions'] = project_actions groups = [new_group] + groups if self.project_id.privacy_visibility", "bullet).\\n\") is_closed = fields.Boolean('Closing Stage', help=\"Tasks in this stage are", "and task.create_date ) for task in task_linked_to_calendar: dt_create_date = fields.Datetime.from_string(task.create_date)", "project else: not_fav_projects |= project # Project User has no", "its tasks, too self.with_context(active_test=False).mapped('tasks').write({'active': vals['active']}) if vals.get('partner_id') or vals.get('privacy_visibility'): for", "copy=False, readonly=True) date_deadline = fields.Date(string='Deadline', index=True, copy=False, tracking=True) date_last_stage_update =", "aliases] @api.model def message_new(self, msg, custom_values=None): \"\"\" Overrides mail_thread message_new", "string=\"Task Activities\") resource_calendar_id = fields.Many2one( 'resource.calendar', string='Working Time', related='company_id.resource_calendar_id') type_ids", "self: will_write_email = task.partner_id and task.partner_email != task.partner_id.email will_write_phone =", "Every', default=1, compute='_compute_repeat', readonly=False) repeat_unit = fields.Selection([ ('day', 'Days'), ('week',", "the projects with a least 1 task in that stage", "for section_id in section_ids: search_domain.append(('project_ids', '=', section_id)) search_domain += list(domain)", "Delete the empty related analytic account analytic_accounts_to_delete = self.env['account.analytic.account'] for", 
"project.message_subscribe(partner_ids=follower.partner_id.ids, subtype_ids=follower.subtype_ids.ids) if 'tasks' not in default: self.map_tasks(project.id) return project", "= fields.Char('Ribbon message', compute='_compute_ribbon_message') partner_city = fields.Char(related='partner_id.city', readonly=False) manager_id =", "if task.partner_id and task.partner_email != task.partner_id.email: task.partner_id.email = task.partner_email @api.depends('partner_id.phone')", "self.displayed_image_id = image_attachments[0] if self.email_from and not self.partner_id: # we", "= fields.Char(related='partner_id.city', readonly=False) manager_id = fields.Many2one('res.users', string='Project Manager', related='project_id.user_id', readonly=True)", "feedback?\\n\" \"- Rating when changing stage: an email will be", "following tasks'), ('all', 'All tasks'), ], default='this', store=False) recurrence_message =", "_name = \"project.tags\" _description = \"Project Tags\" def _get_default_color(self): return", "update date_last_stage_update if 'stage_id' in vals: vals.update(self.update_date_end(vals['stage_id'])) vals['date_last_stage_update'] = now", "date_start)', 'Error! 
project start-date must be lower than project end-date.')", "|= internal_users return res def message_unsubscribe(self, partner_ids=None, channel_ids=None): \"\"\" Unsubscribe", "@api.model def get_empty_list_help(self, help): tname = _(\"task\") project_id = self.env.context.get('default_project_id',", "task.partner_phone = task.partner_id.phone def _inverse_partner_phone(self): for task in self: if", "project @api.model def create(self, vals): # Prevent double project creation", "fields.Selection( [('stage', 'Rating when changing stage'), ('periodic', 'Periodical Rating') ],", "you want to get the customer's feedbacks.\") rating_status_period = fields.Selection([", "of the Month'), ], default='date', compute='_compute_repeat', readonly=False) repeat_on_year = fields.Selection([", "= task.mapped('message_ids.attachment_ids').ids # from mail_thread task.attachment_ids = [(6, 0, list(set(attachment_ids)", "self.ids }) return { 'name': _('Confirmation'), 'view_mode': 'form', 'res_model': 'project.delete.wizard',", "store=False) recurrence_message = fields.Char(string='Next Recurrencies', compute='_compute_recurrence_message') repeat_interval = fields.Integer(string='Repeat Every',", "result def message_subscribe(self, partner_ids=None, channel_ids=None, subtype_ids=None): \"\"\" Subscribe to all", "is 'Rating when changing stage', then an email will be", "--------------------------------------------------- def toggle_favorite(self): favorite_projects = not_fav_projects = self.env['project.project'].sudo() for project", "task in self: task.repeat_show_day = task.recurring_task and (task.repeat_unit == 'month'", "message_subscribe(self, partner_ids=None, channel_ids=None, subtype_ids=None): \"\"\" Add the users subscribed to", "in pdata['groups'] and pdata['id'] in allowed_user_ids new_group = ('group_project_user', group_func,", "= self.env.context.get('default_project_id') if not project_id: return False return self.stage_find(project_id, [('fold',", "('company_id', 
'=', False), ('company_id', '=', company_id)]\", check_company=True, help=\"Analytic account to", "_compute_repeat(self): rec_fields = self._get_recurrence_fields() defaults = self.default_get(rec_fields) for task in", "_compute_project_id(self): for task in self: if not task.project_id: task.project_id =", "sent to the customer when the task reaches this step.\")", "partner_ids=None, channel_ids=None, subtype_ids=None): \"\"\" Subscribe to all existing active tasks", "a.mimetype == 'image') if image_attachments: self.displayed_image_id = image_attachments[0] if self.email_from", "defaults = { 'name': msg.get('subject') or _(\"No Subject\"), 'email_from': msg.get('from'),", "should be called once a day by the scheduler @api.model", "set user_id to False; however we do not # want", "project.allowed_portal_user_ids = allowed_users.filtered('share') project.allowed_internal_user_ids = allowed_users - project.allowed_portal_user_ids def _compute_access_url(self):", "= [('id', 'in', stages.ids)] if 'default_project_id' in self.env.context: search_domain =", "feedback for this stage.\\n\" \" * A good feedback from", "= fields.Boolean(compute='_compute_repeat_visibility') repeat_show_day = fields.Boolean(compute='_compute_repeat_visibility') repeat_show_week = fields.Boolean(compute='_compute_repeat_visibility') repeat_show_month =", "required=True, translate=True) description = fields.Text(translate=True) sequence = fields.Integer(default=1) project_ids =", "}) return { 'name': _('Confirmation'), 'view_mode': 'form', 'res_model': 'project.delete.wizard', 'views':", "and not portal_privacy: group_data['has_button_access'] = False elif group_name == 'portal_customer'", "[(self.env.ref('project.project_delete_wizard_form').id, 'form')], 'type': 'ir.actions.act_window', 'res_id': wizard.id, 'target': 'new', 'context': self.env.context,", "+ groups if self.project_id.privacy_visibility == 'portal': allowed_user_ids = self.project_id.allowed_portal_user_ids.partner_id.ids 
groups.insert(0,", "user_names)) def _compute_attachment_ids(self): for task in self: attachment_ids = self.env['ir.attachment'].search([('res_id',", "string=\"Currency\", readonly=True) analytic_account_id = fields.Many2one('account.analytic.account', string=\"Analytic Account\", copy=False, ondelete='set null',", "1 recurring_dates = self.env['project.task.recurrence']._get_next_recurring_dates( date + timedelta(days=delta), task.repeat_interval, task.repeat_unit, task.repeat_type,", "rating fields rating_request_deadline = fields.Datetime(compute='_compute_rating_request_deadline', store=True) rating_active = fields.Boolean('Customer Ratings',", "was created through the chatter using # suggested recipients. This", "and assignation/closing. working_hours_open = fields.Float(compute='_compute_elapsed', string='Working hours to assign', store=True,", "active = fields.Boolean(default=True) name = fields.Char(string='Title', tracking=True, required=True, index=True) description", "\"\"\" res = super(Project, self).message_subscribe(partner_ids=partner_ids, channel_ids=channel_ids, subtype_ids=subtype_ids) project_subtypes = self.env['mail.message.subtype'].browse(subtype_ids)", "'=', new_partner.email), ('stage_id.fold', '=', False)]).write({'partner_id': new_partner.id}) return super(Task, self)._message_post_after_hook(message, msg_vals)", "== 'subsequent': for task in self: recurrence_domain = OR([recurrence_domain, ['&',", "def _check_parent_id(self): if not self._check_recursion(): raise ValidationError(_('Error! 
You cannot create", "which you want to get the customer's feedbacks.\") rating_status_period =", "'mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun', 'repeat_day', 'repeat_week', 'repeat_month',", "t: t.recurring_task and t._is_recurrence_valid()): date = fields.Date.today() number_occurrences = min(5,", "group_operator=\"avg\") working_hours_close = fields.Float(compute='_compute_elapsed', string='Working hours to close', store=True, group_operator=\"avg\")", "','.join(self.tag_ids.mapped('name')) return headers def _message_post_after_hook(self, message, msg_vals): if message.attachment_ids and", "lower than project end-date.') ] @api.depends('partner_id.email') def _compute_partner_email(self): for project", "the kanban state to 'blocked' (red bullet).\\n\") is_closed = fields.Boolean('Closing", "project without removing it.\") sequence = fields.Integer(default=10, help=\"Gives the sequence", "day, fn in DAYS.items() if self[day]] return [DAYS.get(self.repeat_weekday)(n)] @api.depends( 'recurring_task',", "tracking=True) subtask_planned_hours = fields.Float(\"Sub-tasks Planned Hours\", compute='_compute_subtask_planned_hours', help=\"Sum of the", "'ir.actions.act_window', 'res_id': wizard.id, 'target': 'new', 'context': self.env.context, } def unlink(self):", "be sent to the customer when the task reaches this", "= self.sudo().mapped('project_id')._notify_get_reply_to(default=default, records=None, company=company, doc_names=None) res = {task.id: aliases.get(task.project_id.id) for", "= self.env['account.analytic.account'].create({ 'name': values.get('name', _('Unknown Analytic Account')), 'company_id': values.get('company_id') or", "parent task partner_id changes, the task partner_id remains the same.", "self.recurrence_message = False for task in self.filtered(lambda t: t.recurring_task and", "= False @api.depends('project_id.company_id') def _compute_company_id(self): for task in self.filtered(lambda task:", "user_id change: update date_assign if 
vals.get('user_id'): vals['date_assign'] = fields.Datetime.now() #", "is not already an alias aliases = self.mapped('project_id.alias_name') return [x", "display.') rating_template_id = fields.Many2one( 'mail.template', string='Rating Email Template', domain=[('model', '=',", "_compute_access_url(self): super(Task, self)._compute_access_url() for task in self: task.access_url = '/my/task/%s'", "string=\"Visible to\", groups='project.group_project_manager', compute='_compute_allowed_user_ids', store=True, readonly=False, copy=False) project_privacy_visibility = fields.Selection(related='project_id.privacy_visibility',", "= super().create(vals_list) for task in tasks: if task.project_id.privacy_visibility == 'portal':", "the chatter using # suggested recipients. This heuristic allows to", "DAYS.items() if self[day]] return [DAYS.get(self.repeat_weekday)(n)] @api.depends( 'recurring_task', 'repeat_interval', 'repeat_unit', 'repeat_type',", "been set: 1) if the project partner_id changes, the task", "\"date_assign\" _inherit = ['portal.mixin', 'mail.thread.cc', 'mail.activity.mixin', 'rating.mixin'] _mail_post_access = 'read'", "project.id if project.privacy_visibility == 'portal' and project.partner_id.user_ids: project.allowed_user_ids |= project.partner_id.user_ids", "= fields.Selection([ ('date', 'Date of the Month'), ('day', 'Day of", "self.env['project.task'].search([('project_id', 'in', self.ids), ('recurring_task', '=', True)]).write({'recurring_task': False}) if 'active' in", "to allowed portal users \"\"\" res = super(Project, self).message_subscribe(partner_ids=partner_ids, channel_ids=channel_ids,", "task or issue is in that stage.') mail_template_id = fields.Many2one(", "create_date = fields.Datetime(\"Created On\", readonly=True, index=True) write_date = fields.Datetime(\"Last Updated", "== 'week': return [fn(n) for day, fn in DAYS.items() if", "self)._track_template(changes) test_task = self[0] if 'stage_id' in changes and 
test_task.stage_id.mail_template_id:", "list of Projects.\") partner_id = fields.Many2one('res.partner', string='Customer', auto_join=True, tracking=True, domain=\"['|',", "'weekly': 7, 'bimonthly': 15, 'monthly': 30, 'quarterly': 90, 'yearly': 365}", "return only direct children # If depth == 3, return", "for task in self: rating_template = task.stage_id.rating_template_id if rating_template: task.rating_send_request(rating_template,", "or issue reaches this step.\") fold = fields.Boolean(string='Folded in Kanban',", "group_name == 'portal_customer' and not portal_privacy: group_data['has_button_access'] = False elif", "(task.repeat_unit == 'year' and task.repeat_on_year == 'date') task.repeat_show_week = task.recurring_task", "end-date.') ] @api.depends('partner_id.email') def _compute_partner_email(self): for project in self: if", "fields.Boolean(string=\"Sun\", compute='_compute_repeat', readonly=False) repeat_day = fields.Selection([ (str(i), str(i)) for i", "len(recurring_dates) > 5: task.recurrence_message += '<li>...</li>' task.recurrence_message += '</ul>' if", "partner_id has been set: 1) if the project partner_id changes,", "new_allowed_users = self.env['res.partner'].browse(partner_ids).user_ids.filtered('share') tasks = self.filtered(lambda task: task.project_id.privacy_visibility == 'portal')", "give default subtask field in `default_get` 'default_company_id': default_project.company_id.id if default_project", "hierarchy of task(s).')) @api.model def get_empty_list_help(self, help): tname = _(\"task\")", "project_id = fields.Many2one('project.project', string='Project', compute='_compute_project_id', store=True, readonly=False, index=True, tracking=True, check_company=True,", "== 'week' task.repeat_show_month = task.recurring_task and task.repeat_unit == 'year' @api.depends('recurring_task')", "See LICENSE file for full copyright and licensing details. 
import", "return res # ---------------------------------------- # Case management # ---------------------------------------- def", "in self.filtered(lambda task: task.project_id): task.company_id = task.project_id.company_id @api.depends('project_id') def _compute_stage_id(self):", "= {} defaults = { 'name': msg.get('subject') or _(\"No Subject\"),", "fields.Many2one('project.project', string='Sub-task Project', ondelete=\"restrict\", help=\"Project in which sub-tasks of the", "def _compute_email_from(self): for task in self: task.email_from = task.partner_id.email or", "group_name, group_method, group_data in groups: if group_name in ('customer', 'user')", "in Recurrence\", compute='_compute_recurring_count') recurrence_id = fields.Many2one('project.task.recurrence', copy=False) recurrence_update = fields.Selection([", "or task_subtypes: self.mapped('tasks').message_subscribe( partner_ids=partner_ids, channel_ids=channel_ids, subtype_ids=task_subtypes) if partner_ids: all_users =", "\"{}\") defaults['project_id'] = self.id return values # --------------------------------------------------- # Actions", "children without depth limit def _get_all_subtasks(self, depth=0): children = self.mapped('child_ids').filtered(lambda", "analytic_account = self.env['account.analytic.account'].create({ 'name': project.name, 'company_id': project.company_id.id, 'partner_id': project.partner_id.id, 'active':", "project in self: if project.partner_id and project.partner_phone != project.partner_id.phone: project.partner_id.phone", "|= project.partner_id.user_ids return project def write(self, vals): allowed_users_changed = 'allowed_portal_user_ids'", "msg.get('author_id') } defaults.update(custom_values) task = super(Task, self.with_context(create_context)).message_new(msg, custom_values=defaults) email_list =", "folded stage and date_last_stage_update if vals.get('stage_id'): vals.update(self.update_date_end(vals['stage_id'])) vals['date_last_stage_update'] = fields.Datetime.now()", 
"'portal'): portal_users = task.allowed_user_ids.filtered('share') if portal_users: user_names = ', '.join(portal_users[:10].mapped('name'))", "copy(self, default=None): if default is None: default = {} if", "domain=\"['|', ('company_id', '=', False), ('company_id', '=', company_id)]\", check_company=True, help=\"Analytic account", "_check_no_portal_allowed(self): for task in self.filtered(lambda t: t.project_id.privacy_visibility != 'portal'): portal_users", "or '')) # check left-part is not already an alias", "'=', self._name), ('message_type', 'in', ['email', 'comment'])]) # recurrence fields allow_recurring_tasks", "def action_assign_to_me(self): self.write({'user_id': self.env.user.id}) # If depth == 1, return", "'This and following tasks'), ('all', 'All tasks'), ], default='this', store=False)", "write(self, vals): now = fields.Datetime.now() if 'parent_id' in vals and", "= [new_group] + groups if self.project_id.privacy_visibility == 'portal': allowed_user_ids =", "self.repeat_number) and\\ (self.repeat_type != 'until' or self.repeat_until and self.repeat_until >", "fields.Char(string='Next Recurrencies', compute='_compute_recurrence_message') repeat_interval = fields.Integer(string='Repeat Every', default=1, compute='_compute_repeat', readonly=False)", "action = self.env['ir.actions.act_window']._for_xml_id('project.rating_rating_action_view_project_rating') action['name'] = _('Ratings of %s') % (self.name,)", "and task.partner_phone != task.partner_id.phone: task.partner_id.phone = task.partner_phone @api.depends('partner_email', 'partner_phone', 'partner_id')", "('yearly', 'Yearly')], 'Rating Frequency', required=True, default='monthly') _sql_constraints = [ ('project_date_greater',", "Rating: email will be sent periodically.\\n\\n\" \"Don't forget to set", "+ ':', 'default_parent_id': self.id, # will give default subtask field", "\"\"\" return the action to see all the analytic lines", "task.parent_id) and task.email_from) or task.parent_id.email_from 
@api.depends('parent_id.project_id.subtask_project_id') def _compute_project_id(self): for task", "default='this', store=False) recurrence_message = fields.Char(string='Next Recurrencies', compute='_compute_recurrence_message') repeat_interval = fields.Integer(string='Repeat", "self.default_get(rec_fields) for task in self: for f in rec_fields: if", "= [('id', 'child_of', self.id), ('id', '!=', self.id)] # update context,", "default=\"forever\", string=\"Until\", compute='_compute_repeat', readonly=False) repeat_until = fields.Date(string=\"End Date\", compute='_compute_repeat', readonly=False)", "[] if recurrence_update == 'subsequent': for task in self: recurrence_domain", "if rec_fields: rec_values = {rec_field: vals[rec_field] for rec_field in rec_fields}", "compute='_compute_recurring_count') recurrence_id = fields.Many2one('project.task.recurrence', copy=False) recurrence_update = fields.Selection([ ('this', 'This", "def _get_weekdays(self, n=1): self.ensure_one() if self.repeat_unit == 'week': return [fn(n)", "self.ids)], ['project_id'], ['project_id']) project_ids = list(set([project['project_id'][0] for project in readgroup]", "fields.Date(string='Expiration Date', index=True, tracking=True) subtask_project_id = fields.Many2one('project.project', string='Sub-task Project', ondelete=\"restrict\",", "**local_msg_vals) project_actions = [{'url': take_action, 'title': _('I take it')}] new_group[2]['actions']", "= fields.Boolean(string=\"Tue\", compute='_compute_repeat', readonly=False) wed = fields.Boolean(string=\"Wed\", compute='_compute_repeat', readonly=False) thu", "vals.get('stage_id'): vals.update(self.update_date_end(vals['stage_id'])) vals['date_last_stage_update'] = fields.Datetime.now() # recurrence rec_fields = vals.keys()", "project is too restricted. 
Set the privacy of the project", "self.env[\"ir.actions.actions\"]._for_xml_id(\"project.project_task_action_sub_task\") # display all subtasks of current task action['domain'] =", "of documents attached\") date_start = fields.Date(string='Start Date') date = fields.Date(string='Expiration", "} def write(self, vals): if 'active' in vals and not", "repeat_show_dow = fields.Boolean(compute='_compute_repeat_visibility') repeat_show_day = fields.Boolean(compute='_compute_repeat_visibility') repeat_show_week = fields.Boolean(compute='_compute_repeat_visibility') repeat_show_month", "should be displayed on your dashboard.\") label_tasks = fields.Char(string='Use Tasks", "= task.recurrence_id[f] else: if task.recurring_task: task[f] = defaults.get(f) else: task[f]", "+= _('<p><em>Number of tasks: %(tasks_count)s</em></p>') % {'tasks_count': len(recurring_dates)} def _is_recurrence_valid(self):", "by project), # by using default get (instead of _get_default_stage_id", "def _notify_get_groups(self, msg_vals=None): \"\"\" Handle project users and managers recipients", "recipient(s).\") @api.depends('rating_status', 'rating_status_period') def _compute_rating_request_deadline(self): periods = {'daily': 1, 'weekly':", "project in self: if project.partner_id and project.partner_id.email != project.partner_email: project.partner_email", "date.strftime(date_format) if task.repeat_type == 'after' and task.repeat_number > 5 or", "fields.Selection([ ('0', 'Normal'), ('1', 'Important'), ], default='0', index=True, string=\"Priority\") sequence", "('employees', 'All internal users'), ('portal', 'Invited portal users and all", "+= '</ul>' if task.repeat_type == 'until': task.recurrence_message += _('<p><em>Number of", "vals and not vals.get('allow_recurring_tasks'): self.env['project.task'].search([('project_id', 'in', self.ids), ('recurring_task', '=', True)]).write({'recurring_task':", "aliases = self.sudo().mapped('project_id')._notify_get_reply_to(default=default, records=None, 
company=company, doc_names=None) res = {task.id: aliases.get(task.project_id.id)", "{ 'auto_delete_message': True, 'subtype_id': self.env['ir.model.data'].xmlid_to_res_id('mail.mt_note'), 'email_layout_xmlid': 'mail.mail_notification_light' }) return res", "string='Tasks', domain=['|', ('stage_id.fold', '=', False), ('stage_id', '=', False)]) color =", "False def _compute_alias_enabled(self): for project in self: project.alias_enabled = project.alias_domain", "create new one directly from notification emails. Also give access", "it or first delete all of its tasks.')) # Delete", "@api.depends('project_id.company_id') def _compute_company_id(self): for task in self.filtered(lambda task: task.project_id): task.company_id", "'Last'), ], default='first', compute='_compute_repeat', readonly=False) repeat_weekday = fields.Selection([ ('mon', 'Monday'),", "1) def action_open_parent_task(self): return { 'name': _('Parent Task'), 'view_mode': 'form',", "]) def _compute_task_count(self): task_data = self.env['project.task'].read_group([('project_id', 'in', self.ids), '|', '&',", "self[day]] return [DAYS.get(self.repeat_weekday)(n)] @api.depends( 'recurring_task', 'repeat_interval', 'repeat_unit', 'repeat_type', 'repeat_until', 'repeat_number',", "medium or a bad feedback will set the kanban state", "fields.Many2one('project.project', string='Project', compute='_compute_project_id', store=True, readonly=False, index=True, tracking=True, check_company=True, change_default=True) planned_hours", "'=', True)]).write({'recurring_task': False}) if 'active' in vals: # archiving/unarchiving a", "self).get_empty_list_help(help) def message_subscribe(self, partner_ids=None, channel_ids=None, subtype_ids=None): \"\"\" Add the users", "vals['date_last_stage_update'] = now # reset kanban state when changing stage", "recipients = super(Task, self)._message_get_suggested_recipients() for task in self: if task.partner_id:", "# --------------------------------------------------- # This method 
should be called once a", "task.repeat_type == 'forever' or len(recurring_dates) > 5: task.recurrence_message += '<li>...</li>'", "('periodic', 'Periodical Rating') ], 'Customer Ratings Status', default=\"stage\", required=True, help=\"How", "in `default_get` 'default_company_id': default_project.company_id.id if default_project else self.env.company.id, }) action['context']", "subtype_ids=subtype_ids) if partner_ids: new_allowed_users = self.env['res.partner'].browse(partner_ids).user_ids.filtered('share') tasks = self.filtered(lambda task:", "@api.depends('partner_id.email', 'parent_id.email_from') def _compute_email_from(self): for task in self: task.email_from =", "it accessible by the recipient(s).\") @api.depends('rating_status', 'rating_status_period') def _compute_rating_request_deadline(self): periods", "order='parent_id').ids old_to_new_tasks = {} for task in self.env['project.task'].browse(task_ids): # preserve", "a project And add the portal user subscribed to allowed", "= \"priority desc, sequence, id desc\" _check_company_auto = True def", "[('id', 'child_of', self.id), ('id', '!=', self.id)] # update context, with", "set a task as its parent task.\")) if 'active' in", "!= 'portal'): project.access_warning = _( \"The project cannot be shared", "'repeat_until', 'repeat_number', 'repeat_on_month', 'repeat_on_year', 'mon', 'tue', 'wed', 'thu', 'fri', 'sat',", "lambda pdata: pdata['type'] == 'user' and project_user_group_id in pdata['groups'] and", "'form', 'res_model': 'project.delete.wizard', 'views': [(self.env.ref('project.project_delete_wizard_form').id, 'form')], 'type': 'ir.actions.act_window', 'res_id': wizard.id,", "and project_user_group_id in pdata['groups'] if self.project_id.privacy_visibility == 'followers': allowed_user_ids =", "also be updated.') else: task.ribbon_message = False @api.constrains('parent_id') def _check_parent_id(self):", "date + timedelta(days=delta), task.repeat_interval, task.repeat_unit, task.repeat_type, 
task.repeat_until, task.repeat_on_month, task.repeat_on_year, task._get_weekdays(WEEKS.get(task.repeat_week)),", "= [('account_id', '=', self.analytic_account_id.id)] return action def action_view_all_rating(self): \"\"\" return", "in new_allowed_users]}) return res # ---------------------------------------- # Case management #", "default = {} if not default.get('name'): default['name'] = _(\"%s (copy)\",", "portal_users = all_users.filtered('share') internal_users = all_users - portal_users self.allowed_portal_user_ids |=", "task.partner_id = task.project_id.partner_id else: task.partner_id = task.project_id.partner_id or task.parent_id.partner_id @api.depends('partner_id.email',", "creation self = self.with_context(mail_create_nosubscribe=True) project = super(Project, self).create(vals) if not", "_sql_constraints = [ ('name_uniq', 'unique (name)', \"Tag name already exists!\"),", "default_fields): vals = super(Task, self).default_get(default_fields) days = list(DAYS.keys()) week_start =", "'Yearly')], 'Rating Frequency', required=True, default='monthly') _sql_constraints = [ ('project_date_greater', 'check(date", "= fields.Selection([ ('normal', 'In Progress'), ('done', 'Ready'), ('blocked', 'Blocked')], string='Kanban", "wizard.id, 'target': 'new', 'context': context, } def write(self, vals): if", "to stop the recurrence raise UserError(_('You cannot delete recurring tasks.", "odoo.exceptions import UserError, AccessError, ValidationError, RedirectWarning from odoo.tools.misc import format_date,", "('stage_id.fold', '=', False)]).write({'partner_id': new_partner.id}) return super(Task, self)._message_post_after_hook(message, msg_vals) def action_assign_to_me(self):", "be sent when a task is pulled in another stage.\\n\"", "for project in self: project.alias_enabled = project.alias_domain and project.alias_id.alias_name @api.depends('allowed_internal_user_ids',", "90, 'yearly': 365} for project in self: project.rating_request_deadline = fields.datetime.now()", 
"= vals.pop('recurrence_update', 'this') if recurrence_update != 'this': recurrence_domain = []", "posting a message with a specified recipient (not a follower,", "= task.copy(defaults) old_to_new_tasks[task.id] = new_task.id tasks += new_task return project.write({'tasks':", "string='Sub-task Project', ondelete=\"restrict\", help=\"Project in which sub-tasks of the current", "'=', task.recurrence_id.id), ('create_date', '>=', task.create_date)]]) else: recurrence_domain = [('recurrence_id', 'in',", "Update date_end if folded stage and date_last_stage_update if vals.get('stage_id'): vals.update(self.update_date_end(vals['stage_id']))", "index=True, default=_get_default_stage_id, group_expand='_read_group_stage_ids', domain=\"[('project_ids', '=', project_id)]\", copy=False) tag_ids = fields.Many2many('project.tags',", "old_to_new_tasks.get(task.parent_id.id, False) new_task = task.copy(defaults) old_to_new_tasks[task.id] = new_task.id tasks +=", "to make it accessible by the recipient(s).\") @api.depends('rating_status', 'rating_status_period') def", "translate=True) description = fields.Text(translate=True) sequence = fields.Integer(default=1) project_ids = fields.Many2many('project.project',", "= dict((data['project_id'][0], data['project_id_count']) for data in task_data) for project in", "project.partner_id and project.partner_phone != project.partner_id.phone: project.partner_phone = project.partner_id.phone def _inverse_partner_phone(self):", "project.alias_enabled = project.alias_domain and project.alias_id.alias_name @api.depends('allowed_internal_user_ids', 'allowed_portal_user_ids') def _compute_allowed_users(self): for", "Week', compute='_compute_repeat', readonly=False) repeat_month = fields.Selection([ ('january', 'January'), ('february', 'February'),", "self.env.uid)]}) favorite_projects.write({'favorite_user_ids': [(3, self.env.uid)]}) def action_view_tasks(self): action = self.with_context(active_id=self.id, active_ids=self.ids)", 
"[default_project_id] if default_project_id else None active = fields.Boolean('Active', default=True) name", "('1', 'Important'), ], default='0', index=True, string=\"Priority\") sequence = fields.Integer(string='Sequence', index=True,", "task.repeat_on_month == 'day') or (task.repeat_unit == 'year' and task.repeat_on_year ==", "= self.env.context.get('default_project_id', False) if project_id: name = self.env['project.project'].browse(project_id).label_tasks if name:", "Hours\", help='Time planned to achieve this task (including its sub-tasks).',", "= fields.Many2many('res.users', string=\"Visible to\", groups='project.group_project_manager', compute='_compute_allowed_user_ids', store=True, readonly=False, copy=False) project_privacy_visibility", "(not self.repeat_show_dow or self._get_weekdays()) and\\ (self.repeat_type != 'after' or self.repeat_number)", "we # do not want to explicitly set user_id to", "task.allowed_user_ids -= portal_users elif task.project_id.privacy_visibility != 'followers': task.allowed_user_ids -= internal_users", "tracking=True, domain=\"['|', ('company_id', '=', False), ('company_id', '=', company_id)]\") partner_email =", "on project duplication \"\"\" return { 'stage_id': task.stage_id.id, 'name': task.name,", "p] self.message_subscribe(partner_ids) return super(Task, self).message_update(msg, update_vals=update_vals) def _message_get_suggested_recipients(self): recipients =", "and x.project_id.rating_status == 'stage')._send_task_rating_mail(force_send=True) return result def update_date_end(self, stage_id): project_task_type", "stage # a task can be in a stage even", "not vals['active']: self.env['project.task'].search([('stage_id', 'in', self.ids)]).write({'active': False}) return super(ProjectTaskType, self).write(vals) @api.depends('project_ids',", "in task.child_ids) @api.depends('child_ids') def _compute_subtask_count(self): for task in self: task.subtask_count", "+ ',' + (msg.get('cc') or '')) # check left-part is", "return 
{'date_end': fields.Datetime.now()} return {'date_end': False} def unlink(self): if any(self.mapped('recurrence_id')):", "= task.legend_blocked else: task.kanban_state_label = task.legend_done def _compute_access_url(self): super(Task, self)._compute_access_url()", "dict() for vals in vals_list: project_id = vals.get('project_id') or self.env.context.get('default_project_id')", "internal users'), ], string='Visibility', required=True, default='portal', help=\"Defines the visibility of", "365} for project in self: project.rating_request_deadline = fields.datetime.now() + timedelta(days=periods.get(project.rating_status_period,", "active = fields.Boolean(default=True, help=\"If the active field is set to", "string=\"Main Attachments\", help=\"Attachment that don't come from message.\") # In", "values = super(Project, self)._alias_get_creation_values() values['alias_model_id'] = self.env['ir.model']._get('project.task').id if self.id: values['alias_defaults']", "in Kanban', help='This stage is folded in the kanban view", "user_names = ', '.join(portal_users[:10].mapped('name')) raise ValidationError(_(\"The project visibility setting doesn't", "import format_date, get_lang from odoo.osv.expression import OR from .project_task_recurrence import", "[{'url': take_action, 'title': _('I take it')}] new_group[2]['actions'] = project_actions groups", "if 'recurring_task' in vals and not vals.get('recurring_task'): self.recurrence_id.unlink() tasks =", "{k: v for k, v in ctx.items() if not k.startswith('search_default_')}", "subtype_ids=subtype_ids) project_subtypes = self.env['mail.message.subtype'].browse(subtype_ids) if subtype_ids else None task_subtypes =", "\"- Periodical Rating: email will be sent periodically.\\n\\n\" \"Don't forget", "default=lambda s: _('Ready'), translate=True, required=True, help='Override the default value displayed", "Usually less or equal to the initially time planned of", "recurrence first.')) return super().unlink() # 
--------------------------------------------------- # Subtasks # ---------------------------------------------------", "fields.Datetime.today() recurrence = self.env['project.task.recurrence'].create(rec_values) vals['recurrence_id'] = recurrence.id tasks = super().create(vals_list)", "'domain': [('recurrence_id', 'in', self.recurrence_id.ids)], } # --------------------------------------------------- # Rating business", "_send_rating_all(self): projects = self.search([ ('rating_active', '=', True), ('rating_status', '=', 'periodic'),", "fields.Many2many('res.users', 'project_allowed_internal_users_rel', string=\"Allowed Internal Users\", default=lambda self: self.env.user, domain=[('share', '=',", "if project_id not in default_stage: default_stage[project_id] = self.with_context( default_project_id=project_id ).default_get(['stage_id']).get('stage_id')", "else: task.stage_id = False @api.returns('self', lambda value: value.id) def copy(self,", "blocked state for kanban selection, when the task or issue", "subtype_ids=follower.subtype_ids.ids) if 'tasks' not in default: self.map_tasks(project.id) return project @api.model", "= duration_data['hours'] task.working_days_close = duration_data['days'] else: task.working_hours_close = 0.0 task.working_days_close", "'allowed_portal_user_ids') def _compute_allowed_users(self): for project in self: users = project.allowed_internal_user_ids", "default_fields: vals['repeat_weekday'] = self._fields.get('repeat_weekday').selection[week_start][0] return vals @api.model_create_multi def create(self, vals_list):", "v in ctx.items() if not k.startswith('search_default_')} ctx.update({ 'default_name': self.env.context.get('name', self.name)", "fields.Char(related='stage_id.legend_done', string='Kanban Valid Explanation', readonly=True, related_sudo=False) legend_normal = fields.Char(related='stage_id.legend_normal', string='Kanban", "if not project_id: return False return self.stage_find(project_id, [('fold', '=', False),", "% 
(self._name, self.id) return action def _compute_is_favorite(self): for project in", "project.company_id.id, 'partner_id': project.partner_id.id, 'active': True, }) project.write({'analytic_account_id': analytic_account.id}) # ---------------------------------------------------", "new_partner = message.partner_ids.filtered(lambda partner: partner.email == self.email_from) if new_partner: self.search([", "task.recurring_task: task[f] = defaults.get(f) else: task[f] = False def _get_weekdays(self,", "string='Project', compute='_compute_project_id', store=True, readonly=False, index=True, tracking=True, check_company=True, change_default=True) planned_hours =", "equal to the initially time planned of this task.\") user_id", "@api.model def _default_company_id(self): if self._context.get('default_project_id'): return self.env['project.project'].browse(self._context['default_project_id']).company_id return self.env.company @api.model", "self.env.company) currency_id = fields.Many2one('res.currency', related=\"company_id.currency_id\", string=\"Currency\", readonly=True) analytic_account_id = fields.Many2one('account.analytic.account',", "changing stage: an email will be sent when a task", "one2many is represented as a list of commands so we", "= 'normal' # user_id change: update date_assign if vals.get('user_id') and", "recipient (not a follower, a specific one) # on a", "{ 'name': _('Delete Stage'), 'view_mode': 'form', 'res_model': 'project.task.type.delete.wizard', 'views': [(self.env.ref('project.view_project_task_type_delete_wizard').id,", "tname = _(\"task\") project_id = self.env.context.get('default_project_id', False) if project_id: name", "self.env['project.task'].read_group([('project_id', 'in', self.ids), '|', '&', ('stage_id.is_closed', '=', False), ('stage_id.fold', '=',", "= task.legend_normal elif task.kanban_state == 'blocked': task.kanban_state_label = task.legend_blocked else:", "stage and date_last_stage_update if vals.get('stage_id'): 
vals.update(self.update_date_end(vals['stage_id'])) vals['date_last_stage_update'] = fields.Datetime.now() #", "self: if task.recurrence_id: task.recurrence_id.write(rec_values) elif vals.get('recurring_task'): rec_values['next_recurrence_date'] = fields.Datetime.today() recurrence", "commands so we used res_model & res_id displayed_image_id = fields.Many2one('ir.attachment',", "channel_ids=None): \"\"\" Unsubscribe from all tasks when unsubscribing from a", "the project.\", translate=True) tasks = fields.One2many('project.task', 'project_id', string=\"Task Activities\") resource_calendar_id", "readonly=False) repeat_until = fields.Date(string=\"End Date\", compute='_compute_repeat', readonly=False) repeat_number = fields.Integer(string=\"Repetitions\",", "= fields.Selection([ ('first', 'First'), ('second', 'Second'), ('third', 'Third'), ('last', 'Last'),", "depth=0): children = self.mapped('child_ids').filtered(lambda children: children.active) if not children: return", "defaults are correct (and computed once by project), # by", "a task can be in a stage even if the", "= \"project.project\" _description = \"Project\" _inherit = ['portal.mixin', 'mail.alias.mixin', 'mail.thread',", "_('By saving this change, the customer phone number will also", "> 0 and\\ (not self.repeat_show_dow or self._get_weekdays()) and\\ (self.repeat_type !=", "fields.datetime.now() + timedelta(days=periods.get(project.rating_status_period, 0)) @api.model def _map_tasks_default_valeus(self, task, project): \"\"\"", "_compute_partner_id(self): \"\"\" If a task has no partner_id, use the", "project in self: allowed_users = project.allowed_user_ids project.allowed_portal_user_ids = allowed_users.filtered('share') project.allowed_internal_user_ids", "project to 'Visible by following customers' in order to make", "= fields.Datetime.now() # recurrence rec_fields = vals.keys() & self._get_recurrence_fields() if", "if default_project_id else None active = fields.Boolean('Active', default=True) 
name =", "project = self.browse(new_project_id) tasks = self.env['project.task'] # We want to", "fields.Integer(compute='_compute_task_count', string=\"Task Count\") task_ids = fields.One2many('project.task', 'project_id', string='Tasks', domain=['|', ('stage_id.fold',", "% task.id def _compute_access_warning(self): super(Task, self)._compute_access_warning() for task in self.filtered(lambda", "the email. \"\"\" email_list = self.email_split(msg) partner_ids = [p.id for", "it')}] new_group[2]['actions'] = project_actions groups = [new_group] + groups if", "details. import ast from datetime import timedelta, datetime from random", "if 'active' in vals: # archiving/unarchiving a project does it", "timedelta(days=delta), task.repeat_interval, task.repeat_unit, task.repeat_type, task.repeat_until, task.repeat_on_month, task.repeat_on_year, task._get_weekdays(WEEKS.get(task.repeat_week)), task.repeat_day, task.repeat_week,", "to close', store=True, group_operator=\"avg\") working_days_open = fields.Float(compute='_compute_elapsed', string='Working days to", "return task def message_update(self, msg, update_vals=None): \"\"\" Override to update", "task.child_ids) @api.depends('child_ids') def _compute_subtask_count(self): for task in self: task.subtask_count =", "company_id)]\", check_company=True, help=\"Analytic account to which this project is linked", "Please, disable the recurrence first.')) return super().unlink() # --------------------------------------------------- #", "does not contains all field in its view if self._context.get('default_project_id'):", "all_users.filtered('share') internal_users = all_users - portal_users self.allowed_portal_user_ids |= portal_users self.allowed_internal_user_ids", "task.recurring_task and task.repeat_unit == 'year' @api.depends('recurring_task') def _compute_repeat(self): rec_fields =", "self: disabled_projects = stage.project_ids.filtered(lambda p: not p.rating_active) if disabled_projects: stage.disabled_rating_warning", 
"readonly=False) sat = fields.Boolean(string=\"Sat\", compute='_compute_repeat', readonly=False) sun = fields.Boolean(string=\"Sun\", compute='_compute_repeat',", "= fields.Integer(string='Color', default=_get_default_color) _sql_constraints = [ ('name_uniq', 'unique (name)', \"Tag", "!= project.partner_id.phone: project.partner_phone = project.partner_id.phone def _inverse_partner_phone(self): for project in", "= fields.Html() active = fields.Boolean(default=True, help=\"If the active field is", "= {'daily': 1, 'weekly': 7, 'bimonthly': 15, 'monthly': 30, 'quarterly':", "help='This stage is folded in the kanban view when there", "the new stage' (green bullet).\\n\" \" * A medium or", "super(Project, self).unlink() analytic_accounts_to_delete.unlink() return result def message_subscribe(self, partner_ids=None, channel_ids=None, subtype_ids=None):", "Stages') task_count = fields.Integer(compute='_compute_task_count', string=\"Task Count\") task_ids = fields.One2many('project.task', 'project_id',", "def create(self, vals_list): default_stage = dict() for vals in vals_list:", "_name = \"project.task\" _description = \"Task\" _date_name = \"date_assign\" _inherit", "{'date_end': fields.Datetime.now()} return {'date_end': False} def unlink(self): if any(self.mapped('recurrence_id')): #", "elif will_write_email: task.ribbon_message = _('By saving this change, the customer", "user_id = fields.Many2one('res.users', string='Project Manager', default=lambda self: self.env.user, tracking=True) alias_enabled", "ugly hacks in JS. 
new_partner = message.partner_ids.filtered(lambda partner: partner.email ==", "readonly=False) repeat_day = fields.Selection([ (str(i), str(i)) for i in range(1,", "= name.lower() self = self.with_context( empty_list_help_id=self.env.context.get('default_project_id'), empty_list_help_model='project.project', empty_list_help_document_name=tname, ) return", "= fields.Selection([ ('mon', 'Monday'), ('tue', 'Tuesday'), ('wed', 'Wednesday'), ('thu', 'Thursday'),", "on a document without customer means that it was created", "fields.Boolean(string=\"Wed\", compute='_compute_repeat', readonly=False) thu = fields.Boolean(string=\"Thu\", compute='_compute_repeat', readonly=False) fri =", "in vals and not vals.get('recurring_task'): self.recurrence_id.unlink() tasks = self recurrence_update", "for project in projects: project.task_ids._send_task_rating_mail() project._compute_rating_request_deadline() self.env.cr.commit() class Task(models.Model): _name", "'followers': task.allowed_user_ids |= task.project_id.allowed_internal_user_ids task.allowed_user_ids -= portal_users elif task.project_id.privacy_visibility ==", "group_data['has_button_access'] = True return groups def _notify_get_reply_to(self, default=None, records=None, company=None,", "= fields.Many2many('res.users', compute='_compute_allowed_users', inverse='_inverse_allowed_user') allowed_internal_user_ids = fields.Many2many('res.users', 'project_allowed_internal_users_rel', string=\"Allowed Internal", "all the projects with a least 1 task in that", "- set(message_attachment_ids)))] @api.depends('project_id.allowed_user_ids', 'project_id.privacy_visibility') def _compute_allowed_user_ids(self): for task in self:", "'in', self.ids), '&', ('res_model', '=', 'project.task'), ('res_id', 'in', self.task_ids.ids) ])", "def _compute_is_favorite(self): for project in self: project.is_favorite = self.env.user in", "in self: task.subtask_planned_hours = sum(child_task.planned_hours + child_task.subtask_planned_hours for 
child_task in", "stop the recurrence raise UserError(_('You cannot archive recurring tasks. Please,", "_get_recurrence_fields(self): return ['repeat_interval', 'repeat_unit', 'repeat_type', 'repeat_until', 'repeat_number', 'repeat_on_month', 'repeat_on_year', 'mon',", "SUPERUSER_ID, _ from odoo.exceptions import UserError, AccessError, ValidationError, RedirectWarning from", "= list(DAYS.keys()) week_start = fields.Datetime.today().weekday() if all(d in default_fields for", "= _('By saving this change, the customer email and phone", "fields.Datetime.now()) ]) for project in projects: project.task_ids._send_task_rating_mail() project._compute_rating_request_deadline() self.env.cr.commit() class", "used for the tasks of the project.\", translate=True) tasks =", "string='Company', required=True, default=lambda self: self.env.company) currency_id = fields.Many2one('res.currency', related=\"company_id.currency_id\", string=\"Currency\",", "def map_tasks(self, new_project_id): \"\"\" copy and map tasks from old", "less or equal to the initially time planned of this", "= fields.Datetime(string='Last Stage Update', index=True, copy=False, readonly=True) project_id = fields.Many2one('project.project',", "stages for which you want to get the customer's feedbacks.\")", "self)._message_post_after_hook(message, msg_vals) def action_assign_to_me(self): self.write({'user_id': self.env.user.id}) # If depth ==", "default_stage = dict() for vals in vals_list: project_id = vals.get('project_id')", "res = super(Project, self).message_subscribe(partner_ids=partner_ids, channel_ids=channel_ids, subtype_ids=subtype_ids) project_subtypes = self.env['mail.message.subtype'].browse(subtype_ids) if", "= [p.id for p in self.env['mail.thread']._mail_find_partner_from_emails(email_list, records=self, force_create=False) if p]", "@api.onchange('company_id') def _onchange_task_company(self): if self.project_id.company_id != self.company_id: self.project_id = False", "if task.project_id not in 
task.stage_id.project_ids: task.stage_id = task.stage_find(task.project_id.id, [ ('fold',", "templates on the stages for which you want to get", "vals['active']}) if vals.get('partner_id') or vals.get('privacy_visibility'): for project in self.filtered(lambda project:", "default='0', index=True, string=\"Priority\") sequence = fields.Integer(string='Sequence', index=True, default=10, help=\"Gives the", "headers['X-Odoo-Objects'] = ','.join(current_objects) if self.tag_ids: headers['X-Odoo-Tags'] = ','.join(self.tag_ids.mapped('name')) return headers", "copy and map tasks from old to new project \"\"\"", "fields.Many2one( 'res.company', string='Company', compute='_compute_company_id', store=True, readonly=False, required=True, copy=True, default=_default_company_id) color", "section_ids: search_domain.append(('project_ids', '=', section_id)) search_domain += list(domain) # perform search,", "p in self.env['mail.thread']._mail_find_partner_from_emails(email_list, records=task, force_create=False) if p] task.message_subscribe(partner_ids) return task", "('create_date', '>=', task.create_date)]]) else: recurrence_domain = [('recurrence_id', 'in', self.recurrence_id.ids)] tasks", "not task.project_id: task.project_id = task.parent_id.project_id.subtask_project_id # --------------------------------------------------- # Mail gateway", "'=', 'project.task')], help=\"If set and if the project's rating configuration", "_('Unknown Analytic Account')), 'company_id': values.get('company_id') or self.env.company.id, 'partner_id': values.get('partner_id'), 'active':", "!= 'until' or self.repeat_until and self.repeat_until > fields.Date.today()) @api.depends('recurrence_id') def", ".sudo().read()[0] action['display_name'] = self.name return action def action_view_account_analytic_line(self): \"\"\" return", "an analytic account to record cost and revenue on your", "date_assign = fields.Datetime(string='Assigning Date', index=True, copy=False, readonly=True) date_deadline = 
fields.Date(string='Deadline',", "children + children._get_all_subtasks(depth - 1) def action_open_parent_task(self): return { 'name':", "# recurrence fields rec_fields = vals.keys() & self._get_recurrence_fields() if rec_fields:", "in its view if self._context.get('default_project_id'): default_project = self.env['project.project'].browse(self.env.context['default_project_id']) else: default_project", "fields.Date.today() number_occurrences = min(5, task.repeat_number if task.repeat_type == 'after' else", "date_start = fields.Date(string='Start Date') date = fields.Date(string='Expiration Date', index=True, tracking=True)", "'Saturday'), ('sun', 'Sunday'), ], string='Day Of The Week', compute='_compute_repeat', readonly=False)", "help=\"Gives the sequence order when displaying a list of Projects.\")", "count} for task in recurring_tasks: task.recurring_count = tasks_count.get(task.recurrence_id.id, 0) @api.depends('partner_id.email')", "UserError(_('You cannot delete recurring tasks. Please, disable the recurrence first.'))", "section_ids.extend(self.mapped('project_id').ids) search_domain = [] if section_ids: search_domain = [('|')] *", "in self: project.access_url = '/my/project/%s' % project.id def _compute_access_warning(self): super(Project,", "task.partner_id: if task.project_id.partner_id: task.partner_id = task.project_id.partner_id else: task.partner_id = task.project_id.partner_id", "project.task_ids._send_task_rating_mail() project._compute_rating_request_deadline() self.env.cr.commit() class Task(models.Model): _name = \"project.task\" _description =", "msg, update_vals=None): \"\"\" Override to update the task according to", "= {task.id: aliases.get(task.project_id.id) for task in self} leftover = self.filtered(lambda", "vals.update(self.update_date_end(vals['stage_id'])) vals['date_last_stage_update'] = now # reset kanban state when changing", "gateway. 
Indeed we # do not want to explicitly set", "the project partner_id changes, the task partner_id is automatically changed", "records=None, company=None, doc_names=None): \"\"\" Override to set alias of tasks", "@api.model def _map_tasks_default_valeus(self, task, project): \"\"\" get the default value", "self.env['res.lang']._lang_get(self.env.user.lang).date_format task.recurrence_message = '<ul>' for date in recurring_dates[:5]: task.recurrence_message +=", "task.parent_id.email_from @api.depends('parent_id.project_id.subtask_project_id') def _compute_project_id(self): for task in self: if not", "to', default=lambda self: self.env.uid, index=True, tracking=True) partner_id = fields.Many2one('res.partner', string='Customer',", "1) for section_id in section_ids: search_domain.append(('project_ids', '=', section_id)) search_domain +=", "== 'image') if image_attachments: self.displayed_image_id = image_attachments[0] if self.email_from and", "vals.keys() & self._get_recurrence_fields() if rec_fields and vals.get('recurring_task') is True: rec_values", "['|', ('project_ids', '=', self.env.context['default_project_id'])] + search_domain stage_ids = stages._search(search_domain, order=order,", "of the Year'), ('day', 'Day of the Year'), ], default='date',", "return dict(action, context=action_context) # --------------------------------------------------- # Business Methods # ---------------------------------------------------", "'name': project.name, 'company_id': project.company_id.id, 'partner_id': project.partner_id.id, 'active': True, }) project.write({'analytic_account_id':", "done state for kanban selection, when the task or issue", "From', help=\"These people will receive email.\", index=True, compute='_compute_email_from', store=\"True\", readonly=False)", "= self.with_context(project_ids=project_ids).env['project.task.type.delete.wizard'].create({ 'project_ids': project_ids, 'stage_ids': self.ids }) context = dict(self.env.context)", "should probably have 
access to the document. \"\"\" groups =", "task in self: task.subtask_count = len(task._get_all_subtasks()) @api.onchange('company_id') def _onchange_task_company(self): if", "context['stage_view'] = stage_view return { 'name': _('Delete Stage'), 'view_mode': 'form',", "in projects: project.task_ids._send_task_rating_mail() project._compute_rating_request_deadline() self.env.cr.commit() class Task(models.Model): _name = \"project.task\"", "Label', tracking=True) create_date = fields.Datetime(\"Created On\", readonly=True, index=True) write_date =", "= fields.Integer(string=\"Tasks in Recurrence\", compute='_compute_recurring_count') recurrence_id = fields.Many2one('project.task.recurrence', copy=False) recurrence_update", "= fields.Datetime.today().weekday() if all(d in default_fields for d in days):", "employees may see everything.\" \" Portal users may see project", "'views': [(self.env.ref('project.project_delete_wizard_form').id, 'form')], 'type': 'ir.actions.act_window', 'res_id': wizard.id, 'target': 'new', 'context':", "else: not_fav_projects |= project # Project User has no write", "( 'allowed_portal_users', lambda pdata: pdata['type'] == 'portal' and pdata['id'] in", "return { 'name': 'Tasks in Recurrence', 'type': 'ir.actions.act_window', 'res_model': 'project.task',", "from odoo import api, fields, models, tools, SUPERUSER_ID, _ from", "is True: rec_values = {rec_field: vals[rec_field] for rec_field in rec_fields}", "retrieves all the projects with a least 1 task in", "_get_default_favorite_user_ids(self): return [(6, 0, [self.env.uid])] name = fields.Char(\"Name\", index=True, required=True,", "allowed_users_changed: allowed_users = {project: project.allowed_user_ids for project in self} #", "def _compute_subtask_planned_hours(self): for task in self: task.subtask_planned_hours = sum(child_task.planned_hours +", "_description = \"Project\" _inherit = ['portal.mixin', 'mail.alias.mixin', 'mail.thread', 'rating.parent.mixin'] _order", "fields 
rating_request_deadline = fields.Datetime(compute='_compute_rating_request_deadline', store=True) rating_active = fields.Boolean('Customer Ratings', default=lambda", "for project in self: permission_removed = allowed_users.get(project) - project.allowed_user_ids allowed_portal_users_removed", "'blocked': task.kanban_state_label = task.legend_blocked else: task.kanban_state_label = task.legend_done def _compute_access_url(self):", "changing stage'), ('periodic', 'Periodical Rating') ], 'Customer Ratings Status', default=\"stage\",", "'week' task.repeat_show_month = task.recurring_task and task.repeat_unit == 'year' @api.depends('recurring_task') def", "does it on its tasks, too self.with_context(active_test=False).mapped('tasks').write({'active': vals['active']}) if vals.get('partner_id')", "False), ('stage_id', '=', False)], ['project_id'], ['project_id']) result = dict((data['project_id'][0], data['project_id_count'])", "planned of this task.\") user_id = fields.Many2one('res.users', string='Assigned to', default=lambda", "_compute_disabled_rating_warning(self): for stage in self: disabled_projects = stage.project_ids.filtered(lambda p: not", "_('In Progress'), translate=True, required=True, help='Override the default value displayed for", "sum(child_task.planned_hours + child_task.subtask_planned_hours for child_task in task.child_ids) @api.depends('child_ids') def _compute_subtask_count(self):", "number_occurrences = min(5, task.repeat_number if task.repeat_type == 'after' else 5)", "the visibility of the tasks of the project:\\n\" \"- Invited", "disabled_projects = stage.project_ids.filtered(lambda p: not p.rating_active) if disabled_projects: stage.disabled_rating_warning =", "set the kanban state to 'blocked' (red bullet).\\n\") is_closed =", "'Normal'), ('1', 'Important'), ], default='0', index=True, string=\"Priority\") sequence = fields.Integer(string='Sequence',", "group_func, {}) if not self.user_id and not self.stage_id.fold: take_action =", "= 
_('Ratings of %s') % (self.name,) action_context = ast.literal_eval(action['context']) if", "translate=True) tasks = fields.One2many('project.task', 'project_id', string=\"Task Activities\") resource_calendar_id = fields.Many2one(", "project.partner_id.user_ids: project.allowed_user_ids |= project.partner_id.user_ids return project def write(self, vals): allowed_users_changed", "fields.Date(string=\"End Date\", compute='_compute_repeat', readonly=False) repeat_number = fields.Integer(string=\"Repetitions\", default=1, compute='_compute_repeat', readonly=False)", "self)._compute_access_warning() for project in self.filtered(lambda x: x.privacy_visibility != 'portal'): project.access_warning", "compute='_compute_repeat', readonly=False) repeat_number = fields.Integer(string=\"Repetitions\", default=1, compute='_compute_repeat', readonly=False) repeat_on_month =", "@api.depends('child_ids.planned_hours') def _compute_subtask_planned_hours(self): for task in self: task.subtask_planned_hours = sum(child_task.planned_hours", "fields.Datetime(\"Created On\", readonly=True, index=True) write_date = fields.Datetime(\"Last Updated On\", readonly=True,", "for full copyright and licensing details. 
import ast from datetime", "= duration_data['days'] else: task.working_hours_open = 0.0 task.working_days_open = 0.0 if", "new_task return project.write({'tasks': [(6, 0, tasks.ids)]}) @api.returns('self', lambda value: value.id)", "rating_get_partner_id(self): res = super(Task, self).rating_get_partner_id() if not res and self.project_id.partner_id:", "when subscribing to a project And add the portal user", "= fields.Boolean(compute='_compute_repeat_visibility') @api.model def _get_recurrence_fields(self): return ['repeat_interval', 'repeat_unit', 'repeat_type', 'repeat_until',", "the recurrence first.')) return super().unlink() # --------------------------------------------------- # Subtasks #", "vals['date_assign'] = now # recurrence fields rec_fields = vals.keys() &", "task can be in a stage even if the project", "the feedback for this stage.\\n\" \" * A good feedback", "self.project_id.allowed_internal_user_ids.partner_id.ids group_func = lambda pdata: pdata['type'] == 'user' and project_user_group_id", "project is empty for project in self.with_context(active_test=False): if project.tasks: raise", "['project_id'], ['project_id']) result = dict((data['project_id'][0], data['project_id_count']) for data in task_data)", "pdata['id'] in allowed_user_ids new_group = ('group_project_user', group_func, {}) if not", "and not vals.get('recurring_task'): self.recurrence_id.unlink() tasks = self recurrence_update = vals.pop('recurrence_update',", "if vals.get('user_id') and 'date_assign' not in vals: vals['date_assign'] = now", "2) if the parent task partner_id changes, the task partner_id", "random import randint from odoo import api, fields, models, tools,", "project), # by using default get (instead of _get_default_stage_id or", "their project if any. 
\"\"\" aliases = self.sudo().mapped('project_id')._notify_get_reply_to(default=default, records=None, company=company,", "'resource.calendar', string='Working Time', related='company_id.resource_calendar_id') type_ids = fields.Many2many('project.task.type', 'project_task_type_rel', 'project_id', 'type_id',", "compute='_compute_allowed_users', inverse='_inverse_allowed_user') allowed_internal_user_ids = fields.Many2many('res.users', 'project_allowed_internal_users_rel', string=\"Allowed Internal Users\", default=lambda", "partner: partner.email == self.email_from) if new_partner: self.search([ ('partner_id', '=', False),", "= len(task._get_all_subtasks()) @api.onchange('company_id') def _onchange_task_company(self): if self.project_id.company_id != self.company_id: self.project_id", "_check_company_auto = True def _get_default_stage_id(self): \"\"\" Gives default stage_id \"\"\"", "(str(i), str(i)) for i in range(1, 32) ], compute='_compute_repeat', readonly=False)", "in self: recurrence_domain = OR([recurrence_domain, ['&', ('recurrence_id', '=', task.recurrence_id.id), ('create_date',", "feedback will set the kanban state to 'blocked' (red bullet).\\n\")", "the stage readgroup = self.with_context(active_test=False).env['project.task'].read_group([('stage_id', 'in', self.ids)], ['project_id'], ['project_id']) project_ids", "if subtype_ids else None task_subtypes = (project_subtypes.mapped('parent_id') | project_subtypes.filtered(lambda sub:", "project.partner_phone != project.partner_id.phone: project.partner_phone = project.partner_id.phone def _inverse_partner_phone(self): for project", "= vals.get('project_id') or self.env.context.get('default_project_id') if project_id and not \"company_id\" in", "'project_id.partner_id') def _compute_partner_id(self): \"\"\" If a task has no partner_id,", "task.legend_blocked else: task.kanban_state_label = task.legend_done def _compute_access_url(self): super(Task, self)._compute_access_url() for", "'allow_recurring_tasks' in 
vals and not vals.get('allow_recurring_tasks'): self.env['project.task'].search([('project_id', 'in', self.ids), ('recurring_task',", "as', default='Tasks', help=\"Label used for the tasks of the project.\",", "and licensing details. import ast from datetime import timedelta, datetime", "be called once a day by the scheduler @api.model def", "reaches this step.\") fold = fields.Boolean(string='Folded in Kanban', help='This stage", "remove default author when going through the mail gateway. Indeed", "It can be the current project itself.\") allow_subtasks = fields.Boolean('Sub-tasks',", "'thu', 'fri', 'sat', 'sun', 'repeat_day', 'repeat_week', 'repeat_month', 'repeat_weekday') def _compute_recurrence_message(self):", "list(domain) # perform search, return the first found return self.env['project.task.type'].search(search_domain,", "= fields.Selection( [('stage', 'Rating when changing stage'), ('periodic', 'Periodical Rating')", "'February'), ('march', 'March'), ('april', 'April'), ('may', 'May'), ('june', 'June'), ('july',", "copy archived task, but do not propagate an active_test context", "default=lambda self: self.env.uid, index=True, tracking=True) partner_id = fields.Many2one('res.partner', string='Customer', compute='_compute_partner_id',", "self.env['project.task.recurrence']._get_next_recurring_dates( date + timedelta(days=delta), task.repeat_interval, task.repeat_unit, task.repeat_type, task.repeat_until, task.repeat_on_month, task.repeat_on_year,", "task.mapped('message_ids.attachment_ids').ids # from mail_thread task.attachment_ids = [(6, 0, list(set(attachment_ids) -", "self.env.context.get('default_project_id') return [default_project_id] if default_project_id else None active = fields.Boolean('Active',", "= self.with_context( default_project_id=project_id ).default_get(['stage_id']).get('stage_id') vals[\"stage_id\"] = default_stage[project_id] # user_id change:", "task according to the email. 
\"\"\" email_list = self.email_split(msg) partner_ids", "--------------------------------------------------- def _track_template(self, changes): res = super(Task, self)._track_template(changes) test_task =", "is_closed = fields.Boolean(related=\"stage_id.is_closed\", string=\"Closing Stage\", readonly=True, related_sudo=False) parent_id = fields.Many2one('project.task',", "_compute_partner_email(self): for project in self: if project.partner_id and project.partner_id.email !=", "fold = fields.Boolean(string='Folded in Kanban', help='This stage is folded in", "values.get('name', _('Unknown Analytic Account')), 'company_id': values.get('company_id') or self.env.company.id, 'partner_id': values.get('partner_id'),", "and project.partner_id.user_ids: project.allowed_user_ids |= project.partner_id.user_ids return project def write(self, vals):", "|= project # Project User has no write access for", "help='Override the default value displayed for the blocked state for", "'This task'), ('subsequent', 'This and following tasks'), ('all', 'All tasks'),", "parent task.\")) if 'active' in vals and not vals.get('active') and", "fields.Many2one('res.currency', related=\"company_id.currency_id\", string=\"Currency\", readonly=True) analytic_account_id = fields.Many2one('account.analytic.account', string=\"Analytic Account\", copy=False,", "project in self: users = project.allowed_internal_user_ids | project.allowed_portal_user_ids project.allowed_user_ids =", "task.recurrence_id: task[f] = task.recurrence_id[f] else: if task.recurring_task: task[f] = defaults.get(f)", "= self.project_id.privacy_visibility == 'portal' for group_name, group_method, group_data in groups:", "= result.get(project.id, 0) def attachment_tree_view(self): action = self.env['ir.actions.act_window']._for_xml_id('base.action_attachment') action['domain'] =", "for financial management. 
\" \"Use an analytic account to record", "= fields.Many2one(related='partner_id.commercial_partner_id') partner_email = fields.Char( compute='_compute_partner_email', inverse='_inverse_partner_email', string='Email', readonly=False, store=True,", "too restricted. Set the privacy of the project to 'Visible", "= task.recurring_task and (task.repeat_unit == 'month' and task.repeat_on_month == 'day')", "project. not_fav_projects.write({'favorite_user_ids': [(4, self.env.uid)]}) favorite_projects.write({'favorite_user_ids': [(3, self.env.uid)]}) def action_view_tasks(self): action", "('date', 'Date of the Year'), ('day', 'Day of the Year'),", "readonly=False, index=True, tracking=True, check_company=True, change_default=True) planned_hours = fields.Float(\"Initially Planned Hours\",", "if task.recurrence_id: task.recurrence_id.write(rec_values) elif vals.get('recurring_task'): rec_values['next_recurrence_date'] = fields.Datetime.today() recurrence =", "if self.id: values['alias_defaults'] = defaults = ast.literal_eval(self.alias_defaults or \"{}\") defaults['project_id']", "!= task.partner_id.email will_write_phone = task.partner_id and task.partner_phone != task.partner_id.phone if", "the sub-tasks linked to this task. 
Usually less or equal", "fields.Char(related='user_id.email', string='User Email', readonly=True, related_sudo=False) attachment_ids = fields.One2many('ir.attachment', compute='_compute_attachment_ids', string=\"Main", "\"company_id\" in vals: vals[\"company_id\"] = self.env[\"project.project\"].browse( project_id ).company_id.id or self.env.company.id", "Label', default=lambda s: _('Blocked'), translate=True, required=True, help='Override the default value", "msg.get('from'), 'planned_hours': 0.0, 'partner_id': msg.get('author_id') } defaults.update(custom_values) task = super(Task,", "\"- Rating when changing stage: an email will be sent", "the batch creation of tasks # 2) Ensure the defaults", "records in that stage to display.') rating_template_id = fields.Many2one( 'mail.template',", "task defaults['parent_id'] = old_to_new_tasks.get(task.parent_id.id, False) new_task = task.copy(defaults) old_to_new_tasks[task.id] =", "# do not want to explicitly set user_id to False;", "vals.get('subtask_project_id'): project.subtask_project_id = project.id if project.privacy_visibility == 'portal' and project.partner_id.user_ids:", "RedirectWarning from odoo.tools.misc import format_date, get_lang from odoo.osv.expression import OR", "If depth == 1, return only direct children # If", "elif task.project_id.privacy_visibility != 'followers': task.allowed_user_ids -= internal_users @api.depends('create_date', 'date_end', 'date_assign')", "# directly compute is_favorite to dodge allow write access right", "fields.Selection([ ('mon', 'Monday'), ('tue', 'Tuesday'), ('wed', 'Wednesday'), ('thu', 'Thursday'), ('fri',", "portal_users = task.allowed_user_ids.filtered('share') if portal_users: user_names = ', '.join(portal_users[:10].mapped('name')) raise", "compute='_compute_repeat', readonly=False) mon = fields.Boolean(string=\"Mon\", compute='_compute_repeat', readonly=False) tue = fields.Boolean(string=\"Tue\",", "and task.repeat_number > 5 or task.repeat_type == 'forever' or 
len(recurring_dates)", "Stage change: Update date_end if folded stage and date_last_stage_update if", "suggested recipients. This heuristic allows to avoid ugly hacks in", "self.recurrence_id.copy().id return super(Task, self).copy(default) @api.constrains('parent_id') def _check_parent_id(self): for task in", "fields.Selection([ (str(i), str(i)) for i in range(1, 32) ], compute='_compute_repeat',", "help=\"Gives the sequence order when displaying a list of tasks.\")", "self.kanban_state == 'blocked': return self.env.ref('project.mt_task_blocked') elif 'kanban_state_label' in init_values and", "not propagate an active_test context key task_ids = self.env['project.task'].with_context(active_test=False).search([('project_id', '=',", "search_domain = ['|', ('project_ids', '=', self.env.context['default_project_id'])] + search_domain stage_ids =", "return headers def _message_post_after_hook(self, message, msg_vals): if message.attachment_ids and not", "base.stage method Parameter of the stage search taken from the", "project in self: permission_removed = allowed_users.get(project) - project.allowed_user_ids allowed_portal_users_removed =", "vals.keys() & self._get_recurrence_fields() if rec_fields: rec_values = {rec_field: vals[rec_field] for", "values.get('partner_id'), 'active': True, }) return analytic_account def _create_analytic_account(self): for project", "will be sent to the customer when the task or", "a project does it on its tasks, too self.with_context(active_test=False).mapped('tasks').write({'active': vals['active']})", "'/my/project/%s' % project.id def _compute_access_warning(self): super(Project, self)._compute_access_warning() for project in", "will_write_phone: task.ribbon_message = _('By saving this change, the customer phone", "Tasks as', default='Tasks', help=\"Label used for the tasks of the", "return { 'name': _('Parent Task'), 'view_mode': 'form', 'res_model': 'project.task', 'res_id':", "res.update(super(Task, 
leftover)._notify_get_reply_to(default=default, records=None, company=company, doc_names=doc_names)) return res def email_split(self, msg):", "_name = 'project.task.type' _description = 'Task Stage' _order = 'sequence,", "self.env['account.analytic.account'] for project in self: if project.analytic_account_id and not project.analytic_account_id.line_ids:", "super(Project, self)._compute_access_warning() for project in self.filtered(lambda x: x.privacy_visibility != 'portal'):", "old_to_new_tasks[task.id] = new_task.id tasks += new_task return project.write({'tasks': [(6, 0,", "too self.with_context(active_test=False).mapped('tasks').write({'active': vals['active']}) if vals.get('partner_id') or vals.get('privacy_visibility'): for project in", "favorite_user_ids = fields.Many2many( 'res.users', 'project_favorite_user_rel', 'project_id', 'user_id', default=_get_default_favorite_user_ids, string='Members') is_favorite", "_inverse_partner_email(self): for task in self: if task.partner_id and task.partner_email !=", "rating of the project and activate default filters\"\"\" action =", "Month'), ('quarterly', 'Quarterly'), ('yearly', 'Yearly')], 'Rating Frequency', required=True, default='monthly') _sql_constraints", "_inverse_is_favorite(self): favorite_projects = not_fav_projects = self.env['project.project'].sudo() for project in self:", "recurring_tasks = self.filtered(lambda l: l.recurrence_id) count = self.env['project.task'].read_group([('recurrence_id', 'in', recurring_tasks.recurrence_id.ids)],", "(and computed once by project), # by using default get", "def _default_company_id(self): if self._context.get('default_project_id'): return self.env['project.project'].browse(self._context['default_project_id']).company_id return self.env.company @api.model def", "super(Task, self.with_context(create_context)).message_new(msg, custom_values=defaults) email_list = task.email_split(msg) partner_ids = [p.id for", "step.\") auto_validation_kanban_state = 
fields.Boolean('Automatic kanban status', default=False, help=\"Automatically modify the", "'tue', 'wed', 'thu', 'fri', 'sat', 'sun', 'repeat_day', 'repeat_week', 'repeat_month', 'repeat_weekday']", "write_date = fields.Datetime(\"Last Updated On\", readonly=True, index=True) date_end = fields.Datetime(string='Ending", "'October'), ('november', 'November'), ('december', 'December'), ], compute='_compute_repeat', readonly=False) repeat_show_dow =", "for task in self} leftover = self.filtered(lambda rec: not rec.project_id)", "= task.repeat_interval if task.repeat_unit == 'day' else 1 recurring_dates =", "for task in project.task_ids: task.allowed_user_ids -= permission_removed if 'allow_recurring_tasks' in", "stop the recurrence raise UserError(_('You cannot delete recurring tasks. Please,", "sent periodically.\\n\\n\" \"Don't forget to set up the mail templates", "= 0.0 task.working_days_close = 0.0 (self - task_linked_to_calendar).update(dict.fromkeys( ['working_hours_open', 'working_hours_close',", "in project.favorite_user_ids: favorite_projects |= project else: not_fav_projects |= project #", "= new_task.id tasks += new_task return project.write({'tasks': [(6, 0, tasks.ids)]})", "'Weeks'), ('month', 'Months'), ('year', 'Years'), ], default='week', compute='_compute_repeat', readonly=False) repeat_type", "utf-8 -*- # Part of Odoo. See LICENSE file for", "partner_ids: all_users = self.env['res.partner'].browse(partner_ids).user_ids portal_users = all_users.filtered('share') internal_users = all_users", "if action['context'] else {} action_context.update(self._context) action_context['search_default_parent_res_name'] = self.name action_context.pop('group_by', None)", "task in self: if not task._check_recursion(): raise ValidationError(_('Error! 
You cannot", "'>=', task.create_date)]]) else: recurrence_domain = [('recurrence_id', 'in', self.recurrence_id.ids)] tasks |=", "if 'repeat_until' in default_fields: vals['repeat_until'] = fields.Date.today() + timedelta(days=7) if", "vals or 'allowed_internal_user_ids' in vals if allowed_users_changed: allowed_users = {project:", "the initially time planned of this task.\") user_id = fields.Many2one('res.users',", "(copy)\") % (self.name) project = super(Project, self).copy(default) if self.subtask_project_id ==", "allowed_user_ids, {} )) portal_privacy = self.project_id.privacy_visibility == 'portal' for group_name,", "Subject\"), 'email_from': msg.get('from'), 'planned_hours': 0.0, 'partner_id': msg.get('author_id') } defaults.update(custom_values) task", "'type': 'ir.actions.act_window', 'res_id': wizard.id, 'target': 'new', 'context': context, } def", "dodge allow write access right if 'is_favorite' in vals: vals.pop('is_favorite')", "'name': _('Confirmation'), 'view_mode': 'form', 'res_model': 'project.delete.wizard', 'views': [(self.env.ref('project.project_delete_wizard_form').id, 'form')], 'type':", "hours to close', store=True, group_operator=\"avg\") working_days_open = fields.Float(compute='_compute_elapsed', string='Working days", "vals['date_last_stage_update'] = fields.Datetime.now() # recurrence rec_fields = vals.keys() & self._get_recurrence_fields()", "user_id to False; however we do not # want the", "in self} # directly compute is_favorite to dodge allow write", "_inverse_partner_email(self): for project in self: if project.partner_id and project.partner_email !=", "dt_date_end, compute_leaves=True) task.working_hours_close = duration_data['hours'] task.working_days_close = duration_data['days'] else: task.working_hours_close", "project.partner_id and project.partner_email != project.partner_id.email: project.partner_id.email = project.partner_email @api.depends('partner_id.phone') def", "message_new that is called by the mailgateway through 
message_process. This", "self.with_context(active_test=False) # retrieves all the projects with a least 1", "this project should be displayed on your dashboard.\") label_tasks =", "if project_subtypes else None if not subtype_ids or task_subtypes: self.mapped('tasks').message_subscribe(", "string=\"Until\", compute='_compute_repeat', readonly=False) repeat_until = fields.Date(string=\"End Date\", compute='_compute_repeat', readonly=False) repeat_number", "'recurrence_id') tasks_count = {c.get('recurrence_id')[0]: c.get('recurrence_id_count') for c in count} for", "= fields.Char(related='user_id.email', string='User Email', readonly=True, related_sudo=False) attachment_ids = fields.One2many('ir.attachment', compute='_compute_attachment_ids',", "else {} action_context.update(self._context) action_context['search_default_parent_res_name'] = self.name action_context.pop('group_by', None) return dict(action,", "if 'parent_id' in vals and vals['parent_id'] in self.ids: raise UserError(_(\"Sorry.", "the current project will be created. 
It can be the", "'project.project'), ('res_id', 'in', self.ids), '&', ('res_model', '=', 'project.task'), ('res_id', 'in',", "'Thursday'), ('fri', 'Friday'), ('sat', 'Saturday'), ('sun', 'Sunday'), ], string='Day Of", "return action def action_recurring_tasks(self): return { 'name': 'Tasks in Recurrence',", "email will be sent when a task is pulled in", "from all tasks when unsubscribing from a project \"\"\" self.mapped('tasks').message_unsubscribe(partner_ids=partner_ids,", "for the blocked state for kanban selection, when the task", "a project \"\"\" self.mapped('tasks').message_unsubscribe(partner_ids=partner_ids, channel_ids=channel_ids) return super(Project, self).message_unsubscribe(partner_ids=partner_ids, channel_ids=channel_ids) def", "'=', project.id), '&', ('res_model', '=', 'project.task'), ('res_id', 'in', project.task_ids.ids) ])", "= fields.Integer(string=\"Repetitions\", default=1, compute='_compute_repeat', readonly=False) repeat_on_month = fields.Selection([ ('date', 'Date", "self: project.access_url = '/my/project/%s' % project.id def _compute_access_warning(self): super(Project, self)._compute_access_warning()", "message_update(self, msg, update_vals=None): \"\"\" Override to update the task according", "'partner_id') def _compute_ribbon_message(self): for task in self: will_write_email = task.partner_id", "from odoo.tools.misc import format_date, get_lang from odoo.osv.expression import OR from", "values['alias_defaults'] = defaults = ast.literal_eval(self.alias_defaults or \"{}\") defaults['project_id'] = self.id", "not vals.get('active') and any(self.mapped('recurrence_id')): # TODO: show a dialog to", "to the duplicated task defaults['parent_id'] = old_to_new_tasks.get(task.parent_id.id, False) new_task =", "(msg.get('cc') or '')) # check left-part is not already an", "'in', self.ids), ('recurring_task', '=', True)]).write({'recurring_task': False}) if 'active' in vals:", "compute='_compute_repeat', readonly=False) repeat_week = 
fields.Selection([ ('first', 'First'), ('second', 'Second'), ('third',", "the customer email will also be updated.') elif will_write_phone: task.ribbon_message", "Methods # --------------------------------------------------- @api.model def _create_analytic_account_from_values(self, values): analytic_account = self.env['account.analytic.account'].create({", "1 task in that stage # a task can be", "project.write({'analytic_account_id': analytic_account.id}) # --------------------------------------------------- # Rating business # --------------------------------------------------- #", "to\", groups='project.group_project_manager', compute='_compute_allowed_user_ids', store=True, readonly=False, copy=False) project_privacy_visibility = fields.Selection(related='project_id.privacy_visibility', string=\"Project", "for i in range(1, 32) ], compute='_compute_repeat', readonly=False) repeat_week =", "company_id = fields.Many2one('res.company', string='Company', required=True, default=lambda self: self.env.company) currency_id =", "'year' @api.depends('recurring_task') def _compute_repeat(self): rec_fields = self._get_recurrence_fields() defaults = self.default_get(rec_fields)", "all existing active tasks when subscribing to a project And", "'=', id), ('mimetype', 'ilike', 'image')]\", string='Cover Image') legend_blocked = fields.Char(related='stage_id.legend_blocked',", "do not # want the gateway user to be responsible", "a dialog to stop the recurrence raise UserError(_('You cannot delete", "vals['repeat_until'] = fields.Date.today() + timedelta(days=7) if 'repeat_weekday' in default_fields: vals['repeat_weekday']", "licensing details. import ast from datetime import timedelta, datetime from", "('year', 'Years'), ], default='week', compute='_compute_repeat', readonly=False) repeat_type = fields.Selection([ ('forever',", "User has no write access for project. 
not_fav_projects.write({'favorite_user_ids': [(4, self.env.uid)]})", "in self: task.email_from = task.partner_id.email or ((task.partner_id or task.parent_id) and", "stages._search(search_domain, order=order, access_rights_uid=SUPERUSER_ID) return stages.browse(stage_ids) active = fields.Boolean(default=True) name =", "= fields.Boolean(string=\"Sat\", compute='_compute_repeat', readonly=False) sun = fields.Boolean(string=\"Sun\", compute='_compute_repeat', readonly=False) repeat_day", "= False @api.returns('self', lambda value: value.id) def copy(self, default=None): if", "'Blocked')], string='Kanban State', copy=False, default='normal', required=True) kanban_state_label = fields.Char(compute='_compute_kanban_state_label', string='Kanban", "the mailgateway through message_process. This override updates the document according", "p in disabled_projects) else: stage.disabled_rating_warning = False class Project(models.Model): _name", "according to the email. \"\"\" email_list = self.email_split(msg) partner_ids =", "name, id\" _rating_satisfaction_days = False # takes all existing ratings", "c.get('recurrence_id_count') for c in count} for task in recurring_tasks: task.recurring_count", "task_data) for project in self: project.task_count = result.get(project.id, 0) def", "domain=\"[('project_ids', '=', project_id)]\", copy=False) tag_ids = fields.Many2many('project.tags', string='Tags') kanban_state =", "return children + children._get_all_subtasks(depth - 1) def action_open_parent_task(self): return {", "company=company, doc_names=None) res = {task.id: aliases.get(task.project_id.id) for task in self}", "may only see the followed project and tasks.\\n\" \"- All", "recurring_tasks.recurrence_id.ids)], ['id'], 'recurrence_id') tasks_count = {c.get('recurrence_id')[0]: c.get('recurrence_id_count') for c in", "= project.partner_id.email def _inverse_partner_email(self): for project in self: if project.partner_id", "attachment_ids because a one2many is represented as a 
list of", "this project. Incoming emails are automatically synchronized \" \"with Tasks", "partner_id. Once the task partner_id has been set: 1) if", "= fields.Boolean(compute='_compute_repeat_visibility') repeat_show_week = fields.Boolean(compute='_compute_repeat_visibility') repeat_show_month = fields.Boolean(compute='_compute_repeat_visibility') @api.model def", "task in self: if task.project_id: if task.project_id not in task.stage_id.project_ids:", "= fields.Many2one('project.project', string='Project', compute='_compute_project_id', store=True, readonly=False, index=True, tracking=True, check_company=True, change_default=True)", "readonly=False) fri = fields.Boolean(string=\"Fri\", compute='_compute_repeat', readonly=False) sat = fields.Boolean(string=\"Sat\", compute='_compute_repeat',", "task.partner_id: reason = _('Customer Email') if task.partner_id.email else _('Customer') task._message_add_suggested_recipient(recipients,", "leftover = self.filtered(lambda rec: not rec.project_id) if leftover: res.update(super(Task, leftover)._notify_get_reply_to(default=default,", "self.ensure_one() if 'kanban_state_label' in init_values and self.kanban_state == 'blocked': return", "parent task partner_id. Once the task partner_id has been set:", "default=True) name = fields.Char(string='Stage Name', required=True, translate=True) description = fields.Text(translate=True)", "portal users and portal customers. 
If they are notified they", "its parent task.\")) if 'active' in vals and not vals.get('active')", "stage_find(self, section_id, domain=[], order='sequence'): \"\"\" Override of the base.stage method", "index=True, tracking=True, check_company=True, change_default=True) planned_hours = fields.Float(\"Initially Planned Hours\", help='Time", "= self.env['project.project'].browse(self.env.context['default_project_id']) else: default_project = self.project_id.subtask_project_id or self.project_id ctx =", "be sent to the customer when the task or issue", "string=\"Analytic Account\", copy=False, ondelete='set null', domain=\"['|', ('company_id', '=', False), ('company_id',", "'check(date >= date_start)', 'Error! project start-date must be lower than", "a Month'), ('quarterly', 'Quarterly'), ('yearly', 'Yearly')], 'Rating Frequency', required=True, default='monthly')", "for date in recurring_dates[:5]: task.recurrence_message += '<li>%s</li>' % date.strftime(date_format) if", "depth == 3, return children to third generation # If", "== 'portal' and project.partner_id.user_ids: project.allowed_user_ids |= project.partner_id.user_ids return project def", "we couln't use attachment_ids because a one2many is represented as", "repeat_show_month = fields.Boolean(compute='_compute_repeat_visibility') @api.model def _get_recurrence_fields(self): return ['repeat_interval', 'repeat_unit', 'repeat_type',", "self: if task.partner_id and task.partner_id.email != task.partner_email: task.partner_email = task.partner_id.email", "the project partner_id if any, or else the parent task", "project.partner_id.phone: project.partner_phone = project.partner_id.phone def _inverse_partner_phone(self): for project in self:", "description = fields.Html(string='Description') priority = fields.Selection([ ('0', 'Normal'), ('1', 'Important'),", "date_last_stage_update if vals.get('stage_id'): vals.update(self.update_date_end(vals['stage_id'])) vals['date_last_stage_update'] = fields.Datetime.now() # 
recurrence rec_fields", "pdata['type'] == 'user' and project_user_group_id in pdata['groups'] if self.project_id.privacy_visibility ==", "action['name'] = _('Ratings of %s') % (self.name,) action_context = ast.literal_eval(action['context'])", "sub-tasks).', tracking=True) subtask_planned_hours = fields.Float(\"Sub-tasks Planned Hours\", compute='_compute_subtask_planned_hours', help=\"Sum of", "self.ids)]).write({'active': False}) return super(ProjectTaskType, self).write(vals) @api.depends('project_ids', 'project_ids.rating_active') def _compute_disabled_rating_warning(self): for", "be in a stage even if the project is not", "groups def _notify_get_reply_to(self, default=None, records=None, company=None, doc_names=None): \"\"\" Override to", "we do not # want the gateway user to be", "privacy of the project is too restricted. Set the privacy", "on dashboard', help=\"Whether this project should be displayed on your", "string='Day Of The Week', compute='_compute_repeat', readonly=False) repeat_month = fields.Selection([ ('january',", "# Rating business # --------------------------------------------------- # This method should be", "and project.partner_email != project.partner_id.email: project.partner_id.email = project.partner_email @api.depends('partner_id.phone') def _compute_partner_phone(self):", "task.repeat_type == 'after' and task.repeat_number > 5 or task.repeat_type ==", "'Date of the Month'), ('day', 'Day of the Month'), ],", "\" Portal users may see project and tasks followed by\\n\"", "for user in new_allowed_users]}) return res # ---------------------------------------- # Case", "self.env.context: search_domain = ['|', ('project_ids', '=', self.env.context['default_project_id'])] + search_domain stage_ids", "{task.id: aliases.get(task.project_id.id) for task in self} leftover = self.filtered(lambda rec:", "self).rating_get_partner_id() if not res and self.project_id.partner_id: return self.project_id.partner_id return res", "@api.model def 
create(self, vals): # Prevent double project creation self", "test_task.stage_id.mail_template_id: res['stage_id'] = (test_task.stage_id.mail_template_id, { 'auto_delete_message': True, 'subtype_id': self.env['ir.model.data'].xmlid_to_res_id('mail.mt_note'), 'email_layout_xmlid':", "the project:\\n\" \"- Invited internal users: employees may only see", "internal users'), ('portal', 'Invited portal users and all internal users'),", "'<ul>' for date in recurring_dates[:5]: task.recurrence_message += '<li>%s</li>' % date.strftime(date_format)", "- section_id: if set, stages must belong to this section", "the privacy of the project to 'Visible by following customers'", "'repeat_unit', 'repeat_on_month', 'repeat_on_year') def _compute_repeat_visibility(self): for task in self: task.repeat_show_day", "notified they should probably have access to the document. \"\"\"", "selection, when the task or issue is in that stage.')", "project.analytic_account_id.line_ids: analytic_accounts_to_delete |= project.analytic_account_id result = super(Project, self).unlink() analytic_accounts_to_delete.unlink() return", "task, project): \"\"\" get the default value for the copied", "self._map_tasks_default_valeus(task, project) if task.parent_id: # set the parent to the", "folded in the kanban view when there are no records", "project.partner_id.id, 'active': True, }) project.write({'analytic_account_id': analytic_account.id}) # --------------------------------------------------- # Rating", "for the copied task on project duplication \"\"\" return {", "'portal'): project.access_warning = _( \"The project cannot be shared with", "task in self: if task.partner_id and task.partner_phone != task.partner_id.phone: task.partner_id.phone", "default_stage[project_id] # user_id change: update date_assign if vals.get('user_id'): vals['date_assign'] =", "project_task_type.is_closed: return {'date_end': fields.Datetime.now()} return {'date_end': False} def unlink(self): if", "'planned_hours': 0.0, 
'partner_id': msg.get('author_id') } defaults.update(custom_values) task = super(Task, self.with_context(create_context)).message_new(msg,", "fields.Date(string='Start Date') date = fields.Date(string='Expiration Date', index=True, tracking=True) subtask_project_id =", "= self._fields.get('repeat_month').selection[fields.Datetime.today().month - 1][0] if 'repeat_until' in default_fields: vals['repeat_until'] =", "not assigned to the stage readgroup = self.with_context(active_test=False).env['project.task'].read_group([('stage_id', 'in', self.ids)],", "to this section or be a default stage; if not", "rating_active = fields.Boolean('Customer Ratings', default=lambda self: self.env.user.has_group('project.group_project_rating')) rating_status = fields.Selection(", "and managers recipients that can assign tasks and create new", "users'), ], string='Visibility', required=True, default='portal', help=\"Defines the visibility of the", "disabled_projects) else: stage.disabled_rating_warning = False class Project(models.Model): _name = \"project.project\"", "fields, models, tools, SUPERUSER_ID, _ from odoo.exceptions import UserError, AccessError,", "self.env.ref('project.mt_task_new') def _track_subtype(self, init_values): self.ensure_one() if 'kanban_state_label' in init_values and", "scheduler @api.model def _send_rating_all(self): projects = self.search([ ('rating_active', '=', True),", "and 'date_assign' not in vals: vals['date_assign'] = now # recurrence", "'.join(portal_users[:10].mapped('name')) raise ValidationError(_(\"The project visibility setting doesn't allow portal users", "default=False, help=\"Automatically modify the kanban state when the customer replies", "!= 'portal'): portal_users = task.allowed_user_ids.filtered('share') if portal_users: user_names = ',", "for task in self: for f in rec_fields: if task.recurrence_id:", "context = dict(self.env.context) context['stage_view'] = stage_view return { 'name': _('Delete", "permission_removed if 
'allow_recurring_tasks' in vals and not vals.get('allow_recurring_tasks'): self.env['project.task'].search([('project_id', 'in',", "task.partner_id.phone: task.partner_phone = task.partner_id.phone def _inverse_partner_phone(self): for task in self:", "in self: project.rating_request_deadline = fields.datetime.now() + timedelta(days=periods.get(project.rating_status_period, 0)) @api.model def", "employees may see all project and tasks.\\n\" \"- Invited portal", "_compute_partner_phone(self): for task in self: if task.partner_id and task.partner_phone !=", "self.env.cr.commit() class Task(models.Model): _name = \"project.task\" _description = \"Task\" _date_name", "= fields.Float(compute='_compute_elapsed', string='Working hours to assign', store=True, group_operator=\"avg\") working_hours_close =", "task.partner_id and task.partner_email != task.partner_id.email will_write_phone = task.partner_id and task.partner_phone", "get_empty_list_help(self, help): tname = _(\"task\") project_id = self.env.context.get('default_project_id', False) if", "name = fields.Char(string='Stage Name', required=True, translate=True) description = fields.Text(translate=True) sequence", "return result def update_date_end(self, stage_id): project_task_type = self.env['project.task.type'].browse(stage_id) if project_task_type.fold", "color = fields.Integer(string='Color Index') user_email = fields.Char(related='user_id.email', string='User Email', readonly=True,", "= True def _compute_attached_docs_count(self): Attachment = self.env['ir.attachment'] for project in", "self)._notify_get_groups(msg_vals=msg_vals) local_msg_vals = dict(msg_vals or {}) self.ensure_one() project_user_group_id = self.env.ref('project.group_project_user').id", "tasks = super().create(vals_list) for task in tasks: if task.project_id.privacy_visibility ==", "== 'portal' for group_name, group_method, group_data in groups: if group_name", "fields.Datetime.from_string(task.date_assign) duration_data = 
task.project_id.resource_calendar_id.get_work_duration_data(dt_create_date, dt_date_assign, compute_leaves=True) task.working_hours_open = duration_data['hours'] task.working_days_open", "if group_name in ('customer', 'user') or group_name == 'portal_customer' and", "the Month'), ('day', 'Day of the Month'), ], default='date', compute='_compute_repeat',", "has been set: 1) if the project partner_id changes, the", "Ongoing Explanation', readonly=True, related_sudo=False) is_closed = fields.Boolean(related=\"stage_id.is_closed\", string=\"Closing Stage\", readonly=True,", "inverse='_inverse_allowed_user') allowed_internal_user_ids = fields.Many2many('res.users', 'project_allowed_internal_users_rel', string=\"Allowed Internal Users\", default=lambda self:", "write access right if 'is_favorite' in vals: vals.pop('is_favorite') self._fields['is_favorite'].determine_inverse(self) res", "compute='_compute_partner_id', store=True, readonly=False, domain=\"['|', ('company_id', '=', False), ('company_id', '=', company_id)]\")", "project in self: if self.env.user in project.favorite_user_ids: favorite_projects |= project", "self.env.uid)]}) favorite_projects.write({'favorite_user_ids': [(3, self.env.uid)]}) def _get_default_favorite_user_ids(self): return [(6, 0, [self.env.uid])]", "else: task.working_hours_open = 0.0 task.working_days_open = 0.0 if task.date_end: dt_date_end", "user to be responsible if no other responsible is #", "= fields.Boolean(related='project_id.allow_recurring_tasks') recurring_task = fields.Boolean(string=\"Recurrent\") recurring_count = fields.Integer(string=\"Tasks in Recurrence\",", "self.filtered( lambda task: task.project_id.resource_calendar_id and task.create_date ) for task in", "action = self.with_context(active_id=self.id, active_ids=self.ids) \\ .env.ref('project.act_project_project_2_project_task_all') \\ .sudo().read()[0] action['display_name'] =", "computed once by project), # by using default get (instead", "task in that stage # a task 
can be in", "fields.One2many('project.task', 'parent_id', string=\"Sub-tasks\", context={'active_test': False}) subtask_project_id = fields.Many2one('project.project', related=\"project_id.subtask_project_id\", string='Sub-task", "'portal' and project.partner_id.user_ids: project.allowed_user_ids |= project.partner_id.user_ids return project def write(self,", "fields.Text(compute='_compute_disabled_rating_warning') def unlink_wizard(self, stage_view=False): self = self.with_context(active_test=False) # retrieves all", "task_linked_to_calendar).update(dict.fromkeys( ['working_hours_open', 'working_hours_close', 'working_days_open', 'working_days_close'], 0.0)) @api.depends('stage_id', 'kanban_state') def _compute_kanban_state_label(self):", "with all default values as 'quick_create' does not contains all", "# customer portal: include comment and incoming emails in communication", "\"sequence, name, id\" _rating_satisfaction_days = False # takes all existing", "'subtype_id': self.env['ir.model.data'].xmlid_to_res_id('mail.mt_note'), 'email_layout_xmlid': 'mail.mail_notification_light' }) return res def _creation_subtype(self): return", "('res_model', '=', 'project.project'), ('res_id', '=', project.id), '&', ('res_model', '=', 'project.task'),", "'res_model': 'project.task.type.delete.wizard', 'views': [(self.env.ref('project.view_project_task_type_delete_wizard').id, 'form')], 'type': 'ir.actions.act_window', 'res_id': wizard.id, 'target':", "task.date_end: dt_date_end = fields.Datetime.from_string(task.date_end) duration_data = task.project_id.resource_calendar_id.get_work_duration_data(dt_create_date, dt_date_end, compute_leaves=True) task.working_hours_close", "project_task_type.fold or project_task_type.is_closed: return {'date_end': fields.Datetime.now()} return {'date_end': False} def", "domain=[('model', '=', 'project.task')], help=\"If set and if the project's rating", "message_subscribe(self, partner_ids=None, channel_ids=None, subtype_ids=None): \"\"\" 
Subscribe to all existing active", "= {rec_field: vals[rec_field] for rec_field in rec_fields} rec_values['next_recurrence_date'] = fields.Datetime.today()", "self: self.env.user.has_group('project.group_project_rating')) rating_status = fields.Selection( [('stage', 'Rating when changing stage'),", "super(Task, self)._notify_get_groups(msg_vals=msg_vals) local_msg_vals = dict(msg_vals or {}) self.ensure_one() project_user_group_id =", "@api.depends('partner_id.email') def _compute_partner_email(self): for task in self: if task.partner_id and", "self: project.task_count = result.get(project.id, 0) def attachment_tree_view(self): action = self.env['ir.actions.act_window']._for_xml_id('base.action_attachment')", "return self.env['project.task'] if depth == 1: return children return children", "], default='this', store=False) recurrence_message = fields.Char(string='Next Recurrencies', compute='_compute_recurrence_message') repeat_interval =", "project # Project User has no write access for project.", "action_open_parent_task(self): return { 'name': _('Parent Task'), 'view_mode': 'form', 'res_model': 'project.task',", "when displaying a list of tasks.\") stage_id = fields.Many2one('project.task.type', string='Stage',", "task.recurrence_id = recurrence.id if 'recurring_task' in vals and not vals.get('recurring_task'):", "documents attached\") date_start = fields.Date(string='Start Date') date = fields.Date(string='Expiration Date',", "task def message_update(self, msg, update_vals=None): \"\"\" Override to update the", "in that stage.') legend_done = fields.Char( 'Green Kanban Label', default=lambda", "fields.Date(string='Deadline', index=True, copy=False, tracking=True) date_last_stage_update = fields.Datetime(string='Last Stage Update', index=True,", "# --------------------------------------------------- @api.model def _create_analytic_account_from_values(self, values): analytic_account = self.env['account.analytic.account'].create({ 'name':", "and 
any(self.mapped('recurrence_id')): # TODO: show a dialog to stop the", "Label', default=lambda s: _('Ready'), translate=True, required=True, help='Override the default value", "reason=_('Customer Email')) return recipients def _notify_email_header_dict(self): headers = super(Task, self)._notify_email_header_dict()", "email_split(self, msg): email_list = tools.email_split((msg.get('to') or '') + ',' +", "be updated.') elif will_write_phone: task.ribbon_message = _('By saving this change,", "return {'date_end': False} def unlink(self): if any(self.mapped('recurrence_id')): # TODO: show", "rating_status_period = fields.Selection([ ('daily', 'Daily'), ('weekly', 'Weekly'), ('bimonthly', 'Twice a", "periodically.\\n\\n\" \"Don't forget to set up the mail templates on", "if project.tasks: raise UserError(_('You cannot delete a project containing tasks.", "compute='_compute_repeat', readonly=False) repeat_show_dow = fields.Boolean(compute='_compute_repeat_visibility') repeat_show_day = fields.Boolean(compute='_compute_repeat_visibility') repeat_show_week =", "task in self: will_write_email = task.partner_id and task.partner_email != task.partner_id.email", "and task.repeat_unit == 'year' @api.depends('recurring_task') def _compute_repeat(self): rec_fields = self._get_recurrence_fields()", "fields.Many2one('mail.alias', string='Alias', ondelete=\"restrict\", required=True, help=\"Internal email associated with this project.", "> 5: task.recurrence_message += '<li>...</li>' task.recurrence_message += '</ul>' if task.repeat_type", "itself.\") allow_subtasks = fields.Boolean('Sub-tasks', default=lambda self: self.env.user.has_group('project.group_subtask_project')) allow_recurring_tasks = fields.Boolean('Recurring", "for project in self: project.rating_request_deadline = fields.datetime.now() + timedelta(days=periods.get(project.rating_status_period, 0))", "too restricted. 
Set the privacy to 'Visible by following customers'", "recurrence_domain = OR([recurrence_domain, ['&', ('recurrence_id', '=', task.recurrence_id.id), ('create_date', '>=', task.create_date)]])", "old_to_new_tasks = {} for task in self.env['project.task'].browse(task_ids): # preserve task", "from random import randint from odoo import api, fields, models,", "fields.Many2many('res.users', compute='_compute_allowed_users', inverse='_inverse_allowed_user') allowed_internal_user_ids = fields.Many2many('res.users', 'project_allowed_internal_users_rel', string=\"Allowed Internal Users\",", "('all', 'All tasks'), ], default='this', store=False) recurrence_message = fields.Char(string='Next Recurrencies',", "of the project and activate default filters\"\"\" action = self.env['ir.actions.act_window']._for_xml_id('project.rating_rating_action_view_project_rating')", "project.allowed_internal_user_ids | project.allowed_portal_user_ids project.allowed_user_ids = users def _inverse_allowed_user(self): for project", "in vals and vals.get('stage_id'): self.filtered(lambda x: x.project_id.rating_active and x.project_id.rating_status ==", "through the chatter using # suggested recipients. 
This heuristic allows", "lambda task: task.project_id.resource_calendar_id and task.create_date ) for task in task_linked_to_calendar:", "the tasks of the project.\", translate=True) tasks = fields.One2many('project.task', 'project_id',", "project.id def _compute_access_warning(self): super(Project, self)._compute_access_warning() for project in self.filtered(lambda x:", "by\\n\" \" them or by someone of their company.\") allowed_user_ids", "for task in self: will_write_email = task.partner_id and task.partner_email !=", "= fields.One2many('project.task', 'parent_id', string=\"Sub-tasks\", context={'active_test': False}) subtask_project_id = fields.Many2one('project.project', related=\"project_id.subtask_project_id\",", "{} )) portal_privacy = self.project_id.privacy_visibility == 'portal' for group_name, group_method,", "self: allowed_users = project.allowed_user_ids project.allowed_portal_user_ids = allowed_users.filtered('share') project.allowed_internal_user_ids = allowed_users", "'Tasks in Recurrence', 'type': 'ir.actions.act_window', 'res_model': 'project.task', 'view_mode': 'tree,form', 'domain':", "for p in self.env['mail.thread']._mail_find_partner_from_emails(email_list, records=self, force_create=False) if p] self.message_subscribe(partner_ids) return", "other responsible is # found. create_context = dict(self.env.context or {})", "the sequence order when displaying a list of Projects.\") partner_id", "project will be created. 
It can be the current project", "'=', 'project.project'), ('res_id', '=', project.id), '&', ('res_model', '=', 'project.task'), ('res_id',", "in rec_fields: if task.recurrence_id: task[f] = task.recurrence_id[f] else: if task.recurring_task:", "task.parent_id.project_id.subtask_project_id # --------------------------------------------------- # Mail gateway # --------------------------------------------------- def _track_template(self,", "vals @api.model_create_multi def create(self, vals_list): default_stage = dict() for vals", "Repetitions'), ], default=\"forever\", string=\"Until\", compute='_compute_repeat', readonly=False) repeat_until = fields.Date(string=\"End Date\",", "fields.One2many('ir.attachment', compute='_compute_attachment_ids', string=\"Main Attachments\", help=\"Attachment that don't come from message.\")", "else 1 recurring_dates = self.env['project.task.recurrence']._get_next_recurring_dates( date + timedelta(days=delta), task.repeat_interval, task.repeat_unit,", "def copy(self, default=None): if default is None: default = {}", "project_id and \"stage_id\" not in vals: # 1) Allows keeping", "x.project_id.rating_status == 'stage')._send_task_rating_mail(force_send=True) return result def update_date_end(self, stage_id): project_task_type =", "subscribed to allowed portal users \"\"\" res = super(Project, self).message_subscribe(partner_ids=partner_ids,", "d in days): vals[days[week_start]] = True if 'repeat_day' in default_fields:", "= ', '.join(portal_users[:10].mapped('name')) raise ValidationError(_(\"The project visibility setting doesn't allow", "# Subtasks # --------------------------------------------------- @api.depends('parent_id.partner_id', 'project_id.partner_id') def _compute_partner_id(self): \"\"\" If", "fields.Char(related='stage_id.legend_normal', string='Kanban Ongoing Explanation', readonly=True, related_sudo=False) is_closed = fields.Boolean(related=\"stage_id.is_closed\", string=\"Closing", "project.allowed_portal_user_ids 
project.allowed_user_ids = users def _inverse_allowed_user(self): for project in self:", "context key task_ids = self.env['project.task'].with_context(active_test=False).search([('project_id', '=', self.id)], order='parent_id').ids old_to_new_tasks =", "account to record cost and revenue on your project.\") favorite_user_ids", "task in self.env['project.task'].browse(task_ids): # preserve task name and stage, normally", "} def unlink(self): # Check project is empty for project", "* A good feedback from the customer will update the", "self.env['project.task'].with_context(active_test=False).search([('project_id', '=', self.id)], order='parent_id').ids old_to_new_tasks = {} for task in", "@api.depends('stage_id', 'kanban_state') def _compute_kanban_state_label(self): for task in self: if task.kanban_state", "default is None: default = {} if not default.get('name'): default['name']", "> 5 or task.repeat_type == 'forever' or len(recurring_dates) > 5:", "task'), ('subsequent', 'This and following tasks'), ('all', 'All tasks'), ],", "class Task(models.Model): _name = \"project.task\" _description = \"Task\" _date_name =", "= fields.Selection([ ('0', 'Normal'), ('1', 'Important'), ], default='0', index=True, string=\"Priority\")", "Rating business # --------------------------------------------------- def _send_task_rating_mail(self, force_send=False): for task in", "if task.date_assign: dt_date_assign = fields.Datetime.from_string(task.date_assign) duration_data = task.project_id.resource_calendar_id.get_work_duration_data(dt_create_date, dt_date_assign, compute_leaves=True)", "\"\"\" copy and map tasks from old to new project", "{} if not default.get('name'): default['name'] = _(\"%s (copy)\", self.name) if", "'=', 'project.project'), ('res_id', 'in', self.ids), '&', ('res_model', '=', 'project.task'), ('res_id',", "close', store=True, group_operator=\"avg\") # customer portal: include comment and incoming", "self.env.uid, index=True, tracking=True) partner_id = 
fields.Many2one('res.partner', string='Customer', compute='_compute_partner_id', store=True, readonly=False,", "in self: task.subtask_count = len(task._get_all_subtasks()) @api.onchange('company_id') def _onchange_task_company(self): if self.project_id.company_id", "models, tools, SUPERUSER_ID, _ from odoo.exceptions import UserError, AccessError, ValidationError,", "{rec_field: vals[rec_field] for rec_field in rec_fields} for task in self:", "] @api.depends('partner_id.email') def _compute_partner_email(self): for project in self: if project.partner_id", "= {} if not default.get('name'): default['name'] = _(\"%s (copy)\") %", "from message.\") # In the domain of displayed_image_id, we couln't", "in self: permission_removed = allowed_users.get(project) - project.allowed_user_ids allowed_portal_users_removed = permission_removed.filtered('share')", "'working_days_close'], 0.0)) @api.depends('stage_id', 'kanban_state') def _compute_kanban_state_label(self): for task in self:", "self)._compute_access_url() for project in self: project.access_url = '/my/project/%s' % project.id", "for task in self: if task.partner_id: reason = _('Customer Email')", "'after' or self.repeat_number) and\\ (self.repeat_type != 'until' or self.repeat_until and", "& self._get_recurrence_fields() if rec_fields: rec_values = {rec_field: vals[rec_field] for rec_field", "return res def rating_apply(self, rate, token=None, feedback=None, subtype_xmlid=None): return super(Task,", "tasks. 
You can either archive it or first delete all", "('bimonthly', 'Twice a Month'), ('monthly', 'Once a Month'), ('quarterly', 'Quarterly'),", "groups: if group_name in ('customer', 'user') or group_name == 'portal_customer'", "state for kanban selection, when the task or issue is", "overrides # ------------------------------------------------ @api.model def default_get(self, default_fields): vals = super(Task,", "project's rating configuration is 'Rating when changing stage', then an", "dt_create_date = fields.Datetime.from_string(task.create_date) if task.date_assign: dt_date_assign = fields.Datetime.from_string(task.date_assign) duration_data =", "if x.split('@')[0] not in aliases] @api.model def message_new(self, msg, custom_values=None):", "'stage_ids': self.ids }) context = dict(self.env.context) context['stage_view'] = stage_view return", "not self.partner_id: # we consider that posting a message with", "[('|')] * (len(section_ids) - 1) for section_id in section_ids: search_domain.append(('project_ids',", "either archive it or first delete all of its tasks.'))", "(test_task.stage_id.mail_template_id, { 'auto_delete_message': True, 'subtype_id': self.env['ir.model.data'].xmlid_to_res_id('mail.mt_note'), 'email_layout_xmlid': 'mail.mail_notification_light' }) return", "reset kanban state when changing stage if 'kanban_state' not in", "= fields.Integer(string='Sequence', index=True, default=10, help=\"Gives the sequence order when displaying", "mail_template_id = fields.Many2one( 'mail.template', string='Email Template', domain=[('model', '=', 'project.task')], help=\"If", "a: a.mimetype == 'image') if image_attachments: self.displayed_image_id = image_attachments[0] if", "tue = fields.Boolean(string=\"Tue\", compute='_compute_repeat', readonly=False) wed = fields.Boolean(string=\"Wed\", compute='_compute_repeat', readonly=False)", "('december', 'December'), ], compute='_compute_repeat', readonly=False) repeat_show_dow = 
fields.Boolean(compute='_compute_repeat_visibility') repeat_show_day =", "project.tasks: raise UserError(_('You cannot delete a project containing tasks. You", "project and activate default filters\"\"\" action = self.env['ir.actions.act_window']._for_xml_id('project.rating_rating_action_view_project_rating') action['name'] =", "to update the task according to the email. \"\"\" email_list", "super(Project, self).message_subscribe(partner_ids=partner_ids, channel_ids=channel_ids, subtype_ids=subtype_ids) project_subtypes = self.env['mail.message.subtype'].browse(subtype_ids) if subtype_ids else", "or sub.default)).ids if project_subtypes else None if not subtype_ids or", "# perform search, return the first found return self.env['project.task.type'].search(search_domain, order=order,", "stage readgroup = self.with_context(active_test=False).env['project.task'].read_group([('stage_id', 'in', self.ids)], ['project_id'], ['project_id']) project_ids =", "fields.Char( compute='_compute_partner_phone', inverse='_inverse_partner_phone', string=\"Phone\", readonly=False, store=True, copy=False) company_id = fields.Many2one('res.company',", "task name and stage, normally altered during copy defaults =", "group_expand='_read_group_stage_ids', domain=\"[('project_ids', '=', project_id)]\", copy=False) tag_ids = fields.Many2many('project.tags', string='Tags') kanban_state", "[('account_id', '=', self.analytic_account_id.id)] return action def action_view_all_rating(self): \"\"\" return the", "task.message_subscribe(partner_ids) return task def message_update(self, msg, update_vals=None): \"\"\" Override to", "email alias', compute='_compute_alias_enabled', readonly=False) alias_id = fields.Many2one('mail.alias', string='Alias', ondelete=\"restrict\", required=True,", "project_ids = list(set([project['project_id'][0] for project in readgroup] + self.project_ids.ids)) wizard", "tracking=True, index=True, default=_get_default_stage_id, group_expand='_read_group_stage_ids', 
domain=\"[('project_ids', '=', project_id)]\", copy=False) tag_ids =", "in vals: vals['date_assign'] = now # recurrence fields rec_fields =", "'partner_id': project.partner_id.id, 'active': True, }) project.write({'analytic_account_id': analytic_account.id}) # --------------------------------------------------- #", "False @api.depends('project_id.company_id') def _compute_company_id(self): for task in self.filtered(lambda task: task.project_id):", "favorite_projects |= project else: not_fav_projects |= project # Project User", "def action_view_account_analytic_line(self): \"\"\" return the action to see all the", "= self.with_context(active_test=False) # retrieves all the projects with a least", "customer feedback?\\n\" \"- Rating when changing stage: an email will", "subtype_ids=None): \"\"\" Subscribe to all existing active tasks when subscribing", "result = super(Task, tasks).write(vals) # rating on stage if 'stage_id'", "self.env.user in project.favorite_user_ids def _inverse_is_favorite(self): favorite_projects = not_fav_projects = self.env['project.project'].sudo()", "'name': _('Delete Stage'), 'view_mode': 'form', 'res_model': 'project.task.type.delete.wizard', 'views': [(self.env.ref('project.view_project_task_type_delete_wizard').id, 'form')],", "task.project_id.allowed_portal_user_ids if task.project_id.privacy_visibility != 'portal': task.allowed_user_ids -= portal_users elif task.project_id.privacy_visibility", "|= task.project_id.allowed_portal_user_ids if task.project_id.privacy_visibility != 'portal': task.allowed_user_ids -= portal_users elif", "during copy defaults = self._map_tasks_default_valeus(task, project) if task.parent_id: # set", "Planned Hours\", help='Time planned to achieve this task (including its", "changing stage', then an email will be sent to the", "raise UserError(_('You cannot delete recurring tasks. 
Please, disable the recurrence", "users subscribed to allowed portal users \"\"\" res = super(Task,", "task.partner_phone != task.partner_id.phone: task.partner_id.phone = task.partner_phone @api.depends('partner_email', 'partner_phone', 'partner_id') def", "list of commands so we used res_model & res_id displayed_image_id", "is not assigned to the stage readgroup = self.with_context(active_test=False).env['project.task'].read_group([('stage_id', 'in',", "range(1, 32) ], compute='_compute_repeat', readonly=False) repeat_week = fields.Selection([ ('first', 'First'),", "legend_blocked = fields.Char( 'Red Kanban Label', default=lambda s: _('Blocked'), translate=True,", "in count} for task in recurring_tasks: task.recurring_count = tasks_count.get(task.recurrence_id.id, 0)", "return ['repeat_interval', 'repeat_unit', 'repeat_type', 'repeat_until', 'repeat_number', 'repeat_on_month', 'repeat_on_year', 'mon', 'tue',", "responsible is # found. create_context = dict(self.env.context or {}) create_context['default_user_id']", "no write access for project. 
not_fav_projects.write({'favorite_user_ids': [(4, self.env.uid)]}) favorite_projects.write({'favorite_user_ids': [(3,", "Hours\", compute='_compute_subtask_planned_hours', help=\"Sum of the time planned of all the", "Attachment = self.env['ir.attachment'] for project in self: project.doc_count = Attachment.search_count([", "removing it.\") sequence = fields.Integer(default=10, help=\"Gives the sequence order when", "You cannot create recursive hierarchy of tasks.')) @api.constrains('allowed_user_ids') def _check_no_portal_allowed(self):", "the default value displayed for the blocked state for kanban", "self.env.user in project.favorite_user_ids: favorite_projects |= project else: not_fav_projects |= project", "task in self: attachment_ids = self.env['ir.attachment'].search([('res_id', '=', task.id), ('res_model', '=',", "action_context.update(self._context) action_context['search_default_parent_res_name'] = self.name action_context.pop('group_by', None) return dict(action, context=action_context) #", "recipients def _notify_email_header_dict(self): headers = super(Task, self)._notify_email_header_dict() if self.project_id: current_objects", "= self.env[\"ir.actions.actions\"]._for_xml_id(\"project.project_task_action_sub_task\") # display all subtasks of current task action['domain']", "order=order, access_rights_uid=SUPERUSER_ID) return stages.browse(stage_ids) active = fields.Boolean(default=True) name = fields.Char(string='Title',", "= self.mapped('project_id.alias_name') return [x for x in email_list if x.split('@')[0]", "= \"Project Tags\" def _get_default_color(self): return randint(1, 11) name =", "vals_list: project_id = vals.get('project_id') or self.env.context.get('default_project_id') if project_id and not", "def _compute_stage_id(self): for task in self: if task.project_id: if task.project_id", "= task.legend_done def _compute_access_url(self): super(Task, self)._compute_access_url() for task in self:", "_(\"task\") project_id = 
self.env.context.get('default_project_id', False) if project_id: name = self.env['project.project'].browse(project_id).label_tasks", "copy=False) project_privacy_visibility = fields.Selection(related='project_id.privacy_visibility', string=\"Project Visibility\") # Computed field about", "= self.env['project.task.recurrence']._get_next_recurring_dates( date + timedelta(days=delta), task.repeat_interval, task.repeat_unit, task.repeat_type, task.repeat_until, task.repeat_on_month,", "task cannot be shared with the recipient(s) because the privacy", "[new_group] + groups if self.project_id.privacy_visibility == 'portal': allowed_user_ids = self.project_id.allowed_portal_user_ids.partner_id.ids", "In the domain of displayed_image_id, we couln't use attachment_ids because", "0) @api.depends('partner_id.email') def _compute_partner_email(self): for task in self: if task.partner_id", "'Grey Kanban Label', default=lambda s: _('In Progress'), translate=True, required=True, help='Override", "method should be called once a day by the scheduler", "('june', 'June'), ('july', 'July'), ('august', 'August'), ('september', 'September'), ('october', 'October'),", "working_hours_close = fields.Float(compute='_compute_elapsed', string='Working hours to close', store=True, group_operator=\"avg\") working_days_open", "compute='_compute_repeat', readonly=False) repeat_until = fields.Date(string=\"End Date\", compute='_compute_repeat', readonly=False) repeat_number =", "change, the customer email and phone number will also be", "are correct (and computed once by project), # by using", "self.env.context['default_project_id'])] + search_domain stage_ids = stages._search(search_domain, order=order, access_rights_uid=SUPERUSER_ID) return stages.browse(stage_ids)", "# user_id change: update date_assign if vals.get('user_id') and 'date_assign' not", "project_user_group_id = self.env.ref('project.group_project_user').id group_func = lambda pdata: pdata['type'] == 'user'", "message', 
compute='_compute_ribbon_message') partner_city = fields.Char(related='partner_id.city', readonly=False) manager_id = fields.Many2one('res.users', string='Project", "('res_model', '=', 'project.task'), ('res_id', 'in', self.task_ids.ids) ]) action['context'] = \"{'default_res_model':", "True if allowed_users_changed: for project in self: permission_removed = allowed_users.get(project)", "= fields.Datetime.now() # Stage change: Update date_end if folded stage", "if task.repeat_type == 'after' and task.repeat_number > 5 or task.repeat_type", "('month', 'Months'), ('year', 'Years'), ], default='week', compute='_compute_repeat', readonly=False) repeat_type =", "self} # directly compute is_favorite to dodge allow write access", "compute='_compute_repeat', readonly=False) sun = fields.Boolean(string=\"Sun\", compute='_compute_repeat', readonly=False) repeat_day = fields.Selection([", "else: if task.recurring_task: task[f] = defaults.get(f) else: task[f] = False", "doesn't allow portal users to see the project's tasks. 
(%s)\",", "'active': True, }) return analytic_account def _create_analytic_account(self): for project in", "('day', 'Day of the Year'), ], default='date', compute='_compute_repeat', readonly=False) mon", "string=\"Priority\") sequence = fields.Integer(string='Sequence', index=True, default=10, help=\"Gives the sequence order", "self = self.with_context(mail_create_nosubscribe=True) project = super(Project, self).create(vals) if not vals.get('subtask_project_id'):", "or {}) create_context['default_user_id'] = False if custom_values is None: custom_values", "readgroup = self.with_context(active_test=False).env['project.task'].read_group([('stage_id', 'in', self.ids)], ['project_id'], ['project_id']) project_ids = list(set([project['project_id'][0]", "store=True, readonly=False, ondelete='restrict', tracking=True, index=True, default=_get_default_stage_id, group_expand='_read_group_stage_ids', domain=\"[('project_ids', '=', project_id)]\",", "return project def write(self, vals): allowed_users_changed = 'allowed_portal_user_ids' in vals", "and if the project's rating configuration is 'Rating when changing", "in self: if task.partner_id: if task.project_id.partner_id: task.partner_id = task.project_id.partner_id else:", "import UserError, AccessError, ValidationError, RedirectWarning from odoo.tools.misc import format_date, get_lang", "('is_closed', '=', False)]) @api.model def _default_company_id(self): if self._context.get('default_project_id'): return self.env['project.project'].browse(self._context['default_project_id']).company_id", "tasks.sudo().write({'allowed_user_ids': [(4, user.id) for user in new_allowed_users]}) return res #", "set: 1) if the project partner_id changes, the task partner_id", "rec_values['next_recurrence_date'] = fields.Datetime.today() recurrence = self.env['project.task.recurrence'].create(rec_values) task.recurrence_id = recurrence.id if", "time elapsed between record creation and assignation/closing. 
working_hours_open = fields.Float(compute='_compute_elapsed',", "'Error! project start-date must be lower than project end-date.') ]", "rating_apply(self, rate, token=None, feedback=None, subtype_xmlid=None): return super(Task, self).rating_apply(rate, token=token, feedback=feedback,", "Subtasks # --------------------------------------------------- @api.depends('parent_id.partner_id', 'project_id.partner_id') def _compute_partner_id(self): \"\"\" If a", "('this', 'This task'), ('subsequent', 'This and following tasks'), ('all', 'All", "return project @api.model def create(self, vals): # Prevent double project", "task.subtask_count = len(task._get_all_subtasks()) @api.onchange('company_id') def _onchange_task_company(self): if self.project_id.company_id != self.company_id:", "default values as 'quick_create' does not contains all field in", "'mail.thread', 'rating.parent.mixin'] _order = \"sequence, name, id\" _rating_satisfaction_days = False", "self.env['account.analytic.account'].create({ 'name': project.name, 'company_id': project.company_id.id, 'partner_id': project.partner_id.id, 'active': True, })", "for k, v in ctx.items() if not k.startswith('search_default_')} ctx.update({ 'default_name':", "default_fields: vals['repeat_until'] = fields.Date.today() + timedelta(days=7) if 'repeat_weekday' in default_fields:", "task.repeat_on_year == 'date') task.repeat_show_week = task.recurring_task and (task.repeat_unit == 'month'", "help=\"If set an email will be sent to the customer", "even if the project is not assigned to the stage", "= fields.Datetime.today() recurrence = self.env['project.task.recurrence'].create(rec_values) vals['recurrence_id'] = recurrence.id tasks =", "= self.env['account.analytic.account'].create({ 'name': project.name, 'company_id': project.company_id.id, 'partner_id': project.partner_id.id, 'active': True,", "= fields.Float(\"Initially Planned Hours\", help='Time planned to achieve this task", "subtask field in `default_get` 
'default_company_id': default_project.company_id.id if default_project else self.env.company.id,", "because a one2many is represented as a list of commands", "by using default get (instead of _get_default_stage_id or _stage_find), if", "'=', False)]) allowed_portal_user_ids = fields.Many2many('res.users', 'project_allowed_portal_users_rel', string=\"Allowed Portal Users\", domain=[('share',", "default=\"stage\", required=True, help=\"How to get customer feedback?\\n\" \"- Rating when", "is in that stage.') legend_normal = fields.Char( 'Grey Kanban Label',", "project.favorite_user_ids: favorite_projects |= project else: not_fav_projects |= project # Project", "task.stage_id.project_ids: task.stage_id = task.stage_find(task.project_id.id, [ ('fold', '=', False), ('is_closed', '=',", "and task.partner_phone != task.partner_id.phone if will_write_email and will_write_phone: task.ribbon_message =", "default='week', compute='_compute_repeat', readonly=False) repeat_type = fields.Selection([ ('forever', 'Forever'), ('until', 'End", "'after' and task.repeat_number > 5 or task.repeat_type == 'forever' or", "this project is linked for financial management. \" \"Use an", "\"project.project\" _description = \"Project\" _inherit = ['portal.mixin', 'mail.alias.mixin', 'mail.thread', 'rating.parent.mixin']", "# user_id change: update date_assign if vals.get('user_id'): vals['date_assign'] = fields.Datetime.now()", "the followed project and tasks.\\n\" \"- All internal users: employees", "if task.project_id: if task.project_id not in task.stage_id.project_ids: task.stage_id = task.stage_find(task.project_id.id,", "linked to this task. 
Usually less or equal to the", "people will receive email.\", index=True, compute='_compute_email_from', store=\"True\", readonly=False) allowed_user_ids =", "in the kanban view when there are no records in", "may see everything.\" \" Portal users may see project and", "task: task.project_id.resource_calendar_id and task.create_date ) for task in task_linked_to_calendar: dt_create_date", "will also be updated.') elif will_write_email: task.ribbon_message = _('By saving", "value displayed for the done state for kanban selection, when", "task.recurring_task and task.repeat_unit == 'week' task.repeat_show_month = task.recurring_task and task.repeat_unit", "task.partner_id and task.partner_phone != task.partner_id.phone: task.partner_id.phone = task.partner_phone @api.depends('partner_email', 'partner_phone',", "action = self.env[\"ir.actions.actions\"]._for_xml_id(\"analytic.account_analytic_line_action\") action['context'] = {'default_account_id': self.analytic_account_id.id} action['domain'] = [('account_id',", "task.recurrence_message += '</ul>' if task.repeat_type == 'until': task.recurrence_message += _('<p><em>Number", "= now # recurrence fields rec_fields = vals.keys() & self._get_recurrence_fields()", "return [fn(n) for day, fn in DAYS.items() if self[day]] return", "task.project_id: task.project_id = task.parent_id.project_id.subtask_project_id # --------------------------------------------------- # Mail gateway #", "('partner_id', '=', False), ('email_from', '=', new_partner.email), ('stage_id.fold', '=', False)]).write({'partner_id': new_partner.id})", "good feedback from the customer will update the kanban state", "stage_id \"\"\" project_id = self.env.context.get('default_project_id') if not project_id: return False", "self: if task.kanban_state == 'normal': task.kanban_state_label = task.legend_normal elif task.kanban_state", "Add the users subscribed to allowed portal users \"\"\" res", "_compute_kanban_state_label(self): for task in self: if 
task.kanban_state == 'normal': task.kanban_state_label", "UserError(_('You cannot delete a project containing tasks. You can either", "'followers': allowed_user_ids = self.project_id.allowed_internal_user_ids.partner_id.ids group_func = lambda pdata: pdata['type'] ==", "task._message_add_suggested_recipient(recipients, email=task.email_from, reason=_('Customer Email')) return recipients def _notify_email_header_dict(self): headers =", "without removing it.\") sequence = fields.Integer(default=10, help=\"Gives the sequence order", "search_domain = [('|')] * (len(section_ids) - 1) for section_id in", "_compute_recurring_count(self): self.recurring_count = 0 recurring_tasks = self.filtered(lambda l: l.recurrence_id) count", "'project.task'), ('res_id', 'in', project.task_ids.ids) ]) def _compute_task_count(self): task_data = self.env['project.task'].read_group([('project_id',", "self.env.ref('project.group_project_user').id group_func = lambda pdata: pdata['type'] == 'user' and project_user_group_id", "partner_phone = fields.Char( compute='_compute_partner_phone', inverse='_inverse_partner_phone', string=\"Phone\", readonly=False, store=True, copy=False) ribbon_message", "'portal': task.allowed_user_ids -= portal_users elif task.project_id.privacy_visibility != 'followers': task.allowed_user_ids -=", "analytic_account.id}) # --------------------------------------------------- # Rating business # --------------------------------------------------- # This", "fields.Boolean(related='project_id.allow_recurring_tasks') recurring_task = fields.Boolean(string=\"Recurrent\") recurring_count = fields.Integer(string=\"Tasks in Recurrence\", compute='_compute_recurring_count')", "new stage' (green bullet).\\n\" \" * A medium or a", "def _compute_partner_id(self): \"\"\" If a task has no partner_id, use", "if 'kanban_state_label' in init_values and self.kanban_state == 'blocked': return self.env.ref('project.mt_task_blocked')", "will also be updated.') elif will_write_phone: 
task.ribbon_message = _('By saving", "self: self.env.company) currency_id = fields.Many2one('res.currency', related=\"company_id.currency_id\", string=\"Currency\", readonly=True) analytic_account_id =", "= super(Project, self).copy(default) if self.subtask_project_id == self: project.subtask_project_id = project", "task.working_hours_close = 0.0 task.working_days_close = 0.0 (self - task_linked_to_calendar).update(dict.fromkeys( ['working_hours_open',", "if project.analytic_account_id and not project.analytic_account_id.line_ids: analytic_accounts_to_delete |= project.analytic_account_id result =", "- 1) for section_id in section_ids: search_domain.append(('project_ids', '=', section_id)) search_domain", "[(6, 0, tasks.ids)]}) @api.returns('self', lambda value: value.id) def copy(self, default=None):", "in project.task_ids: task.allowed_user_ids -= permission_removed if 'allow_recurring_tasks' in vals and", "fields.Float(compute='_compute_elapsed', string='Working days to close', store=True, group_operator=\"avg\") # customer portal:", "set and if the project's rating configuration is 'Rating when", "[] if section_ids: search_domain = [('|')] * (len(section_ids) - 1)", "group_name in ('customer', 'user') or group_name == 'portal_customer' and not", "of Repetitions'), ], default=\"forever\", string=\"Until\", compute='_compute_repeat', readonly=False) repeat_until = fields.Date(string=\"End", "('day', 'Day of the Month'), ], default='date', compute='_compute_repeat', readonly=False) repeat_on_year", "'view_mode': 'form', 'res_model': 'project.task', 'res_id': self.parent_id.id, 'type': 'ir.actions.act_window', 'context': dict(self._context,", "self.allowed_internal_user_ids |= internal_users return res def message_unsubscribe(self, partner_ids=None, channel_ids=None): \"\"\"", "fields.Char(string='Title', tracking=True, required=True, index=True) description = fields.Html(string='Description') priority = fields.Selection([", "index=True) date_end = 
fields.Datetime(string='Ending Date', index=True, copy=False) date_assign = fields.Datetime(string='Assigning", "'Second'), ('third', 'Third'), ('last', 'Last'), ], default='first', compute='_compute_repeat', readonly=False) repeat_weekday", "the lead: - section_id: if set, stages must belong to", "'portal': allowed_user_ids = self.project_id.allowed_portal_user_ids.partner_id.ids groups.insert(0, ( 'allowed_portal_users', lambda pdata: pdata['type']", "True)]) doc_count = fields.Integer(compute='_compute_attached_docs_count', string=\"Number of documents attached\") date_start =", "= self.filtered(lambda l: l.recurrence_id) count = self.env['project.task'].read_group([('recurrence_id', 'in', recurring_tasks.recurrence_id.ids)], ['id'],", "to the email. \"\"\" # remove default author when going", "compute='_compute_repeat', readonly=False) thu = fields.Boolean(string=\"Thu\", compute='_compute_repeat', readonly=False) fri = fields.Boolean(string=\"Fri\",", "action['domain'] = [('account_id', '=', self.analytic_account_id.id)] return action def action_view_all_rating(self): \"\"\"", "self: portal_users = task.allowed_user_ids.filtered('share') internal_users = task.allowed_user_ids - portal_users if", "context, with all default values as 'quick_create' does not contains", "step.\") fold = fields.Boolean(string='Folded in Kanban', help='This stage is folded", "= super(Task, self).rating_get_partner_id() if not res and self.project_id.partner_id: return self.project_id.partner_id", "required=True, index=True) description = fields.Html(string='Description') priority = fields.Selection([ ('0', 'Normal'),", "= task.allowed_user_ids - portal_users if task.project_id.privacy_visibility == 'followers': task.allowed_user_ids |=", "fields.Boolean('Active', default=True) name = fields.Char(string='Stage Name', required=True, translate=True) description =", "def _is_recurrence_valid(self): self.ensure_one() return self.repeat_interval > 0 and\\ (not self.repeat_show_dow", 
"project: project.privacy_visibility == 'portal'): project.allowed_user_ids |= project.partner_id.user_ids return res def", "fields.Char(related='stage_id.legend_blocked', string='Kanban Blocked Explanation', readonly=True, related_sudo=False) legend_done = fields.Char(related='stage_id.legend_done', string='Kanban", "_create_analytic_account_from_values(self, values): analytic_account = self.env['account.analytic.account'].create({ 'name': values.get('name', _('Unknown Analytic Account')),", "tasks.')) # Delete the empty related analytic account analytic_accounts_to_delete =", "Handle project users and managers recipients that can assign tasks", "only direct children # If depth == 3, return children", "specific one) # on a document without customer means that", "children return children + children._get_all_subtasks(depth - 1) def action_open_parent_task(self): return", "'August'), ('september', 'September'), ('october', 'October'), ('november', 'November'), ('december', 'December'), ],", "None if not subtype_ids or task_subtypes: self.mapped('tasks').message_subscribe( partner_ids=partner_ids, channel_ids=channel_ids, subtype_ids=task_subtypes)", "the project to 'Visible by following customers' in order to", "if any(self.mapped('recurrence_id')): # TODO: show a dialog to stop the", "self.stage_find(project_id, [('fold', '=', False), ('is_closed', '=', False)]) @api.model def _default_company_id(self):", "analytic account analytic_accounts_to_delete = self.env['account.analytic.account'] for project in self: if", "_compute_access_warning(self): super(Project, self)._compute_access_warning() for project in self.filtered(lambda x: x.privacy_visibility !=", "means that it was created through the chatter using #", "recurrence_update = fields.Selection([ ('this', 'This task'), ('subsequent', 'This and following", "no other responsible is # found. 
create_context = dict(self.env.context or", "vals.get('partner_id') or vals.get('privacy_visibility'): for project in self.filtered(lambda project: project.privacy_visibility ==", "_('By saving this change, the customer email will also be", "to the email. \"\"\" email_list = self.email_split(msg) partner_ids = [p.id", "\"\"\" project = self.browse(new_project_id) tasks = self.env['project.task'] # We want", "reaches this step.\") auto_validation_kanban_state = fields.Boolean('Automatic kanban status', default=False, help=\"Automatically", "new_allowed_users]}) return res # ---------------------------------------- # Case management # ----------------------------------------", "sub-tasks linked to this task. Usually less or equal to", "Email Template', domain=[('model', '=', 'project.task')], help=\"If set and if the", "task.id), ('res_model', '=', 'project.task')]).ids message_attachment_ids = task.mapped('message_ids.attachment_ids').ids # from mail_thread", "vals.get('user_id'): vals['date_assign'] = fields.Datetime.now() # Stage change: Update date_end if", "takes all existing ratings _check_company_auto = True def _compute_attached_docs_count(self): Attachment", "task in self.filtered(lambda t: t.recurring_task and t._is_recurrence_valid()): date = fields.Date.today()", "document according to the email. \"\"\" # remove default author", "email associated with this project. 
Incoming emails are automatically synchronized", "active_test context key task_ids = self.env['project.task'].with_context(active_test=False).search([('project_id', '=', self.id)], order='parent_id').ids old_to_new_tasks", "'company_id': project.company_id.id, } def map_tasks(self, new_project_id): \"\"\" copy and map", "return False return self.stage_find(project_id, [('fold', '=', False), ('is_closed', '=', False)])", "-= internal_users @api.depends('create_date', 'date_end', 'date_assign') def _compute_elapsed(self): task_linked_to_calendar = self.filtered(", "'allowed_portal_users', lambda pdata: pdata['type'] == 'portal' and pdata['id'] in allowed_user_ids,", "stage: an email will be sent when a task is", "[p.id for p in self.env['mail.thread']._mail_find_partner_from_emails(email_list, records=self, force_create=False) if p] self.message_subscribe(partner_ids)", "} def map_tasks(self, new_project_id): \"\"\" copy and map tasks from", "for project in self: allowed_users = project.allowed_user_ids project.allowed_portal_user_ids = allowed_users.filtered('share')", "in self.filtered(lambda t: t.project_id.privacy_visibility != 'portal'): portal_users = task.allowed_user_ids.filtered('share') if", "\"\"\" _name = \"project.tags\" _description = \"Project Tags\" def _get_default_color(self):", "def _inverse_partner_phone(self): for project in self: if project.partner_id and project.partner_phone", "= defaults = ast.literal_eval(self.alias_defaults or \"{}\") defaults['project_id'] = self.id return", "== 'year' and task.repeat_on_year == 'day') task.repeat_show_dow = task.recurring_task and", "# want the gateway user to be responsible if no", "fields.Selection([ ('date', 'Date of the Month'), ('day', 'Day of the", "task.project_id.privacy_visibility != 'portal': task.allowed_user_ids -= portal_users elif task.project_id.privacy_visibility != 'followers':", "fields.Integer(string='Color Index') user_id = fields.Many2one('res.users', string='Project Manager', 
default=lambda self: self.env.user,", "records=None, company=company, doc_names=None) res = {task.id: aliases.get(task.project_id.id) for task in", "super(Task, self)._message_post_after_hook(message, msg_vals) def action_assign_to_me(self): self.write({'user_id': self.env.user.id}) # If depth", "Stage', help=\"Tasks in this stage are considered as closed.\") disabled_rating_warning", "string='Members') is_favorite = fields.Boolean(compute='_compute_is_favorite', inverse='_inverse_is_favorite', string='Show Project on dashboard', help=\"Whether", "fields.Boolean(compute='_compute_repeat_visibility') @api.model def _get_recurrence_fields(self): return ['repeat_interval', 'repeat_unit', 'repeat_type', 'repeat_until', 'repeat_number',", "def _compute_ribbon_message(self): for task in self: will_write_email = task.partner_id and", "('message_type', 'in', ['email', 'comment'])]) # recurrence fields allow_recurring_tasks = fields.Boolean(related='project_id.allow_recurring_tasks')", "change: update date_last_stage_update if 'stage_id' in vals: vals.update(self.update_date_end(vals['stage_id'])) vals['date_last_stage_update'] =", "= [] if section_ids: search_domain = [('|')] * (len(section_ids) -", "index=True, copy=False, tracking=True) date_last_stage_update = fields.Datetime(string='Last Stage Update', index=True, copy=False,", "You can either archive it or first delete all of", "explicitly set user_id to False; however we do not #", "Frequency', required=True, default='monthly') _sql_constraints = [ ('project_date_greater', 'check(date >= date_start)',", "'|', '&', ('stage_id.is_closed', '=', False), ('stage_id.fold', '=', False), ('stage_id', '=',", "compute='_compute_repeat', readonly=False) repeat_weekday = fields.Selection([ ('mon', 'Monday'), ('tue', 'Tuesday'), ('wed',", "is automatically changed also. 
2) if the parent task partner_id", "child_task in task.child_ids) @api.depends('child_ids') def _compute_subtask_count(self): for task in self:", "task.repeat_number if task.repeat_type == 'after' else 5) delta = task.repeat_interval", "the recipient(s) because the privacy of the project is too", "projects: project.task_ids._send_task_rating_mail() project._compute_rating_request_deadline() self.env.cr.commit() class Task(models.Model): _name = \"project.task\" _description", "== 'normal': task.kanban_state_label = task.legend_normal elif task.kanban_state == 'blocked': task.kanban_state_label", "fields.Html() active = fields.Boolean(default=True, help=\"If the active field is set", "will give default subtask field in `default_get` 'default_company_id': default_project.company_id.id if", "defaults['parent_id'] = old_to_new_tasks.get(task.parent_id.id, False) new_task = task.copy(defaults) old_to_new_tasks[task.id] = new_task.id", "_('Confirmation'), 'view_mode': 'form', 'res_model': 'project.delete.wizard', 'views': [(self.env.ref('project.project_delete_wizard_form').id, 'form')], 'type': 'ir.actions.act_window',", "fields.Boolean('Recurring Tasks', default=lambda self: self.env.user.has_group('project.group_project_recurring_tasks')) # rating fields rating_request_deadline =", "of the time planned of all the sub-tasks linked to", "def _compute_access_url(self): super(Task, self)._compute_access_url() for task in self: task.access_url =", "task.ribbon_message = _('By saving this change, the customer email and", "set(message_attachment_ids)))] @api.depends('project_id.allowed_user_ids', 'project_id.privacy_visibility') def _compute_allowed_user_ids(self): for task in self: portal_users", "not \"company_id\" in vals: vals[\"company_id\"] = self.env[\"project.project\"].browse( project_id ).company_id.id or", "fields.Boolean(string='Folded in Kanban', help='This stage is folded in the kanban", "it on its tasks, too 
self.with_context(active_test=False).mapped('tasks').write({'active': vals['active']}) if vals.get('partner_id') or", "'active': True, }) project.write({'analytic_account_id': analytic_account.id}) # --------------------------------------------------- # Rating business", "'allowed_portal_user_ids' in vals or 'allowed_internal_user_ids' in vals if allowed_users_changed: allowed_users", "= fields.Char(related='stage_id.legend_normal', string='Kanban Ongoing Explanation', readonly=True, related_sudo=False) is_closed = fields.Boolean(related=\"stage_id.is_closed\",", "task reaches this step.\") auto_validation_kanban_state = fields.Boolean('Automatic kanban status', default=False,", "('customer', 'user') or group_name == 'portal_customer' and not portal_privacy: group_data['has_button_access']", "for project in self: project.is_favorite = self.env.user in project.favorite_user_ids def", "init_values and self.kanban_state == 'done': return self.env.ref('project.mt_task_ready') elif 'stage_id' in", "ast.literal_eval(self.alias_defaults or \"{}\") defaults['project_id'] = self.id return values # ---------------------------------------------------", "{ 'name': msg.get('subject') or _(\"No Subject\"), 'email_from': msg.get('from'), 'planned_hours': 0.0,", "super(Task, self)._message_get_suggested_recipients() for task in self: if task.partner_id: reason =", "def create(self, vals): # Prevent double project creation self =", "author when going through the mail gateway. 
Indeed we #", "'project.task'), ('res_id', '=', id), ('mimetype', 'ilike', 'image')]\", string='Cover Image') legend_blocked", "channel_ids=channel_ids) return super(Project, self).message_unsubscribe(partner_ids=partner_ids, channel_ids=channel_ids) def _alias_get_creation_values(self): values = super(Project,", "'project.task', 'res_id': self.parent_id.id, 'type': 'ir.actions.act_window', 'context': dict(self._context, create=False) } def", "hours to assign', store=True, group_operator=\"avg\") working_hours_close = fields.Float(compute='_compute_elapsed', string='Working hours", "'project_id', 'type_id', string='Tasks Stages') task_count = fields.Integer(compute='_compute_task_count', string=\"Task Count\") task_ids", "_rating_get_parent_field_name(self): return 'project_id' class ProjectTags(models.Model): \"\"\" Tags of project's tasks", "= fields.Boolean('Sub-tasks', default=lambda self: self.env.user.has_group('project.group_subtask_project')) allow_recurring_tasks = fields.Boolean('Recurring Tasks', default=lambda", "= fields.Integer(string='Repeat Every', default=1, compute='_compute_repeat', readonly=False) repeat_unit = fields.Selection([ ('day',", "for p in self.env['mail.thread']._mail_find_partner_from_emails(email_list, records=task, force_create=False) if p] task.message_subscribe(partner_ids) return", "def _check_parent_id(self): for task in self: if not task._check_recursion(): raise", "tracking=True) partner_id = fields.Many2one('res.partner', string='Customer', compute='_compute_partner_id', store=True, readonly=False, domain=\"['|', ('company_id',", "limit=1).id # ------------------------------------------------ # CRUD overrides # ------------------------------------------------ @api.model def", "disabled_rating_warning = fields.Text(compute='_compute_disabled_rating_warning') def unlink_wizard(self, stage_view=False): self = self.with_context(active_test=False) #", "existing active tasks when subscribing to a project And add", "'Third'), 
('last', 'Last'), ], default='first', compute='_compute_repeat', readonly=False) repeat_weekday = fields.Selection([", "is_closed = fields.Boolean('Closing Stage', help=\"Tasks in this stage are considered", "closed.\") disabled_rating_warning = fields.Text(compute='_compute_disabled_rating_warning') def unlink_wizard(self, stage_view=False): self = self.with_context(active_test=False)", "children: children.active) if not children: return self.env['project.task'] if depth ==", "if task.repeat_unit == 'day' else 1 recurring_dates = self.env['project.task.recurrence']._get_next_recurring_dates( date", "self: task.access_url = '/my/task/%s' % task.id def _compute_access_warning(self): super(Task, self)._compute_access_warning()", "dict(msg_vals or {}) self.ensure_one() project_user_group_id = self.env.ref('project.group_project_user').id group_func = lambda", "h in headers.get('X-Odoo-Objects', '').split(',') if h] current_objects.insert(0, 'project.project-%s, ' %", "if self[day]] return [DAYS.get(self.repeat_weekday)(n)] @api.depends( 'recurring_task', 'repeat_interval', 'repeat_unit', 'repeat_type', 'repeat_until',", "default value for the copied task on project duplication \"\"\"", "stage.') legend_done = fields.Char( 'Green Kanban Label', default=lambda s: _('Ready'),", "task.project_id.privacy_visibility != 'followers': task.allowed_user_ids -= internal_users @api.depends('create_date', 'date_end', 'date_assign') def", "action_context = ast.literal_eval(action['context']) if action['context'] else {} action_context.update(self._context) action_context['search_default_parent_res_name'] =", "the time planned of all the sub-tasks linked to this", "for task in self: if not task._check_recursion(): raise ValidationError(_('Error! 
You", "rec_fields: if task.recurrence_id: task[f] = task.recurrence_id[f] else: if task.recurring_task: task[f]", "['email', 'comment'])]) # recurrence fields allow_recurring_tasks = fields.Boolean(related='project_id.allow_recurring_tasks') recurring_task =", "= \"sequence, name, id\" _rating_satisfaction_days = False # takes all", "new_partner.email), ('stage_id.fold', '=', False)]).write({'partner_id': new_partner.id}) return super(Task, self)._message_post_after_hook(message, msg_vals) def", "= super(Task, tasks).write(vals) # rating on stage if 'stage_id' in", "rating_status = fields.Selection( [('stage', 'Rating when changing stage'), ('periodic', 'Periodical", "task.ribbon_message = _('By saving this change, the customer email will", "# Case management # ---------------------------------------- def stage_find(self, section_id, domain=[], order='sequence'):", "'followers': task.allowed_user_ids -= internal_users @api.depends('create_date', 'date_end', 'date_assign') def _compute_elapsed(self): task_linked_to_calendar", "task.partner_id.email else _('Customer') task._message_add_suggested_recipient(recipients, partner=task.partner_id, reason=reason) elif task.email_from: task._message_add_suggested_recipient(recipients, email=task.email_from,", "On\", readonly=True, index=True) write_date = fields.Datetime(\"Last Updated On\", readonly=True, index=True)", "project.partner_id.phone def _inverse_partner_phone(self): for project in self: if project.partner_id and", "('until', 'End Date'), ('after', 'Number of Repetitions'), ], default=\"forever\", string=\"Until\",", "= fields.Many2one('project.task.type', string='Stage', compute='_compute_stage_id', store=True, readonly=False, ondelete='restrict', tracking=True, index=True, default=_get_default_stage_id,", "reason=reason) elif task.email_from: task._message_add_suggested_recipient(recipients, email=task.email_from, reason=_('Customer Email')) return recipients def", "datetime import timedelta, datetime from 
random import randint from odoo", "or {}) self.ensure_one() project_user_group_id = self.env.ref('project.group_project_user').id group_func = lambda pdata:", "task partner_id is automatically changed also. 2) if the parent", "Once the task partner_id has been set: 1) if the", "default='portal', help=\"Defines the visibility of the tasks of the project:\\n\"", "access to the document. \"\"\" groups = super(Task, self)._notify_get_groups(msg_vals=msg_vals) local_msg_vals", "record cost and revenue on your project.\") favorite_user_ids = fields.Many2many(", "== 'portal'): project.allowed_user_ids |= project.partner_id.user_ids return res def action_unlink(self): wizard", "= fields.Many2one('res.partner', string='Customer', auto_join=True, tracking=True, domain=\"['|', ('company_id', '=', False), ('company_id',", "[(4, self.env.uid)]}) favorite_projects.write({'favorite_user_ids': [(3, self.env.uid)]}) def _get_default_favorite_user_ids(self): return [(6, 0,", "or issue is in that stage.') legend_done = fields.Char( 'Green", "cannot create recursive hierarchy of tasks.')) @api.constrains('allowed_user_ids') def _check_no_portal_allowed(self): for", "of their company.\") allowed_user_ids = fields.Many2many('res.users', compute='_compute_allowed_users', inverse='_inverse_allowed_user') allowed_internal_user_ids =", "return tasks def write(self, vals): now = fields.Datetime.now() if 'parent_id'", "Month'), ('monthly', 'Once a Month'), ('quarterly', 'Quarterly'), ('yearly', 'Yearly')], 'Rating", "init_values): self.ensure_one() if 'kanban_state_label' in init_values and self.kanban_state == 'blocked':", "Label', default=lambda s: _('In Progress'), translate=True, required=True, help='Override the default", "or 'allowed_internal_user_ids' in vals if allowed_users_changed: allowed_users = {project: project.allowed_user_ids", "defaults['project_id'] = self.id return values # --------------------------------------------------- # Actions #", "'active' in vals and not 
vals.get('active') and any(self.mapped('recurrence_id')): # TODO:", "achieve this task (including its sub-tasks).', tracking=True) subtask_planned_hours = fields.Float(\"Sub-tasks", "LICENSE file for full copyright and licensing details. import ast", "result = dict((data['project_id'][0], data['project_id_count']) for data in task_data) for project", "internal_users = task.allowed_user_ids - portal_users if task.project_id.privacy_visibility == 'followers': task.allowed_user_ids", "0.0 (self - task_linked_to_calendar).update(dict.fromkeys( ['working_hours_open', 'working_hours_close', 'working_days_open', 'working_days_close'], 0.0)) @api.depends('stage_id',", "action['context'] = {'default_account_id': self.analytic_account_id.id} action['domain'] = [('account_id', '=', self.analytic_account_id.id)] return", "Tracker module is installed).\") privacy_visibility = fields.Selection([ ('followers', 'Invited internal", "duration_data = task.project_id.resource_calendar_id.get_work_duration_data(dt_create_date, dt_date_end, compute_leaves=True) task.working_hours_close = duration_data['hours'] task.working_days_close =", "'Forever'), ('until', 'End Date'), ('after', 'Number of Repetitions'), ], default=\"forever\",", "a stage even if the project is not assigned to", "def action_unlink(self): wizard = self.env['project.delete.wizard'].create({ 'project_ids': self.ids }) return {", "= super(Task, self)._notify_email_header_dict() if self.project_id: current_objects = [h for h", "rec_fields and vals.get('recurring_task') is True: rec_values = {rec_field: vals[rec_field] for", "or by someone of their company.\") allowed_user_ids = fields.Many2many('res.users', compute='_compute_allowed_users',", "super(Task, self).get_empty_list_help(help) def message_subscribe(self, partner_ids=None, channel_ids=None, subtype_ids=None): \"\"\" Add the", "defaults.get(f) else: task[f] = False def _get_weekdays(self, n=1): self.ensure_one() if", "string=\"Number of documents attached\") 
date_start = fields.Date(string='Start Date') date =", "tasks from old to new project \"\"\" project = self.browse(new_project_id)", "= fields.Many2one( 'res.company', string='Company', compute='_compute_company_id', store=True, readonly=False, required=True, copy=True, default=_default_company_id)", "or project_task_type.is_closed: return {'date_end': fields.Datetime.now()} return {'date_end': False} def unlink(self):", "'<li>...</li>' task.recurrence_message += '</ul>' if task.repeat_type == 'until': task.recurrence_message +=", "altered during copy defaults = self._map_tasks_default_valeus(task, project) if task.parent_id: #", "pdata: pdata['type'] == 'user' and project_user_group_id in pdata['groups'] and pdata['id']", "= fields.Many2one( 'resource.calendar', string='Working Time', related='company_id.resource_calendar_id') type_ids = fields.Many2many('project.task.type', 'project_task_type_rel',", "string=\"Allowed Portal Users\", domain=[('share', '=', True)]) doc_count = fields.Integer(compute='_compute_attached_docs_count', string=\"Number", "emails. 
Also give access button to portal users and portal", "for task in self: portal_users = task.allowed_user_ids.filtered('share') internal_users = task.allowed_user_ids", "_track_template(self, changes): res = super(Task, self)._track_template(changes) test_task = self[0] if", "def _track_subtype(self, init_values): self.ensure_one() if 'kanban_state_label' in init_values and self.kanban_state", "return self.env.ref('project.mt_task_ready') elif 'stage_id' in init_values: return self.env.ref('project.mt_task_stage') return super(Task,", "5) delta = task.repeat_interval if task.repeat_unit == 'day' else 1", "archiving/unarchiving a project does it on its tasks, too self.with_context(active_test=False).mapped('tasks').write({'active':", "(self._name, self.id) return action def _compute_is_favorite(self): for project in self:", "help=\"Tasks in this stage are considered as closed.\") disabled_rating_warning =", "not self.alias_enabled: self.alias_name = False def _compute_alias_enabled(self): for project in", "'=', 'project.task'), ('res_id', '=', id), ('mimetype', 'ilike', 'image')]\", string='Cover Image')", "not self._check_recursion(): raise ValidationError(_('Error! You cannot create recursive hierarchy of", "def _compute_disabled_rating_warning(self): for stage in self: disabled_projects = stage.project_ids.filtered(lambda p:", "def _create_analytic_account(self): for project in self: analytic_account = self.env['account.analytic.account'].create({ 'name':", "tasks. 
(%s)\", user_names)) def _compute_attachment_ids(self): for task in self: attachment_ids", "= fields.Many2many('project.project', 'project_task_type_rel', 'type_id', 'project_id', string='Projects', default=_get_default_project_ids) legend_blocked = fields.Char(", "['&', ('recurrence_id', '=', task.recurrence_id.id), ('create_date', '>=', task.create_date)]]) else: recurrence_domain =", "task.\")) if 'active' in vals and not vals.get('active') and any(self.mapped('recurrence_id')):", "res def action_unlink(self): wizard = self.env['project.delete.wizard'].create({ 'project_ids': self.ids }) return", "%s') % (self.name,) action_context = ast.literal_eval(action['context']) if action['context'] else {}", "sent to the customer when the task or issue reaches", "readonly=False) repeat_unit = fields.Selection([ ('day', 'Days'), ('week', 'Weeks'), ('month', 'Months'),", "found. create_context = dict(self.env.context or {}) create_context['default_user_id'] = False if", "default value displayed for the normal state for kanban selection,", "self.tag_ids: headers['X-Odoo-Tags'] = ','.join(self.tag_ids.mapped('name')) return headers def _message_post_after_hook(self, message, msg_vals):", "task.working_days_close = duration_data['days'] else: task.working_hours_close = 0.0 task.working_days_close = 0.0", "required=True, default='monthly') _sql_constraints = [ ('project_date_greater', 'check(date >= date_start)', 'Error!", "user_email = fields.Char(related='user_id.email', string='User Email', readonly=True, related_sudo=False) attachment_ids = fields.One2many('ir.attachment',", "assignation/closing. 
working_hours_open = fields.Float(compute='_compute_elapsed', string='Working hours to assign', store=True, group_operator=\"avg\")", "else None active = fields.Boolean('Active', default=True) name = fields.Char(string='Stage Name',", "[self.env.uid])] name = fields.Char(\"Name\", index=True, required=True, tracking=True) description = fields.Html()", "in order to make it accessible by the recipient(s).\") @api.depends('child_ids.planned_hours')", "= self.project_id.subtask_project_id or self.project_id ctx = dict(self.env.context) ctx = {k:", "or equal to the initially time planned of this task.\")", "fields.Many2one('res.users', string='Project Manager', related='project_id.user_id', readonly=True) company_id = fields.Many2one( 'res.company', string='Company',", "required=True, default='portal', help=\"Defines the visibility of the tasks of the", "if task.partner_id and task.partner_phone != task.partner_id.phone: task.partner_phone = task.partner_id.phone def", "= fields.Selection([ ('daily', 'Daily'), ('weekly', 'Weekly'), ('bimonthly', 'Twice a Month'),", "allowed_internal_user_ids = fields.Many2many('res.users', 'project_allowed_internal_users_rel', string=\"Allowed Internal Users\", default=lambda self: self.env.user,", "recurrence raise UserError(_('You cannot delete recurring tasks. 
Please, disable the", "return self.env.company @api.model def _read_group_stage_ids(self, stages, domain, order): search_domain =", "_('Delete Stage'), 'view_mode': 'form', 'res_model': 'project.task.type.delete.wizard', 'views': [(self.env.ref('project.view_project_task_type_delete_wizard').id, 'form')], 'type':", "def message_unsubscribe(self, partner_ids=None, channel_ids=None): \"\"\" Unsubscribe from all tasks when", "sub.default)).ids if project_subtypes else None if not subtype_ids or task_subtypes:", "!= 'portal': task.allowed_user_ids -= portal_users elif task.project_id.privacy_visibility != 'followers': task.allowed_user_ids", "copy=False) partner_phone = fields.Char( compute='_compute_partner_phone', inverse='_inverse_partner_phone', string=\"Phone\", readonly=False, store=True, copy=False)", "@api.constrains('parent_id') def _check_parent_id(self): for task in self: if not task._check_recursion():", "in vals and not vals.get('active') and any(self.mapped('recurrence_id')): # TODO: show", "if vals.get('stage_id'): vals.update(self.update_date_end(vals['stage_id'])) vals['date_last_stage_update'] = fields.Datetime.now() # recurrence rec_fields =", "'portal') tasks.sudo().write({'allowed_user_ids': [(4, user.id) for user in new_allowed_users]}) return res", "else True if allowed_users_changed: for project in self: permission_removed =", "related='project_id.user_id', readonly=True) company_id = fields.Many2one( 'res.company', string='Company', compute='_compute_company_id', store=True, readonly=False,", "'parent_id' in vals and vals['parent_id'] in self.ids: raise UserError(_(\"Sorry. 
You", "displaying a list of tasks.\") stage_id = fields.Many2one('project.task.type', string='Stage', compute='_compute_stage_id',", "recurrence_message = fields.Char(string='Next Recurrencies', compute='_compute_recurrence_message') repeat_interval = fields.Integer(string='Repeat Every', default=1,", "---------------------------------------- # Case management # ---------------------------------------- def stage_find(self, section_id, domain=[],", "readonly=False) repeat_on_year = fields.Selection([ ('date', 'Date of the Year'), ('day',", "priority = fields.Selection([ ('0', 'Normal'), ('1', 'Important'), ], default='0', index=True,", "import ast from datetime import timedelta, datetime from random import", "name: tname = name.lower() self = self.with_context( empty_list_help_id=self.env.context.get('default_project_id'), empty_list_help_model='project.project', empty_list_help_document_name=tname,", "Blocked Explanation', readonly=True, related_sudo=False) legend_done = fields.Char(related='stage_id.legend_done', string='Kanban Valid Explanation',", "stage in self: disabled_projects = stage.project_ids.filtered(lambda p: not p.rating_active) if", "# from mail_thread task.attachment_ids = [(6, 0, list(set(attachment_ids) - set(message_attachment_ids)))]", "self = self.with_context(active_test=False) # retrieves all the projects with a", "search_domain stage_ids = stages._search(search_domain, order=order, access_rights_uid=SUPERUSER_ID) return stages.browse(stage_ids) active =", "def write(self, vals): if 'active' in vals and not vals['active']:", "self).copy(default) if self.subtask_project_id == self: project.subtask_project_id = project for follower", "will set the kanban state to 'blocked' (red bullet).\\n\") is_closed", "Task(models.Model): _name = \"project.task\" _description = \"Task\" _date_name = \"date_assign\"", "create recursive hierarchy of task(s).')) @api.model def get_empty_list_help(self, help): tname", "the customer email and phone number will 
also be updated.')", "else the parent task partner_id. Once the task partner_id has", "not # want the gateway user to be responsible if", "dict((data['project_id'][0], data['project_id_count']) for data in task_data) for project in self:", "see all the analytic lines of the project's analytic account", "False if custom_values is None: custom_values = {} defaults =", "self.filtered(lambda task: task.project_id): task.company_id = task.project_id.company_id @api.depends('project_id') def _compute_stage_id(self): for", "def toggle_favorite(self): favorite_projects = not_fav_projects = self.env['project.project'].sudo() for project in", "subtype_ids=task_subtypes) if partner_ids: all_users = self.env['res.partner'].browse(partner_ids).user_ids portal_users = all_users.filtered('share') internal_users", "message, msg_vals): if message.attachment_ids and not self.displayed_image_id: image_attachments = message.attachment_ids.filtered(lambda", "fields.Char(compute='_compute_kanban_state_label', string='Kanban State Label', tracking=True) create_date = fields.Datetime(\"Created On\", readonly=True,", "], default='0', index=True, string=\"Priority\") sequence = fields.Integer(string='Sequence', index=True, default=10, help=\"Gives", "partner_ids=None, channel_ids=None): \"\"\" Unsubscribe from all tasks when unsubscribing from", "'ir.actions.act_window', 'res_model': 'project.task', 'view_mode': 'tree,form', 'domain': [('recurrence_id', 'in', self.recurrence_id.ids)], }", "privacy_visibility = fields.Selection([ ('followers', 'Invited internal users'), ('employees', 'All internal", "def message_update(self, msg, update_vals=None): \"\"\" Override to update the task", "def _creation_subtype(self): return self.env.ref('project.mt_task_new') def _track_subtype(self, init_values): self.ensure_one() if 'kanban_state_label'", "task.project_id.privacy_visibility == 'portal': task._portal_ensure_token() return tasks def write(self, vals): now", "fields.Integer(string='Repeat 
Every', default=1, compute='_compute_repeat', readonly=False) repeat_unit = fields.Selection([ ('day', 'Days'),", "not vals.get('allow_recurring_tasks'): self.env['project.task'].search([('project_id', 'in', self.ids), ('recurring_task', '=', True)]).write({'recurring_task': False}) if", "portal users and all internal users'), ], string='Visibility', required=True, default='portal',", "'day') or (task.repeat_unit == 'year' and task.repeat_on_year == 'day') task.repeat_show_dow", "default='date', compute='_compute_repeat', readonly=False) repeat_on_year = fields.Selection([ ('date', 'Date of the", "None: default = {} if not default.get('name'): default['name'] = _(\"%s", "a dialog to stop the recurrence raise UserError(_('You cannot archive", "document without customer means that it was created through the", "task.allowed_user_ids -= internal_users @api.depends('create_date', 'date_end', 'date_assign') def _compute_elapsed(self): task_linked_to_calendar =", "elapsed between record creation and assignation/closing. working_hours_open = fields.Float(compute='_compute_elapsed', string='Working", "= [] if recurrence_update == 'subsequent': for task in self:", "\"- Invited portal and all internal users: employees may see", "project partner_id changes, the task partner_id is automatically changed also.", "sequence = fields.Integer(string='Sequence', index=True, default=10, help=\"Gives the sequence order when", "+ timedelta(days=periods.get(project.rating_status_period, 0)) @api.model def _map_tasks_default_valeus(self, task, project): \"\"\" get", "all internal users: employees may see everything.\" \" Portal users", "users to see the project's tasks. 
(%s)\", user_names)) def _compute_attachment_ids(self):", "# update context, with all default values as 'quick_create' does", "Rating business # --------------------------------------------------- # This method should be called", "l: l.recurrence_id) count = self.env['project.task'].read_group([('recurrence_id', 'in', recurring_tasks.recurrence_id.ids)], ['id'], 'recurrence_id') tasks_count", "portal customers. If they are notified they should probably have", "the first found return self.env['project.task.type'].search(search_domain, order=order, limit=1).id # ------------------------------------------------ #", "in self: task.access_url = '/my/task/%s' % task.id def _compute_access_warning(self): super(Task,", "domain, order): search_domain = [('id', 'in', stages.ids)] if 'default_project_id' in", "days to assign', store=True, group_operator=\"avg\") working_days_close = fields.Float(compute='_compute_elapsed', string='Working days", "= fields.Float(compute='_compute_elapsed', string='Working days to close', store=True, group_operator=\"avg\") # customer", "task.allowed_user_ids - portal_users if task.project_id.privacy_visibility == 'followers': task.allowed_user_ids |= task.project_id.allowed_internal_user_ids", "if section_ids: search_domain = [('|')] * (len(section_ids) - 1) for", "% (self.name,) action_context = ast.literal_eval(action['context']) if action['context'] else {} action_context.update(self._context)", "self: project.subtask_project_id = project for follower in self.message_follower_ids: project.message_subscribe(partner_ids=follower.partner_id.ids, subtype_ids=follower.subtype_ids.ids)", "for task in self: if task.partner_id: if task.project_id.partner_id: task.partner_id =", "self.filtered(lambda t: t.project_id.privacy_visibility != 'portal'): portal_users = task.allowed_user_ids.filtered('share') if portal_users:", "can be the current project itself.\") allow_subtasks = fields.Boolean('Sub-tasks', default=lambda", "in self: 
task.repeat_show_day = task.recurring_task and (task.repeat_unit == 'month' and", "translate=True, required=True, help='Override the default value displayed for the blocked", "custom_values=None): \"\"\" Overrides mail_thread message_new that is called by the", "tasks when subscribing to a project And add the portal", "= fields.Datetime(\"Last Updated On\", readonly=True, index=True) date_end = fields.Datetime(string='Ending Date',", "= project.partner_phone @api.onchange('alias_enabled') def _onchange_alias_name(self): if not self.alias_enabled: self.alias_name =", "= fields.Datetime(string='Ending Date', index=True, copy=False) date_assign = fields.Datetime(string='Assigning Date', index=True,", "project:\\n\" \"- Invited internal users: employees may only see the", "('fold', '=', False), ('is_closed', '=', False)]) else: task.stage_id = False", "to record cost and revenue on your project.\") favorite_user_ids =", "stage_id = fields.Many2one('project.task.type', string='Stage', compute='_compute_stage_id', store=True, readonly=False, ondelete='restrict', tracking=True, index=True,", "legend_normal = fields.Char(related='stage_id.legend_normal', string='Kanban Ongoing Explanation', readonly=True, related_sudo=False) is_closed =", "'mail.template', string='Email Template', domain=[('model', '=', 'project.task')], help=\"If set an email", "ctx return action def action_recurring_tasks(self): return { 'name': 'Tasks in", "default stages \"\"\" # collect all section_ids section_ids = []", "'fri', 'sat', 'sun', 'repeat_day', 'repeat_week', 'repeat_month', 'repeat_weekday'] @api.depends('recurring_task', 'repeat_unit', 'repeat_on_month',", "= fields.Many2one('project.task.recurrence', copy=False) recurrence_update = fields.Selection([ ('this', 'This task'), ('subsequent',", "True def _compute_attached_docs_count(self): Attachment = self.env['ir.attachment'] for project in self:", "the privacy to 'Visible by following customers' in order to", "|= 
project.partner_id.user_ids return res def action_unlink(self): wizard = self.env['project.delete.wizard'].create({ 'project_ids':", "all(d in default_fields for d in days): vals[days[week_start]] = True", "'Twice a Month'), ('monthly', 'Once a Month'), ('quarterly', 'Quarterly'), ('yearly',", "# --------------------------------------------------- # Business Methods # --------------------------------------------------- @api.model def _create_analytic_account_from_values(self,", "desc\" _check_company_auto = True def _get_default_stage_id(self): \"\"\" Gives default stage_id", "readonly=True, related_sudo=False) is_closed = fields.Boolean(related=\"stage_id.is_closed\", string=\"Closing Stage\", readonly=True, related_sudo=False) parent_id", "action['domain'] = str([ '|', '&', ('res_model', '=', 'project.project'), ('res_id', 'in',", "stage'), ('periodic', 'Periodical Rating') ], 'Customer Ratings Status', default=\"stage\", required=True,", "task.repeat_week, task.repeat_month, count=number_occurrences) date_format = self.env['res.lang']._lang_get(self.env.user.lang).date_format task.recurrence_message = '<ul>' for", "displayed for the blocked state for kanban selection, when the", "readonly=True) project_id = fields.Many2one('project.project', string='Project', compute='_compute_project_id', store=True, readonly=False, index=True, tracking=True,", "message with a specified recipient (not a follower, a specific", "portal users \"\"\" res = super(Task, self).message_subscribe(partner_ids=partner_ids, channel_ids=channel_ids, subtype_ids=subtype_ids) if", "and self.kanban_state == 'blocked': return self.env.ref('project.mt_task_blocked') elif 'kanban_state_label' in init_values", "'portal'): task.access_warning = _( \"The task cannot be shared with", "# archiving/unarchiving a project does it on its tasks, too", "get customer feedback?\\n\" \"- Rating when changing stage: an email", "state to 'blocked' (red bullet).\\n\") is_closed = fields.Boolean('Closing Stage', 
help=\"Tasks", "= super(Task, self).default_get(default_fields) days = list(DAYS.keys()) week_start = fields.Datetime.today().weekday() if", "compute='_compute_project_id', store=True, readonly=False, index=True, tracking=True, check_company=True, change_default=True) planned_hours = fields.Float(\"Initially", "('company_id', '=', False), ('company_id', '=', company_id)]\") partner_email = fields.Char( compute='_compute_partner_email',", "channel_ids=channel_ids) def _alias_get_creation_values(self): values = super(Project, self)._alias_get_creation_values() values['alias_model_id'] = self.env['ir.model']._get('project.task').id", "readonly=False) tue = fields.Boolean(string=\"Tue\", compute='_compute_repeat', readonly=False) wed = fields.Boolean(string=\"Wed\", compute='_compute_repeat',", "'recurring_task', 'repeat_interval', 'repeat_unit', 'repeat_type', 'repeat_until', 'repeat_number', 'repeat_on_month', 'repeat_on_year', 'mon', 'tue',", "repeat_until = fields.Date(string=\"End Date\", compute='_compute_repeat', readonly=False) repeat_number = fields.Integer(string=\"Repetitions\", default=1,", "res = {task.id: aliases.get(task.project_id.id) for task in self} leftover =", "= {k: v for k, v in ctx.items() if not", "the task or issue is in that stage.') legend_normal =", "synchronized \" \"with Tasks (or optionally Issues if the Issue", "('company_id', '=', company_id)]\") partner_is_company = fields.Boolean(related='partner_id.is_company', readonly=True) commercial_partner_id = fields.Many2one(related='partner_id.commercial_partner_id')", "-*- coding: utf-8 -*- # Part of Odoo. 
See LICENSE", "Activities\") resource_calendar_id = fields.Many2one( 'resource.calendar', string='Working Time', related='company_id.resource_calendar_id') type_ids =", "'image')]\", string='Cover Image') legend_blocked = fields.Char(related='stage_id.legend_blocked', string='Kanban Blocked Explanation', readonly=True,", "\"Task\" _date_name = \"date_assign\" _inherit = ['portal.mixin', 'mail.thread.cc', 'mail.activity.mixin', 'rating.mixin']", "'type': 'ir.actions.act_window', 'res_id': wizard.id, 'target': 'new', 'context': self.env.context, } def", "creation of tasks # 2) Ensure the defaults are correct", "default=None): if default is None: default = {} if not", "help=\"If set and if the project's rating configuration is 'Rating", "ProjectTags(models.Model): \"\"\" Tags of project's tasks \"\"\" _name = \"project.tags\"", "see all the rating of the project and activate default", "def unlink(self): if any(self.mapped('recurrence_id')): # TODO: show a dialog to", "task.project_id.allowed_internal_user_ids task.allowed_user_ids -= portal_users elif task.project_id.privacy_visibility == 'portal': task.allowed_user_ids |=", "task.recurrence_id.write(rec_values) elif vals.get('recurring_task'): rec_values['next_recurrence_date'] = fields.Datetime.today() recurrence = self.env['project.task.recurrence'].create(rec_values) task.recurrence_id", "fields.Selection([ ('date', 'Date of the Year'), ('day', 'Day of the", "'quick_create' does not contains all field in its view if", "write access for project. 
not_fav_projects.write({'favorite_user_ids': [(4, self.env.uid)]}) favorite_projects.write({'favorite_user_ids': [(3, self.env.uid)]})", "from a project \"\"\" self.mapped('tasks').message_unsubscribe(partner_ids=partner_ids, channel_ids=channel_ids) return super(Project, self).message_unsubscribe(partner_ids=partner_ids, channel_ids=channel_ids)", "# ------------------------------------------------ # CRUD overrides # ------------------------------------------------ @api.model def default_get(self,", "stage even if the project is not assigned to the", "--------------------------------------------------- @api.depends('parent_id.partner_id', 'project_id.partner_id') def _compute_partner_id(self): \"\"\" If a task has", "self.recurrence_id.ids)], } # --------------------------------------------------- # Rating business # --------------------------------------------------- def", "On\", readonly=True, index=True) date_end = fields.Datetime(string='Ending Date', index=True, copy=False) date_assign", "'repeat_week', 'repeat_month', 'repeat_weekday'] @api.depends('recurring_task', 'repeat_unit', 'repeat_on_month', 'repeat_on_year') def _compute_repeat_visibility(self): for", "], string='Visibility', required=True, default='portal', help=\"Defines the visibility of the tasks", "for the normal state for kanban selection, when the task", "users may see project and tasks followed by\\n\" \" them", "_compute_alias_enabled(self): for project in self: project.alias_enabled = project.alias_domain and project.alias_id.alias_name", "string='Cover Image') legend_blocked = fields.Char(related='stage_id.legend_blocked', string='Kanban Blocked Explanation', readonly=True, related_sudo=False)", "color = fields.Integer(string='Color Index') user_id = fields.Many2one('res.users', string='Project Manager', default=lambda", "raise UserError(_('You cannot archive recurring tasks. 
Please, disable the recurrence", "all subtasks of current task action['domain'] = [('id', 'child_of', self.id),", "propagate an active_test context key task_ids = self.env['project.task'].with_context(active_test=False).search([('project_id', '=', self.id)],", "groups.insert(0, ( 'allowed_portal_users', lambda pdata: pdata['type'] == 'portal' and pdata['id']", "action to see all the analytic lines of the project's", "in which sub-tasks of the current project will be created.", "= 0.0 task.working_days_open = 0.0 if task.date_end: dt_date_end = fields.Datetime.from_string(task.date_end)", "when there are no records in that stage to display.')", "in self.ids: raise UserError(_(\"Sorry. You can't set a task as", "or issue is in that stage.') legend_normal = fields.Char( 'Grey", "on your dashboard.\") label_tasks = fields.Char(string='Use Tasks as', default='Tasks', help=\"Label", "if project_id and not \"company_id\" in vals: vals[\"company_id\"] = self.env[\"project.project\"].browse(", "if self.tag_ids: headers['X-Odoo-Tags'] = ','.join(self.tag_ids.mapped('name')) return headers def _message_post_after_hook(self, message,", "user_id change: update date_assign if vals.get('user_id') and 'date_assign' not in", "and all internal users: employees may see everything.\" \" Portal", "('week', 'Weeks'), ('month', 'Months'), ('year', 'Years'), ], default='week', compute='_compute_repeat', readonly=False)", "= fields.Boolean(string=\"Allow Sub-tasks\", related=\"project_id.allow_subtasks\", readonly=True) subtask_count = fields.Integer(\"Sub-task count\", compute='_compute_subtask_count')", "fields.Char( 'Red Kanban Label', default=lambda s: _('Blocked'), translate=True, required=True, help='Override", "default_fields: vals['repeat_day'] = str(fields.Datetime.today().day) if 'repeat_month' in default_fields: vals['repeat_month'] =", "project \"\"\" project = self.browse(new_project_id) tasks = self.env['project.task'] # We", "fields.Datetime.now() # Stage change: Update 
date_end if folded stage and", "if self.recurrence_id: default['recurrence_id'] = self.recurrence_id.copy().id return super(Task, self).copy(default) @api.constrains('parent_id') def", "the recipient(s).\") @api.depends('child_ids.planned_hours') def _compute_subtask_planned_hours(self): for task in self: task.subtask_planned_hours", "to see the project's tasks. (%s)\", user_names)) def _compute_attachment_ids(self): for", "the project and activate default filters\"\"\" action = self.env['ir.actions.act_window']._for_xml_id('project.rating_rating_action_view_project_rating') action['name']", "avoid ugly hacks in JS. new_partner = message.partner_ids.filtered(lambda partner: partner.email", "'in', self.recurrence_id.ids)] tasks |= self.env['project.task'].search(recurrence_domain) result = super(Task, tasks).write(vals) #", "receive email.\", index=True, compute='_compute_email_from', store=\"True\", readonly=False) allowed_user_ids = fields.Many2many('res.users', string=\"Visible", "project in self: project.task_count = result.get(project.id, 0) def attachment_tree_view(self): action", "= self.filtered( lambda task: task.project_id.resource_calendar_id and task.create_date ) for task", "archive it or first delete all of its tasks.')) #", "fields.Boolean('Sub-tasks', default=lambda self: self.env.user.has_group('project.group_subtask_project')) allow_recurring_tasks = fields.Boolean('Recurring Tasks', default=lambda self:", "'read' _order = \"priority desc, sequence, id desc\" _check_company_auto =", "can't set a task as its parent task.\")) if 'active'", "task_ids = fields.One2many('project.task', 'project_id', string='Tasks', domain=['|', ('stage_id.fold', '=', False), ('stage_id',", "fields.Many2many('project.project', 'project_task_type_rel', 'type_id', 'project_id', string='Projects', default=_get_default_project_ids) legend_blocked = fields.Char( 'Red", "as closed.\") disabled_rating_warning = fields.Text(compute='_compute_disabled_rating_warning') def 
unlink_wizard(self, stage_view=False): self =", "_get_default_stage_id or _stage_find), if project_id not in default_stage: default_stage[project_id] =", "kanban_state = fields.Selection([ ('normal', 'In Progress'), ('done', 'Ready'), ('blocked', 'Blocked')],", "[('model', '=', self._name), ('message_type', 'in', ['email', 'comment'])]) # recurrence fields", "map_tasks(self, new_project_id): \"\"\" copy and map tasks from old to", "{'default_account_id': self.analytic_account_id.id} action['domain'] = [('account_id', '=', self.analytic_account_id.id)] return action def", "'res_model': 'project.task', 'view_mode': 'tree,form', 'domain': [('recurrence_id', 'in', self.recurrence_id.ids)], } #", "action def _compute_is_favorite(self): for project in self: project.is_favorite = self.env.user", "rec_values = {rec_field: vals[rec_field] for rec_field in rec_fields} for task", "project): \"\"\" get the default value for the copied task", "for project in self: project.task_count = result.get(project.id, 0) def attachment_tree_view(self):", "default=lambda self: self.env.user.has_group('project.group_subtask_project')) allow_recurring_tasks = fields.Boolean('Recurring Tasks', default=lambda self: self.env.user.has_group('project.group_project_recurring_tasks'))", "- portal_users self.allowed_portal_user_ids |= portal_users self.allowed_internal_user_ids |= internal_users return res", "vals and not vals.get('recurring_task'): self.recurrence_id.unlink() tasks = self recurrence_update =", "if project.partner_id and project.partner_email != project.partner_id.email: project.partner_id.email = project.partner_email @api.depends('partner_id.phone')", "if self.subtask_project_id == self: project.subtask_project_id = project for follower in", "if 'stage_id' in vals and vals.get('stage_id'): self.filtered(lambda x: x.project_id.rating_active and", "= fields.Char( 'Grey Kanban Label', default=lambda s: _('In Progress'), translate=True,", "visibility setting doesn't allow portal users 
to see the project's", "called once a day by the scheduler @api.model def _send_rating_all(self):", "res # ---------------------------------------- # Case management # ---------------------------------------- def stage_find(self,", "= fields.Boolean(string=\"Fri\", compute='_compute_repeat', readonly=False) sat = fields.Boolean(string=\"Sat\", compute='_compute_repeat', readonly=False) sun", "('monthly', 'Once a Month'), ('quarterly', 'Quarterly'), ('yearly', 'Yearly')], 'Rating Frequency',", "fields.Char( compute='_compute_partner_phone', inverse='_inverse_partner_phone', string=\"Phone\", readonly=False, store=True, copy=False) ribbon_message = fields.Char('Ribbon", "def _inverse_allowed_user(self): for project in self: allowed_users = project.allowed_user_ids project.allowed_portal_user_ids", "your project.\") favorite_user_ids = fields.Many2many( 'res.users', 'project_favorite_user_rel', 'project_id', 'user_id', default=_get_default_favorite_user_ids,", "tname = name.lower() self = self.with_context( empty_list_help_id=self.env.context.get('default_project_id'), empty_list_help_model='project.project', empty_list_help_document_name=tname, )", "api, fields, models, tools, SUPERUSER_ID, _ from odoo.exceptions import UserError,", "= 'sequence, id' def _get_default_project_ids(self): default_project_id = self.env.context.get('default_project_id') return [default_project_id]", "vals[days[week_start]] = True if 'repeat_day' in default_fields: vals['repeat_day'] = str(fields.Datetime.today().day)", "else None task_subtypes = (project_subtypes.mapped('parent_id') | project_subtypes.filtered(lambda sub: sub.internal or", "portal: include comment and incoming emails in communication history website_message_ids", "TODO: show a dialog to stop the recurrence raise UserError(_('You", "= self._fields.get('repeat_weekday').selection[week_start][0] return vals @api.model_create_multi def create(self, vals_list): default_stage =", "= True if 'repeat_day' in default_fields: 
vals['repeat_day'] = str(fields.Datetime.today().day) if", "for project in self: if project.partner_id and project.partner_id.email != project.partner_email:", "customer email will also be updated.') elif will_write_phone: task.ribbon_message =", "empty_list_help_document_name=tname, ) return super(Task, self).get_empty_list_help(help) def message_subscribe(self, partner_ids=None, channel_ids=None, subtype_ids=None):", "project_id = vals.get('project_id') or self.env.context.get('default_project_id') if project_id and not \"company_id\"", "dt_date_end = fields.Datetime.from_string(task.date_end) duration_data = task.project_id.resource_calendar_id.get_work_duration_data(dt_create_date, dt_date_end, compute_leaves=True) task.working_hours_close =", "'repeat_type', 'repeat_until', 'repeat_number', 'repeat_on_month', 'repeat_on_year', 'mon', 'tue', 'wed', 'thu', 'fri',", "project.partner_id.email = project.partner_email @api.depends('partner_id.phone') def _compute_partner_phone(self): for project in self:", "name = fields.Char(string='Title', tracking=True, required=True, index=True) description = fields.Html(string='Description') priority", "Updated On\", readonly=True, index=True) date_end = fields.Datetime(string='Ending Date', index=True, copy=False)", "'in', ['email', 'comment'])]) # recurrence fields allow_recurring_tasks = fields.Boolean(related='project_id.allow_recurring_tasks') recurring_task", "current project will be created. It can be the current", "the parent task partner_id. 
Once the task partner_id has been", "project.partner_phone = project.partner_id.phone def _inverse_partner_phone(self): for project in self: if", "= _(\"%s (copy)\", self.name) if self.recurrence_id: default['recurrence_id'] = self.recurrence_id.copy().id return", "compute is_favorite to dodge allow write access right if 'is_favorite'", "('date', 'Date of the Month'), ('day', 'Day of the Month'),", "do not want to explicitly set user_id to False; however", "# Computed field about working time elapsed between record creation", "'date') task.repeat_show_week = task.recurring_task and (task.repeat_unit == 'month' and task.repeat_on_month", "repeat_on_year = fields.Selection([ ('date', 'Date of the Year'), ('day', 'Day", "\"Project Tags\" def _get_default_color(self): return randint(1, 11) name = fields.Char('Name',", "for task in self: task.email_from = task.partner_id.email or ((task.partner_id or", "the kanban state when the customer replies to the feedback", "allowed_users.get(project) - project.allowed_user_ids allowed_portal_users_removed = permission_removed.filtered('share') project.message_unsubscribe(allowed_portal_users_removed.partner_id.commercial_partner_id.ids) for task in", "if new_partner: self.search([ ('partner_id', '=', False), ('email_from', '=', new_partner.email), ('stage_id.fold',", "\" * A medium or a bad feedback will set", ">= date_start)', 'Error! 
project start-date must be lower than project", "not portal_privacy: group_data['has_button_access'] = False elif group_name == 'portal_customer' and", "_compute_repeat_visibility(self): for task in self: task.repeat_show_day = task.recurring_task and (task.repeat_unit", "}) action['context'] = ctx return action def action_recurring_tasks(self): return {", "@api.depends('recurrence_id') def _compute_recurring_count(self): self.recurring_count = 0 recurring_tasks = self.filtered(lambda l:", "= task.stage_find(task.project_id.id, [ ('fold', '=', False), ('is_closed', '=', False)]) else:", "the current project itself.\") allow_subtasks = fields.Boolean('Sub-tasks', default=lambda self: self.env.user.has_group('project.group_subtask_project'))", "translate=True, required=True, help='Override the default value displayed for the normal", "self: if task.partner_id: reason = _('Customer Email') if task.partner_id.email else", "'January'), ('february', 'February'), ('march', 'March'), ('april', 'April'), ('may', 'May'), ('june',", "[(4, user.id) for user in new_allowed_users]}) return res # ----------------------------------------", "lambda pdata: pdata['type'] == 'portal' and pdata['id'] in allowed_user_ids, {}", "v for k, v in ctx.items() if not k.startswith('search_default_')} ctx.update({", "Users\", domain=[('share', '=', True)]) doc_count = fields.Integer(compute='_compute_attached_docs_count', string=\"Number of documents", "'<li>%s</li>' % date.strftime(date_format) if task.repeat_type == 'after' and task.repeat_number >", "'partner_phone', 'partner_id') def _compute_ribbon_message(self): for task in self: will_write_email =", "fields.Integer(string=\"Tasks in Recurrence\", compute='_compute_recurring_count') recurrence_id = fields.Many2one('project.task.recurrence', copy=False) recurrence_update =", "= fields.Boolean('Closing Stage', help=\"Tasks in this stage are considered as", "that stage.') legend_normal = fields.Char( 'Grey Kanban Label', default=lambda s:", 
"fields.Boolean(compute='_compute_repeat_visibility') repeat_show_day = fields.Boolean(compute='_compute_repeat_visibility') repeat_show_week = fields.Boolean(compute='_compute_repeat_visibility') repeat_show_month = fields.Boolean(compute='_compute_repeat_visibility')", "['repeat_interval', 'repeat_unit', 'repeat_type', 'repeat_until', 'repeat_number', 'repeat_on_month', 'repeat_on_year', 'mon', 'tue', 'wed',", "_get_default_color(self): return randint(1, 11) name = fields.Char('Name', required=True) color =", "not in vals: vals['date_assign'] = now # recurrence fields rec_fields", "ctx = {k: v for k, v in ctx.items() if", "the task or issue reaches this step.\") fold = fields.Boolean(string='Folded", "document. \"\"\" groups = super(Task, self)._notify_get_groups(msg_vals=msg_vals) local_msg_vals = dict(msg_vals or", "'project.task'), ('res_id', 'in', self.task_ids.ids) ]) action['context'] = \"{'default_res_model': '%s','default_res_id': %d}\"", "test_task = self[0] if 'stage_id' in changes and test_task.stage_id.mail_template_id: res['stage_id']", "'date_assign') def _compute_elapsed(self): task_linked_to_calendar = self.filtered( lambda task: task.project_id.resource_calendar_id and", "rec_values['next_recurrence_date'] = fields.Datetime.today() recurrence = self.env['project.task.recurrence'].create(rec_values) vals['recurrence_id'] = recurrence.id tasks", "= fields.Boolean(default=True, help=\"If the active field is set to False,", "result.get(project.id, 0) def attachment_tree_view(self): action = self.env['ir.actions.act_window']._for_xml_id('base.action_attachment') action['domain'] = str([", "tasks \"\"\" _name = \"project.tags\" _description = \"Project Tags\" def", "{'date_end': False} def unlink(self): if any(self.mapped('recurrence_id')): # TODO: show a", "partner_ids=partner_ids, channel_ids=channel_ids, subtype_ids=task_subtypes) if partner_ids: all_users = self.env['res.partner'].browse(partner_ids).user_ids portal_users =", 
"self.project_id.company_id != self.company_id: self.project_id = False @api.depends('project_id.company_id') def _compute_company_id(self): for", "= self.env['project.task'].read_group([('project_id', 'in', self.ids), '|', '&', ('stage_id.is_closed', '=', False), ('stage_id.fold',", "= fields.Html(string='Description') priority = fields.Selection([ ('0', 'Normal'), ('1', 'Important'), ],", "in self.env['mail.thread']._mail_find_partner_from_emails(email_list, records=self, force_create=False) if p] self.message_subscribe(partner_ids) return super(Task, self).message_update(msg,", "in default_fields: vals['repeat_month'] = self._fields.get('repeat_month').selection[fields.Datetime.today().month - 1][0] if 'repeat_until' in", "= self.env['project.task.recurrence'].create(rec_values) task.recurrence_id = recurrence.id if 'recurring_task' in vals and", "self).create(vals) if not vals.get('subtask_project_id'): project.subtask_project_id = project.id if project.privacy_visibility ==", "tracking=True) date_last_stage_update = fields.Datetime(string='Last Stage Update', index=True, copy=False, readonly=True) project_id", "in default_fields: vals['repeat_day'] = str(fields.Datetime.today().day) if 'repeat_month' in default_fields: vals['repeat_month']", "fields.Boolean(compute='_compute_repeat_visibility') repeat_show_month = fields.Boolean(compute='_compute_repeat_visibility') @api.model def _get_recurrence_fields(self): return ['repeat_interval', 'repeat_unit',", "self.with_context(active_id=self.id, active_ids=self.ids) \\ .env.ref('project.act_project_project_2_project_task_all') \\ .sudo().read()[0] action['display_name'] = self.name return", "(self - task_linked_to_calendar).update(dict.fromkeys( ['working_hours_open', 'working_hours_close', 'working_days_open', 'working_days_close'], 0.0)) @api.depends('stage_id', 'kanban_state')", "vals: # 1) Allows keeping the batch creation of tasks", "= self.env['res.lang']._lang_get(self.env.user.lang).date_format 
task.recurrence_message = '<ul>' for date in recurring_dates[:5]: task.recurrence_message", "= fields.Date(string='Start Date') date = fields.Date(string='Expiration Date', index=True, tracking=True) subtask_project_id", "= fields.Char(string='Stage Name', required=True, translate=True) description = fields.Text(translate=True) sequence =", "allow_subtasks = fields.Boolean(string=\"Allow Sub-tasks\", related=\"project_id.allow_subtasks\", readonly=True) subtask_count = fields.Integer(\"Sub-task count\",", "desc, sequence, id desc\" _check_company_auto = True def _get_default_stage_id(self): \"\"\"", "def _notify_get_reply_to(self, default=None, records=None, company=None, doc_names=None): \"\"\" Override to set", "will also be updated.') else: task.ribbon_message = False @api.constrains('parent_id') def", "task.working_hours_close = duration_data['hours'] task.working_days_close = duration_data['days'] else: task.working_hours_close = 0.0", "'mail.activity.mixin', 'rating.mixin'] _mail_post_access = 'read' _order = \"priority desc, sequence,", "task.project_id.partner_id else: task.partner_id = task.project_id.partner_id or task.parent_id.partner_id @api.depends('partner_id.email', 'parent_id.email_from') def", "= fields.Integer(compute='_compute_task_count', string=\"Task Count\") task_ids = fields.One2many('project.task', 'project_id', string='Tasks', domain=['|',", "1, 'weekly': 7, 'bimonthly': 15, 'monthly': 30, 'quarterly': 90, 'yearly':", "def _message_post_after_hook(self, message, msg_vals): if message.attachment_ids and not self.displayed_image_id: image_attachments", "def action_open_parent_task(self): return { 'name': _('Parent Task'), 'view_mode': 'form', 'res_model':", "using default get (instead of _get_default_stage_id or _stage_find), if project_id", "of the current project will be created. 
It can be", "('res_model', '=', 'project.task'), ('res_id', 'in', project.task_ids.ids) ]) def _compute_task_count(self): task_data", "res def rating_apply(self, rate, token=None, feedback=None, subtype_xmlid=None): return super(Task, self).rating_apply(rate,", "vals['kanban_state'] = 'normal' # user_id change: update date_assign if vals.get('user_id')", "changes and test_task.stage_id.mail_template_id: res['stage_id'] = (test_task.stage_id.mail_template_id, { 'auto_delete_message': True, 'subtype_id':", "to dodge allow write access right if 'is_favorite' in vals:", "# --------------------------------------------------- def toggle_favorite(self): favorite_projects = not_fav_projects = self.env['project.project'].sudo() for", "('first', 'First'), ('second', 'Second'), ('third', 'Third'), ('last', 'Last'), ], default='first',", "task.allowed_user_ids -= permission_removed if 'allow_recurring_tasks' in vals and not vals.get('allow_recurring_tasks'):", "= super(Project, self).unlink() analytic_accounts_to_delete.unlink() return result def message_subscribe(self, partner_ids=None, channel_ids=None,", "return super().unlink() # --------------------------------------------------- # Subtasks # --------------------------------------------------- @api.depends('parent_id.partner_id', 'project_id.partner_id')", "= self.env['ir.actions.act_window']._for_xml_id('project.rating_rating_action_view_project_rating') action['name'] = _('Ratings of %s') % (self.name,) action_context", "= dict(self.env.context) ctx = {k: v for k, v in", "'stage_id': task.stage_id.id, 'name': task.name, 'company_id': project.company_id.id, } def map_tasks(self, new_project_id):", "ProjectTaskType(models.Model): _name = 'project.task.type' _description = 'Task Stage' _order =", "tasks.')) @api.constrains('allowed_user_ids') def _check_no_portal_allowed(self): for task in self.filtered(lambda t: t.project_id.privacy_visibility", "for x in email_list if x.split('@')[0] not in aliases] @api.model", 
"string='Kanban State', copy=False, default='normal', required=True) kanban_state_label = fields.Char(compute='_compute_kanban_state_label', string='Kanban State", "= vals.keys() & self._get_recurrence_fields() if rec_fields: rec_values = {rec_field: vals[rec_field]", "project.partner_id.email != project.partner_email: project.partner_email = project.partner_id.email def _inverse_partner_email(self): for project", "@api.depends('partner_id.phone') def _compute_partner_phone(self): for project in self: if project.partner_id and", "once by project), # by using default get (instead of", "the normal state for kanban selection, when the task or", "not in vals: vals['kanban_state'] = 'normal' # user_id change: update", "stage to display.') rating_template_id = fields.Many2one( 'mail.template', string='Rating Email Template',", "res = super(Project, self).write(vals) if vals else True if allowed_users_changed:", "@api.depends('recurring_task', 'repeat_unit', 'repeat_on_month', 'repeat_on_year') def _compute_repeat_visibility(self): for task in self:", "= [h for h in headers.get('X-Odoo-Objects', '').split(',') if h] current_objects.insert(0,", "== 'user' and project_user_group_id in pdata['groups'] if self.project_id.privacy_visibility == 'followers':", "id desc\" _check_company_auto = True def _get_default_stage_id(self): \"\"\" Gives default", "rating_template: task.rating_send_request(rating_template, lang=task.partner_id.lang, force_send=force_send) def rating_get_partner_id(self): res = super(Task, self).rating_get_partner_id()", "('november', 'November'), ('december', 'December'), ], compute='_compute_repeat', readonly=False) repeat_show_dow = fields.Boolean(compute='_compute_repeat_visibility')", "return the action to see all the analytic lines of", "task in self: rating_template = task.stage_id.rating_template_id if rating_template: task.rating_send_request(rating_template, lang=task.partner_id.lang,", "group_operator=\"avg\") working_days_close = 
fields.Float(compute='_compute_elapsed', string='Working days to close', store=True, group_operator=\"avg\")", "project is not assigned to the stage readgroup = self.with_context(active_test=False).env['project.task'].read_group([('stage_id',", "task.ribbon_message = _('By saving this change, the customer phone number", "to get customer feedback?\\n\" \"- Rating when changing stage: an", "== 1, return only direct children # If depth ==", "and not self.stage_id.fold: take_action = self._notify_get_action_link('assign', **local_msg_vals) project_actions = [{'url':", "that stage.') legend_done = fields.Char( 'Green Kanban Label', default=lambda s:", "return action def action_view_all_rating(self): \"\"\" return the action to see", "Ratings', default=lambda self: self.env.user.has_group('project.group_project_rating')) rating_status = fields.Selection( [('stage', 'Rating when", "'Daily'), ('weekly', 'Weekly'), ('bimonthly', 'Twice a Month'), ('monthly', 'Once a", "project def write(self, vals): allowed_users_changed = 'allowed_portal_user_ids' in vals or", "no partner_id, use the project partner_id if any, or else", "field about working time elapsed between record creation and assignation/closing.", "show a dialog to stop the recurrence raise UserError(_('You cannot", "self.project_id: current_objects = [h for h in headers.get('X-Odoo-Objects', '').split(',') if", "partner_id = fields.Many2one('res.partner', string='Customer', compute='_compute_partner_id', store=True, readonly=False, domain=\"['|', ('company_id', '=',", "'month' and task.repeat_on_month == 'day') or (task.repeat_unit == 'year' and", "'date') or (task.repeat_unit == 'year' and task.repeat_on_year == 'date') task.repeat_show_week", "string='Working days to assign', store=True, group_operator=\"avg\") working_days_close = fields.Float(compute='_compute_elapsed', string='Working", "dt_date_assign = fields.Datetime.from_string(task.date_assign) duration_data = 
task.project_id.resource_calendar_id.get_work_duration_data(dt_create_date, dt_date_assign, compute_leaves=True) task.working_hours_open =", "for project in self: analytic_account = self.env['account.analytic.account'].create({ 'name': project.name, 'company_id':", "displayed for the normal state for kanban selection, when the", "'March'), ('april', 'April'), ('may', 'May'), ('june', 'June'), ('july', 'July'), ('august',", "'parent_id', string=\"Sub-tasks\", context={'active_test': False}) subtask_project_id = fields.Many2one('project.project', related=\"project_id.subtask_project_id\", string='Sub-task Project',", "------------------------------------------------ # CRUD overrides # ------------------------------------------------ @api.model def default_get(self, default_fields):", "'Once a Month'), ('quarterly', 'Quarterly'), ('yearly', 'Yearly')], 'Rating Frequency', required=True,", "pdata['groups'] if self.project_id.privacy_visibility == 'followers': allowed_user_ids = self.project_id.allowed_internal_user_ids.partner_id.ids group_func =", "store=True, copy=False) company_id = fields.Many2one('res.company', string='Company', required=True, default=lambda self: self.env.company)", "and not vals.get('active') and any(self.mapped('recurrence_id')): # TODO: show a dialog", "'res_id': wizard.id, 'target': 'new', 'context': self.env.context, } def unlink(self): #", "= 0.0 (self - task_linked_to_calendar).update(dict.fromkeys( ['working_hours_open', 'working_hours_close', 'working_days_open', 'working_days_close'], 0.0))", "to the customer when the task or issue reaches this", "name and stage, normally altered during copy defaults = self._map_tasks_default_valeus(task,", "project in self: project.doc_count = Attachment.search_count([ '|', '&', ('res_model', '=',", "allowed_portal_users_removed = permission_removed.filtered('share') project.message_unsubscribe(allowed_portal_users_removed.partner_id.commercial_partner_id.ids) for task in project.task_ids: 
task.allowed_user_ids -=", "'/my/task/%s' % task.id def _compute_access_warning(self): super(Task, self)._compute_access_warning() for task in", "readonly=False, domain=\"['|', ('company_id', '=', False), ('company_id', '=', company_id)]\") partner_is_company =", "} defaults.update(custom_values) task = super(Task, self.with_context(create_context)).message_new(msg, custom_values=defaults) email_list = task.email_split(msg)", "self.sudo().mapped('project_id')._notify_get_reply_to(default=default, records=None, company=company, doc_names=None) res = {task.id: aliases.get(task.project_id.id) for task", "readonly=False) repeat_show_dow = fields.Boolean(compute='_compute_repeat_visibility') repeat_show_day = fields.Boolean(compute='_compute_repeat_visibility') repeat_show_week = fields.Boolean(compute='_compute_repeat_visibility')", "self)._message_get_suggested_recipients() for task in self: if task.partner_id: reason = _('Customer", "'project_favorite_user_rel', 'project_id', 'user_id', default=_get_default_favorite_user_ids, string='Members') is_favorite = fields.Boolean(compute='_compute_is_favorite', inverse='_inverse_is_favorite', string='Show", "in vals_list: project_id = vals.get('project_id') or self.env.context.get('default_project_id') if project_id and", "self.env['project.task'] # We want to copy archived task, but do", "tasks.\\n\" \"- All internal users: employees may see all project", "'=', False), ('company_id', '=', company_id)]\", check_company=True, help=\"Analytic account to which", "fields.One2many('project.task', 'project_id', string='Tasks', domain=['|', ('stage_id.fold', '=', False), ('stage_id', '=', False)])", "a list of Projects.\") partner_id = fields.Many2one('res.partner', string='Customer', auto_join=True, tracking=True,", "to a project And add the portal user subscribed to", "not default.get('name'): default['name'] = _(\"%s (copy)\") % (self.name) project =", "False), ('company_id', '=', company_id)]\") partner_email = fields.Char( 
compute='_compute_partner_email', inverse='_inverse_partner_email', string='Email',", "_check_parent_id(self): if not self._check_recursion(): raise ValidationError(_('Error! You cannot create recursive", "tracking=True) alias_enabled = fields.Boolean(string='Use email alias', compute='_compute_alias_enabled', readonly=False) alias_id =", "}) context = dict(self.env.context) context['stage_view'] = stage_view return { 'name':", "alias', compute='_compute_alias_enabled', readonly=False) alias_id = fields.Many2one('mail.alias', string='Alias', ondelete=\"restrict\", required=True, help=\"Internal", "= self.env.context.get('default_project_id') return [default_project_id] if default_project_id else None active =", "fields.Integer(compute='_compute_attached_docs_count', string=\"Number of documents attached\") date_start = fields.Date(string='Start Date') date", "recurring_tasks: task.recurring_count = tasks_count.get(task.recurrence_id.id, 0) @api.depends('partner_id.email') def _compute_partner_email(self): for task", "the task or issue is in that stage.') legend_done =", "_( \"The task cannot be shared with the recipient(s) because", "want the gateway user to be responsible if no other", "True)]).write({'recurring_task': False}) if 'active' in vals: # archiving/unarchiving a project", "--------------------------------------------------- # This method should be called once a day", "stage.disabled_rating_warning = False class Project(models.Model): _name = \"project.project\" _description =", "s: _('Ready'), translate=True, required=True, help='Override the default value displayed for", "'month' and task.repeat_on_month == 'date') or (task.repeat_unit == 'year' and", "def _compute_attached_docs_count(self): Attachment = self.env['ir.attachment'] for project in self: project.doc_count", "assigned to the stage readgroup = self.with_context(active_test=False).env['project.task'].read_group([('stage_id', 'in', self.ids)], ['project_id'],", "left-part is not already an 
alias aliases = self.mapped('project_id.alias_name') return", "self.analytic_account_id.id} action['domain'] = [('account_id', '=', self.analytic_account_id.id)] return action def action_view_all_rating(self):", "in headers.get('X-Odoo-Objects', '').split(',') if h] current_objects.insert(0, 'project.project-%s, ' % self.project_id.id)", "not default.get('name'): default['name'] = _(\"%s (copy)\", self.name) if self.recurrence_id: default['recurrence_id']", "self.name) if self.recurrence_id: default['recurrence_id'] = self.recurrence_id.copy().id return super(Task, self).copy(default) @api.constrains('parent_id')", "'&', ('res_model', '=', 'project.project'), ('res_id', 'in', self.ids), '&', ('res_model', '=',", "not task._check_recursion(): raise ValidationError(_('Error! You cannot create recursive hierarchy of", "for task in self: recurrence_domain = OR([recurrence_domain, ['&', ('recurrence_id', '=',", "repeat_week = fields.Selection([ ('first', 'First'), ('second', 'Second'), ('third', 'Third'), ('last',", "def email_split(self, msg): email_list = tools.email_split((msg.get('to') or '') + ','", "email.\", index=True, compute='_compute_email_from', store=\"True\", readonly=False) allowed_user_ids = fields.Many2many('res.users', string=\"Visible to\",", "with this project. 
Incoming emails are automatically synchronized \" \"with", "update context, with all default values as 'quick_create' does not", "fields.Datetime.today() recurrence = self.env['project.task.recurrence'].create(rec_values) task.recurrence_id = recurrence.id if 'recurring_task' in", "for task in recurring_tasks: task.recurring_count = tasks_count.get(task.recurrence_id.id, 0) @api.depends('partner_id.email') def", "recurrence_domain = [('recurrence_id', 'in', self.recurrence_id.ids)] tasks |= self.env['project.task'].search(recurrence_domain) result =", "!= project.partner_id.email: project.partner_id.email = project.partner_email @api.depends('partner_id.phone') def _compute_partner_phone(self): for project", "= fields.Boolean(string=\"Mon\", compute='_compute_repeat', readonly=False) tue = fields.Boolean(string=\"Tue\", compute='_compute_repeat', readonly=False) wed", "vals): allowed_users_changed = 'allowed_portal_user_ids' in vals or 'allowed_internal_user_ids' in vals", "not contains all field in its view if self._context.get('default_project_id'): default_project", "project.partner_phone @api.onchange('alias_enabled') def _onchange_alias_name(self): if not self.alias_enabled: self.alias_name = False", "Date'), ('after', 'Number of Repetitions'), ], default=\"forever\", string=\"Until\", compute='_compute_repeat', readonly=False)", "partner_ids = [p.id for p in self.env['mail.thread']._mail_find_partner_from_emails(email_list, records=task, force_create=False) if", "in self: if project.partner_id and project.partner_id.email != project.partner_email: project.partner_email =", "is folded in the kanban view when there are no", "1][0] if 'repeat_until' in default_fields: vals['repeat_until'] = fields.Date.today() + timedelta(days=7)", "readonly=False) mon = fields.Boolean(string=\"Mon\", compute='_compute_repeat', readonly=False) tue = fields.Boolean(string=\"Tue\", compute='_compute_repeat',", "== 1: return children return children + 
children._get_all_subtasks(depth - 1)", "Invited internal users: employees may only see the followed project", "'active' in vals and not vals['active']: self.env['project.task'].search([('stage_id', 'in', self.ids)]).write({'active': False})", "Tags of project's tasks \"\"\" _name = \"project.tags\" _description =", "store=True, readonly=False, domain=\"['|', ('company_id', '=', False), ('company_id', '=', company_id)]\") partner_is_company", "index=True, required=True, tracking=True) description = fields.Html() active = fields.Boolean(default=True, help=\"If", "res_model & res_id displayed_image_id = fields.Many2one('ir.attachment', domain=\"[('res_model', '=', 'project.task'), ('res_id',", "no records in that stage to display.') rating_template_id = fields.Many2one(", "'type': 'ir.actions.act_window', 'res_model': 'project.task', 'view_mode': 'tree,form', 'domain': [('recurrence_id', 'in', self.recurrence_id.ids)],", "preserve task name and stage, normally altered during copy defaults", "fields.Boolean(string=\"Recurrent\") recurring_count = fields.Integer(string=\"Tasks in Recurrence\", compute='_compute_recurring_count') recurrence_id = fields.Many2one('project.task.recurrence',", "'Rating Frequency', required=True, default='monthly') _sql_constraints = [ ('project_date_greater', 'check(date >=", "'portal_customer' and portal_privacy: group_data['has_button_access'] = True return groups def _notify_get_reply_to(self,", "and task.email_from) or task.parent_id.email_from @api.depends('parent_id.project_id.subtask_project_id') def _compute_project_id(self): for task in", "return res def message_unsubscribe(self, partner_ids=None, channel_ids=None): \"\"\" Unsubscribe from all", "working_days_open = fields.Float(compute='_compute_elapsed', string='Working days to assign', store=True, group_operator=\"avg\") working_days_close", "self.env.user.has_group('project.group_subtask_project')) allow_recurring_tasks = fields.Boolean('Recurring Tasks', default=lambda self: 
self.env.user.has_group('project.group_project_recurring_tasks')) # rating", "will be created. It can be the current project itself.\")", "in self: allowed_users = project.allowed_user_ids project.allowed_portal_user_ids = allowed_users.filtered('share') project.allowed_internal_user_ids =", "fields.Many2one('res.users', string='Assigned to', default=lambda self: self.env.uid, index=True, tracking=True) partner_id =", "partner_city = fields.Char(related='partner_id.city', readonly=False) manager_id = fields.Many2one('res.users', string='Project Manager', related='project_id.user_id',", "\"priority desc, sequence, id desc\" _check_company_auto = True def _get_default_stage_id(self):", "default=_get_default_project_ids) legend_blocked = fields.Char( 'Red Kanban Label', default=lambda s: _('Blocked'),", "vals and vals.get('stage_id'): self.filtered(lambda x: x.project_id.rating_active and x.project_id.rating_status == 'stage')._send_task_rating_mail(force_send=True)", "\"\"\" for task in self: if task.partner_id: if task.project_id.partner_id: task.partner_id", "# remove default author when going through the mail gateway.", "= self.env['ir.actions.act_window']._for_xml_id('base.action_attachment') action['domain'] = str([ '|', '&', ('res_model', '=', 'project.project'),", "required=True, copy=True, default=_default_company_id) color = fields.Integer(string='Color Index') user_email = fields.Char(related='user_id.email',", "action to see all the rating of the project and", "self).copy(default) @api.constrains('parent_id') def _check_parent_id(self): for task in self: if not", "return { 'name': _('Confirmation'), 'view_mode': 'form', 'res_model': 'project.delete.wizard', 'views': [(self.env.ref('project.project_delete_wizard_form').id,", "stage.\\n\" \"- Periodical Rating: email will be sent periodically.\\n\\n\" \"Don't", "all the sub-tasks linked to this task. 
Usually less or", "ValidationError, RedirectWarning from odoo.tools.misc import format_date, get_lang from odoo.osv.expression import", "project.partner_id.phone: project.partner_id.phone = project.partner_phone @api.onchange('alias_enabled') def _onchange_alias_name(self): if not self.alias_enabled:", "required=True) color = fields.Integer(string='Color', default=_get_default_color) _sql_constraints = [ ('name_uniq', 'unique", "def stage_find(self, section_id, domain=[], order='sequence'): \"\"\" Override of the base.stage", "{ 'name': _('Parent Task'), 'view_mode': 'form', 'res_model': 'project.task', 'res_id': self.parent_id.id,", "super(Task, self).default_get(default_fields) days = list(DAYS.keys()) week_start = fields.Datetime.today().weekday() if all(d", "self.env['res.partner'].browse(partner_ids).user_ids portal_users = all_users.filtered('share') internal_users = all_users - portal_users self.allowed_portal_user_ids", "_compute_is_favorite(self): for project in self: project.is_favorite = self.env.user in project.favorite_user_ids", "= project for follower in self.message_follower_ids: project.message_subscribe(partner_ids=follower.partner_id.ids, subtype_ids=follower.subtype_ids.ids) if 'tasks'", "('company_id', '=', company_id)]\") partner_email = fields.Char( compute='_compute_partner_email', inverse='_inverse_partner_email', string='Email', readonly=False,", "@api.depends('partner_id.email') def _compute_partner_email(self): for project in self: if project.partner_id and", "task.repeat_type, task.repeat_until, task.repeat_on_month, task.repeat_on_year, task._get_weekdays(WEEKS.get(task.repeat_week)), task.repeat_day, task.repeat_week, task.repeat_month, count=number_occurrences) date_format", "= fields.Many2one('res.partner', string='Customer', compute='_compute_partner_id', store=True, readonly=False, domain=\"['|', ('company_id', '=', False),", "index=True, default=10, help=\"Gives the sequence order when displaying a list", "pdata: pdata['type'] == 
'portal' and pdata['id'] in allowed_user_ids, {} ))", "directly compute is_favorite to dodge allow write access right if", "f in rec_fields: if task.recurrence_id: task[f] = task.recurrence_id[f] else: if", "in changes and test_task.stage_id.mail_template_id: res['stage_id'] = (test_task.stage_id.mail_template_id, { 'auto_delete_message': True,", "'=', True), ('rating_status', '=', 'periodic'), ('rating_request_deadline', '<=', fields.Datetime.now()) ]) for", "_map_tasks_default_valeus(self, task, project): \"\"\" get the default value for the", "l.recurrence_id) count = self.env['project.task'].read_group([('recurrence_id', 'in', recurring_tasks.recurrence_id.ids)], ['id'], 'recurrence_id') tasks_count =", "- project.allowed_portal_user_ids def _compute_access_url(self): super(Project, self)._compute_access_url() for project in self:", "5 or task.repeat_type == 'forever' or len(recurring_dates) > 5: task.recurrence_message", "Allows keeping the batch creation of tasks # 2) Ensure", "message_new(self, msg, custom_values=None): \"\"\" Overrides mail_thread message_new that is called", "_check_company_auto = True def _compute_attached_docs_count(self): Attachment = self.env['ir.attachment'] for project", "# ---------------------------------------- # Case management # ---------------------------------------- def stage_find(self, section_id,", "if 'allow_recurring_tasks' in vals and not vals.get('allow_recurring_tasks'): self.env['project.task'].search([('project_id', 'in', self.ids),", "now = fields.Datetime.now() if 'parent_id' in vals and vals['parent_id'] in", "message.attachment_ids and not self.displayed_image_id: image_attachments = message.attachment_ids.filtered(lambda a: a.mimetype ==", "for task in self: task.subtask_count = len(task._get_all_subtasks()) @api.onchange('company_id') def _onchange_task_company(self):", "be created. 
It can be the current project itself.\") allow_subtasks", "required=True, help='Override the default value displayed for the done state", "repeat_month = fields.Selection([ ('january', 'January'), ('february', 'February'), ('march', 'March'), ('april',", "vals: vals.update(self.update_date_end(vals['stage_id'])) vals['date_last_stage_update'] = now # reset kanban state when", "from the customer will update the kanban state to 'ready", "= fields.Boolean('Recurring Tasks', default=lambda self: self.env.user.has_group('project.group_project_recurring_tasks')) # rating fields rating_request_deadline", "correct (and computed once by project), # by using default", "if self.email_from and not self.partner_id: # we consider that posting", "_compute_task_count(self): task_data = self.env['project.task'].read_group([('project_id', 'in', self.ids), '|', '&', ('stage_id.is_closed', '=',", "task.working_days_open = duration_data['days'] else: task.working_hours_open = 0.0 task.working_days_open = 0.0", "tasks = self.env['project.task'] # We want to copy archived task,", "= task.partner_phone @api.depends('partner_email', 'partner_phone', 'partner_id') def _compute_ribbon_message(self): for task in", "channel_ids=None, subtype_ids=None): \"\"\" Add the users subscribed to allowed portal", "action def action_recurring_tasks(self): return { 'name': 'Tasks in Recurrence', 'type':", "of the project.\", translate=True) tasks = fields.One2many('project.task', 'project_id', string=\"Task Activities\")", "user subscribed to allowed portal users \"\"\" res = super(Project,", "[ ('fold', '=', False), ('is_closed', '=', False)]) else: task.stage_id =", "key task_ids = self.env['project.task'].with_context(active_test=False).search([('project_id', '=', self.id)], order='parent_id').ids old_to_new_tasks = {}", "_compute_rating_request_deadline(self): periods = {'daily': 1, 'weekly': 7, 'bimonthly': 15, 'monthly':", "project in self.with_context(active_test=False): if project.tasks: raise 
UserError(_('You cannot delete a", "if name: tname = name.lower() self = self.with_context( empty_list_help_id=self.env.context.get('default_project_id'), empty_list_help_model='project.project',", "+ (msg.get('cc') or '')) # check left-part is not already", "'%s','default_res_id': %d}\" % (self._name, self.id) return action def _compute_is_favorite(self): for", "and task.partner_id.email != task.partner_email: task.partner_email = task.partner_id.email def _inverse_partner_email(self): for", "override updates the document according to the email. \"\"\" #", "'in', project.task_ids.ids) ]) def _compute_task_count(self): task_data = self.env['project.task'].read_group([('project_id', 'in', self.ids),", "= 'Task Stage' _order = 'sequence, id' def _get_default_project_ids(self): default_project_id", "copy defaults = self._map_tasks_default_valeus(task, project) if task.parent_id: # set the", "task.attachment_ids = [(6, 0, list(set(attachment_ids) - set(message_attachment_ids)))] @api.depends('project_id.allowed_user_ids', 'project_id.privacy_visibility') def", "you to hide the project without removing it.\") sequence =", "third generation # If depth <= 0, return all children", "order when displaying a list of Projects.\") partner_id = fields.Many2one('res.partner',", "@api.depends('allowed_internal_user_ids', 'allowed_portal_user_ids') def _compute_allowed_users(self): for project in self: users =", "Image') legend_blocked = fields.Char(related='stage_id.legend_blocked', string='Kanban Blocked Explanation', readonly=True, related_sudo=False) legend_done", "partner_ids: new_allowed_users = self.env['res.partner'].browse(partner_ids).user_ids.filtered('share') tasks = self.filtered(lambda task: task.project_id.privacy_visibility ==", "self)._notify_email_header_dict() if self.project_id: current_objects = [h for h in headers.get('X-Odoo-Objects',", "else: task.partner_id = task.project_id.partner_id or task.parent_id.partner_id @api.depends('partner_id.email', 
'parent_id.email_from') def _compute_email_from(self):", "project.partner_id and project.partner_phone != project.partner_id.phone: project.partner_id.phone = project.partner_phone @api.onchange('alias_enabled') def", "see the project's tasks. (%s)\", user_names)) def _compute_attachment_ids(self): for task", "privacy of the project to 'Visible by following customers' in", "self.project_id.partner_id return res def rating_apply(self, rate, token=None, feedback=None, subtype_xmlid=None): return", "is None: default = {} if not default.get('name'): default['name'] =", "recurring_task = fields.Boolean(string=\"Recurrent\") recurring_count = fields.Integer(string=\"Tasks in Recurrence\", compute='_compute_recurring_count') recurrence_id", "in default_fields: vals['repeat_until'] = fields.Date.today() + timedelta(days=7) if 'repeat_weekday' in", "task in self.filtered(lambda x: x.project_id.privacy_visibility != 'portal'): task.access_warning = _(", "kanban state when the customer replies to the feedback for", "Task', index=True) child_ids = fields.One2many('project.task', 'parent_id', string=\"Sub-tasks\", context={'active_test': False}) subtask_project_id", "in range(1, 32) ], compute='_compute_repeat', readonly=False) repeat_week = fields.Selection([ ('first',", "the default value displayed for the normal state for kanban", "'repeat_number', 'repeat_on_month', 'repeat_on_year', 'mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun',", "fields.Integer(default=1) project_ids = fields.Many2many('project.project', 'project_task_type_rel', 'type_id', 'project_id', string='Projects', default=_get_default_project_ids) legend_blocked", "headers def _message_post_after_hook(self, message, msg_vals): if message.attachment_ids and not self.displayed_image_id:", "will_write_phone = task.partner_id and task.partner_phone != task.partner_id.phone if will_write_email and", "not vals.get('recurring_task'): self.recurrence_id.unlink() tasks = self recurrence_update = 
vals.pop('recurrence_update', 'this')", "that don't come from message.\") # In the domain of", "order='sequence'): \"\"\" Override of the base.stage method Parameter of the", "in section_ids: search_domain.append(('project_ids', '=', section_id)) search_domain += list(domain) # perform", "defaults = ast.literal_eval(self.alias_defaults or \"{}\") defaults['project_id'] = self.id return values", "fields.Datetime.now() # recurrence rec_fields = vals.keys() & self._get_recurrence_fields() if rec_fields", "recurring_count = fields.Integer(string=\"Tasks in Recurrence\", compute='_compute_recurring_count') recurrence_id = fields.Many2one('project.task.recurrence', copy=False)", "now # reset kanban state when changing stage if 'kanban_state'", "return analytic_account def _create_analytic_account(self): for project in self: analytic_account =", "return res def _creation_subtype(self): return self.env.ref('project.mt_task_new') def _track_subtype(self, init_values): self.ensure_one()", "= fields.Selection([ (str(i), str(i)) for i in range(1, 32) ],", "= image_attachments[0] if self.email_from and not self.partner_id: # we consider", "'blocked': return self.env.ref('project.mt_task_blocked') elif 'kanban_state_label' in init_values and self.kanban_state ==", "project is linked for financial management. 
\" \"Use an analytic", "repeat_unit = fields.Selection([ ('day', 'Days'), ('week', 'Weeks'), ('month', 'Months'), ('year',", "self.name) + ':', 'default_parent_id': self.id, # will give default subtask", "return randint(1, 11) name = fields.Char('Name', required=True) color = fields.Integer(string='Color',", "if 'is_favorite' in vals: vals.pop('is_favorite') self._fields['is_favorite'].determine_inverse(self) res = super(Project, self).write(vals)", "\"The task cannot be shared with the recipient(s) because the", "of the base.stage method Parameter of the stage search taken", "_inherit = ['portal.mixin', 'mail.alias.mixin', 'mail.thread', 'rating.parent.mixin'] _order = \"sequence, name,", "+= new_task return project.write({'tasks': [(6, 0, tasks.ids)]}) @api.returns('self', lambda value:", "task.repeat_interval, task.repeat_unit, task.repeat_type, task.repeat_until, task.repeat_on_month, task.repeat_on_year, task._get_weekdays(WEEKS.get(task.repeat_week)), task.repeat_day, task.repeat_week, task.repeat_month,", "15, 'monthly': 30, 'quarterly': 90, 'yearly': 365} for project in", "that it was created through the chatter using # suggested", "task.partner_email != task.partner_id.email: task.partner_id.email = task.partner_email @api.depends('partner_id.phone') def _compute_partner_phone(self): for", "for project in self: if self.env.user in project.favorite_user_ids: favorite_projects |=", "recipient(s) because the privacy of the project is too restricted.", "# stage change: update date_last_stage_update if 'stage_id' in vals: vals.update(self.update_date_end(vals['stage_id']))", "\"\"\" Subscribe to all existing active tasks when subscribing to", "'email_layout_xmlid': 'mail.mail_notification_light' }) return res def _creation_subtype(self): return self.env.ref('project.mt_task_new') def", "_message_get_suggested_recipients(self): recipients = super(Task, self)._message_get_suggested_recipients() for task in self: if", "= False @api.constrains('parent_id') def 
_check_parent_id(self): if not self._check_recursion(): raise ValidationError(_('Error!", "self.message_follower_ids: project.message_subscribe(partner_ids=follower.partner_id.ids, subtype_ids=follower.subtype_ids.ids) if 'tasks' not in default: self.map_tasks(project.id) return", "if task.repeat_type == 'until': task.recurrence_message += _('<p><em>Number of tasks: %(tasks_count)s</em></p>')", "= \"Project\" _inherit = ['portal.mixin', 'mail.alias.mixin', 'mail.thread', 'rating.parent.mixin'] _order =", "task.repeat_on_year, task._get_weekdays(WEEKS.get(task.repeat_week)), task.repeat_day, task.repeat_week, task.repeat_month, count=number_occurrences) date_format = self.env['res.lang']._lang_get(self.env.user.lang).date_format task.recurrence_message", "task in self: for f in rec_fields: if task.recurrence_id: task[f]", "section_id, domain=[], order='sequence'): \"\"\" Override of the base.stage method Parameter", "days = list(DAYS.keys()) week_start = fields.Datetime.today().weekday() if all(d in default_fields", "recursive hierarchy of tasks.')) @api.constrains('allowed_user_ids') def _check_no_portal_allowed(self): for task in", "to allowed portal users \"\"\" res = super(Task, self).message_subscribe(partner_ids=partner_ids, channel_ids=channel_ids,", "= project.allowed_user_ids project.allowed_portal_user_ids = allowed_users.filtered('share') project.allowed_internal_user_ids = allowed_users - project.allowed_portal_user_ids", "index=True, tracking=True) partner_id = fields.Many2one('res.partner', string='Customer', compute='_compute_partner_id', store=True, readonly=False, domain=\"['|',", "fields rec_fields = vals.keys() & self._get_recurrence_fields() if rec_fields: rec_values =", "task.kanban_state == 'normal': task.kanban_state_label = task.legend_normal elif task.kanban_state == 'blocked':", "automatically synchronized \" \"with Tasks (or optionally Issues if the", "str([ '|', '&', ('res_model', '=', 'project.project'), ('res_id', 'in', self.ids), 
'&',", "= fields.Boolean('Customer Ratings', default=lambda self: self.env.user.has_group('project.group_project_rating')) rating_status = fields.Selection( [('stage',", "= task.partner_id.phone def _inverse_partner_phone(self): for task in self: if task.partner_id", "fields.One2many(domain=lambda self: [('model', '=', self._name), ('message_type', 'in', ['email', 'comment'])]) #", "fields.Many2one('res.partner', string='Customer', compute='_compute_partner_id', store=True, readonly=False, domain=\"['|', ('company_id', '=', False), ('company_id',", "name = self.env['project.project'].browse(project_id).label_tasks if name: tname = name.lower() self =", "pdata['type'] == 'user' and project_user_group_id in pdata['groups'] and pdata['id'] in", "(self.name) project = super(Project, self).copy(default) if self.subtask_project_id == self: project.subtask_project_id", "== 'portal': allowed_user_ids = self.project_id.allowed_portal_user_ids.partner_id.ids groups.insert(0, ( 'allowed_portal_users', lambda pdata:", "see project and tasks followed by\\n\" \" them or by", "If a task has no partner_id, use the project partner_id", "== 'portal_customer' and portal_privacy: group_data['has_button_access'] = True return groups def", "description = fields.Text(translate=True) sequence = fields.Integer(default=1) project_ids = fields.Many2many('project.project', 'project_task_type_rel',", "if 'kanban_state' not in vals: vals['kanban_state'] = 'normal' # user_id", "lead: - section_id: if set, stages must belong to this", "= '/my/task/%s' % task.id def _compute_access_warning(self): super(Task, self)._compute_access_warning() for task", "= 0.0 if task.date_end: dt_date_end = fields.Datetime.from_string(task.date_end) duration_data = task.project_id.resource_calendar_id.get_work_duration_data(dt_create_date,", "['project_id']) result = dict((data['project_id'][0], data['project_id_count']) for data in task_data) for", "list(set([project['project_id'][0] for project in readgroup] + 
self.project_ids.ids)) wizard = self.with_context(project_ids=project_ids).env['project.task.type.delete.wizard'].create({", "string='Visibility', required=True, default='portal', help=\"Defines the visibility of the tasks of", "also. 2) if the parent task partner_id changes, the task", "[fn(n) for day, fn in DAYS.items() if self[day]] return [DAYS.get(self.repeat_weekday)(n)]", "self.env['project.task'].browse(task_ids): # preserve task name and stage, normally altered during", "is installed).\") privacy_visibility = fields.Selection([ ('followers', 'Invited internal users'), ('employees',", "fields.Many2one('project.project', related=\"project_id.subtask_project_id\", string='Sub-task Project', readonly=True) allow_subtasks = fields.Boolean(string=\"Allow Sub-tasks\", related=\"project_id.allow_subtasks\",", "'June'), ('july', 'July'), ('august', 'August'), ('september', 'September'), ('october', 'October'), ('november',", "context={'active_test': False}) subtask_project_id = fields.Many2one('project.project', related=\"project_id.subtask_project_id\", string='Sub-task Project', readonly=True) allow_subtasks", "'').split(',') if h] current_objects.insert(0, 'project.project-%s, ' % self.project_id.id) headers['X-Odoo-Objects'] =", "and project.partner_phone != project.partner_id.phone: project.partner_id.phone = project.partner_phone @api.onchange('alias_enabled') def _onchange_alias_name(self):", "automatically changed also. 
2) if the parent task partner_id changes,", "def write(self, vals): allowed_users_changed = 'allowed_portal_user_ids' in vals or 'allowed_internal_user_ids'", "None) return dict(action, context=action_context) # --------------------------------------------------- # Business Methods #", "attachment_tree_view(self): action = self.env['ir.actions.act_window']._for_xml_id('base.action_attachment') action['domain'] = str([ '|', '&', ('res_model',", "vals.pop('is_favorite') self._fields['is_favorite'].determine_inverse(self) res = super(Project, self).write(vals) if vals else True", "alias_enabled = fields.Boolean(string='Use email alias', compute='_compute_alias_enabled', readonly=False) alias_id = fields.Many2one('mail.alias',", "if task.project_id.partner_id: task.partner_id = task.project_id.partner_id else: task.partner_id = task.project_id.partner_id or", "= duration_data['hours'] task.working_days_open = duration_data['days'] else: task.working_hours_open = 0.0 task.working_days_open", "new one directly from notification emails. 
Also give access button", "'In Progress'), ('done', 'Ready'), ('blocked', 'Blocked')], string='Kanban State', copy=False, default='normal',", "(not a follower, a specific one) # on a document", "local_msg_vals = dict(msg_vals or {}) self.ensure_one() project_user_group_id = self.env.ref('project.group_project_user').id group_func", "self.env.user, tracking=True) alias_enabled = fields.Boolean(string='Use email alias', compute='_compute_alias_enabled', readonly=False) alias_id", "= self.browse(new_project_id) tasks = self.env['project.task'] # We want to copy", "Visibility\") # Computed field about working time elapsed between record", "section_id)) search_domain += list(domain) # perform search, return the first", "super(Task, self)._notify_email_header_dict() if self.project_id: current_objects = [h for h in", "= dict(self.env.context) context['stage_view'] = stage_view return { 'name': _('Delete Stage'),", "if 'default_project_id' in self.env.context: search_domain = ['|', ('project_ids', '=', self.env.context['default_project_id'])]", "fields.Datetime(string='Assigning Date', index=True, copy=False, readonly=True) date_deadline = fields.Date(string='Deadline', index=True, copy=False,", "'ir.actions.act_window', 'context': dict(self._context, create=False) } def action_subtask(self): action = self.env[\"ir.actions.actions\"]._for_xml_id(\"project.project_task_action_sub_task\")", "task.project_id.partner_id: task.partner_id = task.project_id.partner_id else: task.partner_id = task.project_id.partner_id or task.parent_id.partner_id", "groups='project.group_project_manager', compute='_compute_allowed_user_ids', store=True, readonly=False, copy=False) project_privacy_visibility = fields.Selection(related='project_id.privacy_visibility', string=\"Project Visibility\")", "fields.Datetime(compute='_compute_rating_request_deadline', store=True) rating_active = fields.Boolean('Customer Ratings', default=lambda self: 
self.env.user.has_group('project.group_project_rating')) rating_status", "compute_leaves=True) task.working_hours_close = duration_data['hours'] task.working_days_close = duration_data['days'] else: task.working_hours_close =", "30, 'quarterly': 90, 'yearly': 365} for project in self: project.rating_request_deadline", "import timedelta, datetime from random import randint from odoo import", "Manager', related='project_id.user_id', readonly=True) company_id = fields.Many2one( 'res.company', string='Company', compute='_compute_company_id', store=True,", "'recurring_task' in vals and not vals.get('recurring_task'): self.recurrence_id.unlink() tasks = self", "compute='_compute_repeat', readonly=False) repeat_type = fields.Selection([ ('forever', 'Forever'), ('until', 'End Date'),", "the default value displayed for the done state for kanban", "project.allowed_user_ids |= project.partner_id.user_ids return project def write(self, vals): allowed_users_changed =", "managers recipients that can assign tasks and create new one", "res = super(Task, self).rating_get_partner_id() if not res and self.project_id.partner_id: return", "'name': values.get('name', _('Unknown Analytic Account')), 'company_id': values.get('company_id') or self.env.company.id, 'partner_id':", "'partner_id': values.get('partner_id'), 'active': True, }) return analytic_account def _create_analytic_account(self): for", "super(Task, self)._track_subtype(init_values) def _notify_get_groups(self, msg_vals=None): \"\"\" Handle project users and", "compute='_compute_attachment_ids', string=\"Main Attachments\", help=\"Attachment that don't come from message.\") #", "self).write(vals) if vals else True if allowed_users_changed: for project in", "'new', 'context': context, } def write(self, vals): if 'active' in", "subtype_xmlid=None): return super(Task, self).rating_apply(rate, token=token, feedback=feedback, subtype_xmlid=\"project.mt_task_rating\") def _rating_get_parent_field_name(self): return", "gateway # 
--------------------------------------------------- def _track_template(self, changes): res = super(Task, self)._track_template(changes)", "a task has no partner_id, use the project partner_id if", "('march', 'March'), ('april', 'April'), ('may', 'May'), ('june', 'June'), ('july', 'July'),", "== 'day') or (task.repeat_unit == 'year' and task.repeat_on_year == 'day')", "@api.depends('project_id') def _compute_stage_id(self): for task in self: if task.project_id: if", "'=', company_id)]\") partner_email = fields.Char( compute='_compute_partner_email', inverse='_inverse_partner_email', string='Email', readonly=False, store=True,", "+ search_domain stage_ids = stages._search(search_domain, order=order, access_rights_uid=SUPERUSER_ID) return stages.browse(stage_ids) active", "if no other responsible is # found. create_context = dict(self.env.context", "0) def attachment_tree_view(self): action = self.env['ir.actions.act_window']._for_xml_id('base.action_attachment') action['domain'] = str([ '|',", "setting doesn't allow portal users to see the project's tasks.", "str(fields.Datetime.today().day) if 'repeat_month' in default_fields: vals['repeat_month'] = self._fields.get('repeat_month').selection[fields.Datetime.today().month - 1][0]", "subtask_project_id = fields.Many2one('project.project', string='Sub-task Project', ondelete=\"restrict\", help=\"Project in which sub-tasks", "_compute_access_url(self): super(Project, self)._compute_access_url() for project in self: project.access_url = '/my/project/%s'", "task.partner_email: task.partner_email = task.partner_id.email def _inverse_partner_email(self): for task in self:", "self._notify_get_action_link('assign', **local_msg_vals) project_actions = [{'url': take_action, 'title': _('I take it')}]", "in init_values and self.kanban_state == 'blocked': return self.env.ref('project.mt_task_blocked') elif 'kanban_state_label'", "% (self.name) project = super(Project, self).copy(default) if self.subtask_project_id == self:", 
"'day') task.repeat_show_dow = task.recurring_task and task.repeat_unit == 'week' task.repeat_show_month =", "default=lambda s: _('Blocked'), translate=True, required=True, help='Override the default value displayed", "self.env['ir.attachment'].search([('res_id', '=', task.id), ('res_model', '=', 'project.task')]).ids message_attachment_ids = task.mapped('message_ids.attachment_ids').ids #", "= fields.Boolean(string=\"Wed\", compute='_compute_repeat', readonly=False) thu = fields.Boolean(string=\"Thu\", compute='_compute_repeat', readonly=False) fri", "this change, the customer email will also be updated.') elif", "_('By saving this change, the customer email and phone number", "value displayed for the blocked state for kanban selection, when", "ctx = dict(self.env.context) ctx = {k: v for k, v", "by following customers' in order to make it accessible by", "help=\"Attachment that don't come from message.\") # In the domain", "for h in headers.get('X-Odoo-Objects', '').split(',') if h] current_objects.insert(0, 'project.project-%s, '", "not k.startswith('search_default_')} ctx.update({ 'default_name': self.env.context.get('name', self.name) + ':', 'default_parent_id': self.id,", "date_end = fields.Datetime(string='Ending Date', index=True, copy=False) date_assign = fields.Datetime(string='Assigning Date',", "if 'active' in vals and not vals['active']: self.env['project.task'].search([('stage_id', 'in', self.ids)]).write({'active':", "project_subtypes.filtered(lambda sub: sub.internal or sub.default)).ids if project_subtypes else None if", "commercial_partner_id = fields.Many2one(related='partner_id.commercial_partner_id') partner_email = fields.Char( compute='_compute_partner_email', inverse='_inverse_partner_email', string='Email', readonly=False,", "project_id = self.env.context.get('default_project_id', False) if project_id: name = self.env['project.project'].browse(project_id).label_tasks if", "partner=task.partner_id, reason=reason) elif task.email_from: 
task._message_add_suggested_recipient(recipients, email=task.email_from, reason=_('Customer Email')) return recipients", "def _read_group_stage_ids(self, stages, domain, order): search_domain = [('id', 'in', stages.ids)]", "all section_ids section_ids = [] if section_id: section_ids.append(section_id) section_ids.extend(self.mapped('project_id').ids) search_domain", "will_write_email = task.partner_id and task.partner_email != task.partner_id.email will_write_phone = task.partner_id", "default='normal', required=True) kanban_state_label = fields.Char(compute='_compute_kanban_state_label', string='Kanban State Label', tracking=True) create_date", "project if any. \"\"\" aliases = self.sudo().mapped('project_id')._notify_get_reply_to(default=default, records=None, company=company, doc_names=None)", "partner_id remains the same. \"\"\" for task in self: if", "# ---------------------------------------- def stage_find(self, section_id, domain=[], order='sequence'): \"\"\" Override of", "task.repeat_on_month == 'date') or (task.repeat_unit == 'year' and task.repeat_on_year ==", "= 0 recurring_tasks = self.filtered(lambda l: l.recurrence_id) count = self.env['project.task'].read_group([('recurrence_id',", "The Week', compute='_compute_repeat', readonly=False) repeat_month = fields.Selection([ ('january', 'January'), ('february',", "in aliases] @api.model def message_new(self, msg, custom_values=None): \"\"\" Overrides mail_thread", "delete a project containing tasks. 
You can either archive it", "return res def email_split(self, msg): email_list = tools.email_split((msg.get('to') or '')", "= self.email_split(msg) partner_ids = [p.id for p in self.env['mail.thread']._mail_find_partner_from_emails(email_list, records=self,", "self: project.rating_request_deadline = fields.datetime.now() + timedelta(days=periods.get(project.rating_status_period, 0)) @api.model def _map_tasks_default_valeus(self,", "def _compute_attachment_ids(self): for task in self: attachment_ids = self.env['ir.attachment'].search([('res_id', '=',", "= fields.Many2one('res.users', string='Assigned to', default=lambda self: self.env.uid, index=True, tracking=True) partner_id", "in ('customer', 'user') or group_name == 'portal_customer' and not portal_privacy:", "('stage_id', '=', False)]) color = fields.Integer(string='Color Index') user_id = fields.Many2one('res.users',", "False @api.constrains('parent_id') def _check_parent_id(self): if not self._check_recursion(): raise ValidationError(_('Error! You", "give access button to portal users and portal customers. 
If", "that stage.') mail_template_id = fields.Many2one( 'mail.template', string='Email Template', domain=[('model', '=',", "recurrence_domain = [] if recurrence_update == 'subsequent': for task in", "# --------------------------------------------------- # Actions # --------------------------------------------------- def toggle_favorite(self): favorite_projects =", "= fields.Many2one( 'mail.template', string='Email Template', domain=[('model', '=', 'project.task')], help=\"If set", "section_ids.append(section_id) section_ids.extend(self.mapped('project_id').ids) search_domain = [] if section_ids: search_domain = [('|')]", "return super(Task, self).message_update(msg, update_vals=update_vals) def _message_get_suggested_recipients(self): recipients = super(Task, self)._message_get_suggested_recipients()", "if self._context.get('default_project_id'): return self.env['project.project'].browse(self._context['default_project_id']).company_id return self.env.company @api.model def _read_group_stage_ids(self, stages,", "history website_message_ids = fields.One2many(domain=lambda self: [('model', '=', self._name), ('message_type', 'in',", "for task in self: if not task.project_id: task.project_id = task.parent_id.project_id.subtask_project_id", "their company.\") allowed_user_ids = fields.Many2many('res.users', compute='_compute_allowed_users', inverse='_inverse_allowed_user') allowed_internal_user_ids = fields.Many2many('res.users',", "partner_is_company = fields.Boolean(related='partner_id.is_company', readonly=True) commercial_partner_id = fields.Many2one(related='partner_id.commercial_partner_id') partner_email = fields.Char(", "help=\"How to get customer feedback?\\n\" \"- Rating when changing stage:", "default=1, compute='_compute_repeat', readonly=False) repeat_on_month = fields.Selection([ ('date', 'Date of the", "], default='date', compute='_compute_repeat', readonly=False) mon = fields.Boolean(string=\"Mon\", compute='_compute_repeat', readonly=False) tue", 
"vals['parent_id'] in self.ids: raise UserError(_(\"Sorry. You can't set a task", "task.partner_id.email != task.partner_email: task.partner_email = task.partner_id.email def _inverse_partner_email(self): for task", "store=True, copy=False) partner_phone = fields.Char( compute='_compute_partner_phone', inverse='_inverse_partner_phone', string=\"Phone\", readonly=False, store=True,", "= fields.Char(related='stage_id.legend_done', string='Kanban Valid Explanation', readonly=True, related_sudo=False) legend_normal = fields.Char(related='stage_id.legend_normal',", "'Days'), ('week', 'Weeks'), ('month', 'Months'), ('year', 'Years'), ], default='week', compute='_compute_repeat',", "of current task action['domain'] = [('id', 'child_of', self.id), ('id', '!=',", "def _track_template(self, changes): res = super(Task, self)._track_template(changes) test_task = self[0]", "vals.get('stage_id'): self.filtered(lambda x: x.project_id.rating_active and x.project_id.rating_status == 'stage')._send_task_rating_mail(force_send=True) return result", "'project_ids': self.ids }) return { 'name': _('Confirmation'), 'view_mode': 'form', 'res_model':", "else: task.working_hours_close = 0.0 task.working_days_close = 0.0 (self - task_linked_to_calendar).update(dict.fromkeys(", "%(tasks_count)s</em></p>') % {'tasks_count': len(recurring_dates)} def _is_recurrence_valid(self): self.ensure_one() return self.repeat_interval >", "self.email_from) if new_partner: self.search([ ('partner_id', '=', False), ('email_from', '=', new_partner.email),", "Stage' _order = 'sequence, id' def _get_default_project_ids(self): default_project_id = self.env.context.get('default_project_id')", "if project.partner_id and project.partner_phone != project.partner_id.phone: project.partner_phone = project.partner_id.phone def", "= fields.Many2many('res.users', 'project_allowed_portal_users_rel', string=\"Allowed Portal Users\", domain=[('share', '=', True)]) doc_count", "update_vals=None): \"\"\" Override to update 
the task according to the", "self.task_ids.ids) ]) action['context'] = \"{'default_res_model': '%s','default_res_id': %d}\" % (self._name, self.id)", "'company_id': project.company_id.id, 'partner_id': project.partner_id.id, 'active': True, }) project.write({'analytic_account_id': analytic_account.id}) #", "== 'stage')._send_task_rating_mail(force_send=True) return result def update_date_end(self, stage_id): project_task_type = self.env['project.task.type'].browse(stage_id)", "in default_fields: vals['repeat_weekday'] = self._fields.get('repeat_weekday').selection[week_start][0] return vals @api.model_create_multi def create(self,", "group_operator=\"avg\") # customer portal: include comment and incoming emails in", "its sub-tasks).', tracking=True) subtask_planned_hours = fields.Float(\"Sub-tasks Planned Hours\", compute='_compute_subtask_planned_hours', help=\"Sum", "all existing ratings _check_company_auto = True def _compute_attached_docs_count(self): Attachment =", "self.env['project.project'].sudo() for project in self: if self.env.user in project.favorite_user_ids: favorite_projects", "sequence, id desc\" _check_company_auto = True def _get_default_stage_id(self): \"\"\" Gives", "('september', 'September'), ('october', 'October'), ('november', 'November'), ('december', 'December'), ], compute='_compute_repeat',", "self.project_id.allowed_portal_user_ids.partner_id.ids groups.insert(0, ( 'allowed_portal_users', lambda pdata: pdata['type'] == 'portal' and", "or self.repeat_number) and\\ (self.repeat_type != 'until' or self.repeat_until and self.repeat_until", "readonly=False) manager_id = fields.Many2one('res.users', string='Project Manager', related='project_id.user_id', readonly=True) company_id =", "class ProjectTags(models.Model): \"\"\" Tags of project's tasks \"\"\" _name =", "readonly=False, store=True, copy=False) company_id = fields.Many2one('res.company', string='Company', required=True, default=lambda self:", "vals if allowed_users_changed: 
allowed_users = {project: project.allowed_user_ids for project in", "index=True, tracking=True) subtask_project_id = fields.Many2one('project.project', string='Sub-task Project', ondelete=\"restrict\", help=\"Project in", "self.ensure_one() project_user_group_id = self.env.ref('project.group_project_user').id group_func = lambda pdata: pdata['type'] ==", "subtasks of current task action['domain'] = [('id', 'child_of', self.id), ('id',", "'kanban_state_label' in init_values and self.kanban_state == 'blocked': return self.env.ref('project.mt_task_blocked') elif", "in self} leftover = self.filtered(lambda rec: not rec.project_id) if leftover:", "task.partner_id.phone def _inverse_partner_phone(self): for task in self: if task.partner_id and", "if task.project_id.privacy_visibility == 'portal': task._portal_ensure_token() return tasks def write(self, vals):", "= fields.Text(translate=True) sequence = fields.Integer(default=1) project_ids = fields.Many2many('project.project', 'project_task_type_rel', 'type_id',", "'All tasks'), ], default='this', store=False) recurrence_message = fields.Char(string='Next Recurrencies', compute='_compute_recurrence_message')", "project.\", translate=True) tasks = fields.One2many('project.task', 'project_id', string=\"Task Activities\") resource_calendar_id =", "self._get_weekdays()) and\\ (self.repeat_type != 'after' or self.repeat_number) and\\ (self.repeat_type !=", "= recurrence.id if 'recurring_task' in vals and not vals.get('recurring_task'): self.recurrence_id.unlink()", "\"\"\" If a task has no partner_id, use the project", "self.env.ref('project.mt_task_blocked') elif 'kanban_state_label' in init_values and self.kanban_state == 'done': return", "when changing stage if 'kanban_state' not in vals: vals['kanban_state'] =", "the Issue Tracker module is installed).\") privacy_visibility = fields.Selection([ ('followers',", "dialog to stop the recurrence raise UserError(_('You cannot delete recurring", "msg_vals): if 
message.attachment_ids and not self.displayed_image_id: image_attachments = message.attachment_ids.filtered(lambda a:", "# Mail gateway # --------------------------------------------------- def _track_template(self, changes): res =", "'target': 'new', 'context': context, } def write(self, vals): if 'active'", "state when changing stage if 'kanban_state' not in vals: vals['kanban_state']", "# Part of Odoo. See LICENSE file for full copyright", "x: x.project_id.privacy_visibility != 'portal'): task.access_warning = _( \"The task cannot", "task in self.filtered(lambda t: t.project_id.privacy_visibility != 'portal'): portal_users = task.allowed_user_ids.filtered('share')", "self.project_id.privacy_visibility == 'portal': allowed_user_ids = self.project_id.allowed_portal_user_ids.partner_id.ids groups.insert(0, ( 'allowed_portal_users', lambda", "new_project_id): \"\"\" copy and map tasks from old to new", "translate=True, required=True, help='Override the default value displayed for the done", "| project_subtypes.filtered(lambda sub: sub.internal or sub.default)).ids if project_subtypes else None", "def _compute_task_count(self): task_data = self.env['project.task'].read_group([('project_id', 'in', self.ids), '|', '&', ('stage_id.is_closed',", "the duplicated task defaults['parent_id'] = old_to_new_tasks.get(task.parent_id.id, False) new_task = task.copy(defaults)", "hierarchy of tasks.')) @api.constrains('allowed_user_ids') def _check_no_portal_allowed(self): for task in self.filtered(lambda", "= self.filtered(lambda task: task.project_id.privacy_visibility == 'portal') tasks.sudo().write({'allowed_user_ids': [(4, user.id) for", "('sun', 'Sunday'), ], string='Day Of The Week', compute='_compute_repeat', readonly=False) repeat_month", "groups = [new_group] + groups if self.project_id.privacy_visibility == 'portal': allowed_user_ids", "'rating.parent.mixin'] _order = \"sequence, name, id\" _rating_satisfaction_days = False #", "fields.Many2one( 'mail.template', 
string='Email Template', domain=[('model', '=', 'project.task')], help=\"If set an", "stages \"\"\" # collect all section_ids section_ids = [] if", "[(self.env.ref('project.view_project_task_type_delete_wizard').id, 'form')], 'type': 'ir.actions.act_window', 'res_id': wizard.id, 'target': 'new', 'context': context,", "Portal users may see project and tasks followed by\\n\" \"", "analytic account \"\"\" action = self.env[\"ir.actions.actions\"]._for_xml_id(\"analytic.account_analytic_line_action\") action['context'] = {'default_account_id': self.analytic_account_id.id}", "of %s') % (self.name,) action_context = ast.literal_eval(action['context']) if action['context'] else", "= fields.Selection([ ('this', 'This task'), ('subsequent', 'This and following tasks'),", "and task.repeat_on_year == 'date') task.repeat_show_week = task.recurring_task and (task.repeat_unit ==", "specified recipient (not a follower, a specific one) # on", "'Task Stage' _order = 'sequence, id' def _get_default_project_ids(self): default_project_id =", "that stage # a task can be in a stage", "task_linked_to_calendar: dt_create_date = fields.Datetime.from_string(task.create_date) if task.date_assign: dt_date_assign = fields.Datetime.from_string(task.date_assign) duration_data", "recurrence_update = vals.pop('recurrence_update', 'this') if recurrence_update != 'this': recurrence_domain =", "self.env['ir.model']._get('project.task').id if self.id: values['alias_defaults'] = defaults = ast.literal_eval(self.alias_defaults or \"{}\")", "for project in self: users = project.allowed_internal_user_ids | project.allowed_portal_user_ids project.allowed_user_ids", "recurrence = self.env['project.task.recurrence'].create(rec_values) task.recurrence_id = recurrence.id if 'recurring_task' in vals", "consider that posting a message with a specified recipient (not", "ValidationError(_('Error! 
You cannot create recursive hierarchy of task(s).')) @api.model def", "ondelete=\"restrict\", help=\"Project in which sub-tasks of the current project will", "project in self: project.alias_enabled = project.alias_domain and project.alias_id.alias_name @api.depends('allowed_internal_user_ids', 'allowed_portal_user_ids')", "project.partner_id.user_ids return project def write(self, vals): allowed_users_changed = 'allowed_portal_user_ids' in", "= fields.Many2one( 'mail.template', string='Rating Email Template', domain=[('model', '=', 'project.task')], help=\"If", "email=task.email_from, reason=_('Customer Email')) return recipients def _notify_email_header_dict(self): headers = super(Task,", "for project in self: if project.partner_id and project.partner_phone != project.partner_id.phone:", "def _inverse_partner_email(self): for project in self: if project.partner_id and project.partner_email", "doc_names=doc_names)) return res def email_split(self, msg): email_list = tools.email_split((msg.get('to') or", "('after', 'Number of Repetitions'), ], default=\"forever\", string=\"Until\", compute='_compute_repeat', readonly=False) repeat_until", "# a task can be in a stage even if", "--------------------------------------------------- # Rating business # --------------------------------------------------- def _send_task_rating_mail(self, force_send=False): for", "_(\"%s (copy)\") % (self.name) project = super(Project, self).copy(default) if self.subtask_project_id", "don't come from message.\") # In the domain of displayed_image_id,", "when the customer replies to the feedback for this stage.\\n\"", "{ 'stage_id': task.stage_id.id, 'name': task.name, 'company_id': project.company_id.id, } def map_tasks(self,", "= False elif group_name == 'portal_customer' and portal_privacy: group_data['has_button_access'] =", "custom_values is None: custom_values = {} defaults = { 'name':", "notification emails. 
Also give access button to portal users and", "the stages for which you want to get the customer's", "project.task_ids.ids) ]) def _compute_task_count(self): task_data = self.env['project.task'].read_group([('project_id', 'in', self.ids), '|',", "_inverse_partner_phone(self): for project in self: if project.partner_id and project.partner_phone !=", "self._fields['is_favorite'].determine_inverse(self) res = super(Project, self).write(vals) if vals else True if", "normally altered during copy defaults = self._map_tasks_default_valeus(task, project) if task.parent_id:", "'target': 'new', 'context': self.env.context, } def unlink(self): # Check project", "x.project_id.rating_active and x.project_id.rating_status == 'stage')._send_task_rating_mail(force_send=True) return result def update_date_end(self, stage_id):", "h] current_objects.insert(0, 'project.project-%s, ' % self.project_id.id) headers['X-Odoo-Objects'] = ','.join(current_objects) if", "self: task.subtask_count = len(task._get_all_subtasks()) @api.onchange('company_id') def _onchange_task_company(self): if self.project_id.company_id !=", "not p.rating_active) if disabled_projects: stage.disabled_rating_warning = '\\n'.join('- %s' % p.name", "of task(s).')) @api.model def get_empty_list_help(self, help): tname = _(\"task\") project_id", "self.env['ir.actions.act_window']._for_xml_id('project.rating_rating_action_view_project_rating') action['name'] = _('Ratings of %s') % (self.name,) action_context =", "project.access_warning = _( \"The project cannot be shared with the", "0 recurring_tasks = self.filtered(lambda l: l.recurrence_id) count = self.env['project.task'].read_group([('recurrence_id', 'in',", "users and managers recipients that can assign tasks and create", "self.env[\"project.project\"].browse( project_id ).company_id.id or self.env.company.id if project_id and \"stage_id\" not", "tasks of the project:\\n\" \"- Invited internal users: employees may", "vals['repeat_day'] = 
str(fields.Datetime.today().day) if 'repeat_month' in default_fields: vals['repeat_month'] = self._fields.get('repeat_month').selection[fields.Datetime.today().month", "@api.model def _get_recurrence_fields(self): return ['repeat_interval', 'repeat_unit', 'repeat_type', 'repeat_until', 'repeat_number', 'repeat_on_month',", "vals.pop('recurrence_update', 'this') if recurrence_update != 'this': recurrence_domain = [] if", "for rec_field in rec_fields} rec_values['next_recurrence_date'] = fields.Datetime.today() recurrence = self.env['project.task.recurrence'].create(rec_values)", "issue reaches this step.\") fold = fields.Boolean(string='Folded in Kanban', help='This", "= ast.literal_eval(action['context']) if action['context'] else {} action_context.update(self._context) action_context['search_default_parent_res_name'] = self.name", "and create new one directly from notification emails. Also give", "default_project.company_id.id if default_project else self.env.company.id, }) action['context'] = ctx return", "'working_hours_close', 'working_days_open', 'working_days_close'], 0.0)) @api.depends('stage_id', 'kanban_state') def _compute_kanban_state_label(self): for task", "res def _creation_subtype(self): return self.env.ref('project.mt_task_new') def _track_subtype(self, init_values): self.ensure_one() if", "is linked for financial management. 
\" \"Use an analytic account", "# ------------------------------------------------ @api.model def default_get(self, default_fields): vals = super(Task, self).default_get(default_fields)", "when changing stage: an email will be sent when a", "'stage_id' in vals and vals.get('stage_id'): self.filtered(lambda x: x.project_id.rating_active and x.project_id.rating_status", "False}) if 'active' in vals: # archiving/unarchiving a project does", "return super(Task, self).rating_apply(rate, token=token, feedback=feedback, subtype_xmlid=\"project.mt_task_rating\") def _rating_get_parent_field_name(self): return 'project_id'", "that can assign tasks and create new one directly from", "called by the mailgateway through message_process. This override updates the", "if self._context.get('default_project_id'): default_project = self.env['project.project'].browse(self.env.context['default_project_id']) else: default_project = self.project_id.subtask_project_id or", "readonly=True) commercial_partner_id = fields.Many2one(related='partner_id.commercial_partner_id') partner_email = fields.Char( compute='_compute_partner_email', inverse='_inverse_partner_email', string='Email',", "if p] task.message_subscribe(partner_ids) return task def message_update(self, msg, update_vals=None): \"\"\"", "project's analytic account \"\"\" action = self.env[\"ir.actions.actions\"]._for_xml_id(\"analytic.account_analytic_line_action\") action['context'] = {'default_account_id':", "for d in days): vals[days[week_start]] = True if 'repeat_day' in", "Stage Update', index=True, copy=False, readonly=True) project_id = fields.Many2one('project.project', string='Project', compute='_compute_project_id',", "!= 'followers': task.allowed_user_ids -= internal_users @api.depends('create_date', 'date_end', 'date_assign') def _compute_elapsed(self):", "records=self, force_create=False) if p] self.message_subscribe(partner_ids) return super(Task, self).message_update(msg, update_vals=update_vals) def", "a project 
containing tasks. You can either archive it or", "self.filtered(lambda project: project.privacy_visibility == 'portal'): project.allowed_user_ids |= project.partner_id.user_ids return res", "and map tasks from old to new project \"\"\" project", "email_list = self.email_split(msg) partner_ids = [p.id for p in self.env['mail.thread']._mail_find_partner_from_emails(email_list,", "[('recurrence_id', 'in', self.recurrence_id.ids)], } # --------------------------------------------------- # Rating business #", "the parent task partner_id changes, the task partner_id remains the", "archived task, but do not propagate an active_test context key", "all_users = self.env['res.partner'].browse(partner_ids).user_ids portal_users = all_users.filtered('share') internal_users = all_users -", "depth == 1: return children return children + children._get_all_subtasks(depth -", "'=', False)]) else: task.stage_id = False @api.returns('self', lambda value: value.id)", "business # --------------------------------------------------- # This method should be called once", "--------------------------------------------------- # Rating business # --------------------------------------------------- # This method should", "('tue', 'Tuesday'), ('wed', 'Wednesday'), ('thu', 'Thursday'), ('fri', 'Friday'), ('sat', 'Saturday'),", "self.filtered(lambda x: x.privacy_visibility != 'portal'): project.access_warning = _( \"The project", "Tasks (or optionally Issues if the Issue Tracker module is", "{}) if not self.user_id and not self.stage_id.fold: take_action = self._notify_get_action_link('assign',", "default.get('name'): default['name'] = _(\"%s (copy)\", self.name) if self.recurrence_id: default['recurrence_id'] =", "for project in self: project.access_url = '/my/project/%s' % project.id def", "self: permission_removed = allowed_users.get(project) - project.allowed_user_ids allowed_portal_users_removed = permission_removed.filtered('share') 
project.message_unsubscribe(allowed_portal_users_removed.partner_id.commercial_partner_id.ids)", "@api.model def default_get(self, default_fields): vals = super(Task, self).default_get(default_fields) days =", "task_ids = self.env['project.task'].with_context(active_test=False).search([('project_id', '=', self.id)], order='parent_id').ids old_to_new_tasks = {} for", "favorite_projects.write({'favorite_user_ids': [(3, self.env.uid)]}) def _get_default_favorite_user_ids(self): return [(6, 0, [self.env.uid])] name", "default=lambda self: self.env.user.has_group('project.group_project_recurring_tasks')) # rating fields rating_request_deadline = fields.Datetime(compute='_compute_rating_request_deadline', store=True)", "'ir.actions.act_window', 'res_id': wizard.id, 'target': 'new', 'context': context, } def write(self,", "= task.partner_id.email def _inverse_partner_email(self): for task in self: if task.partner_id", "fields.Datetime.now() if 'parent_id' in vals and vals['parent_id'] in self.ids: raise", "required=True, help=\"How to get customer feedback?\\n\" \"- Rating when changing", "date_last_stage_update if 'stage_id' in vals: vals.update(self.update_date_end(vals['stage_id'])) vals['date_last_stage_update'] = now #", "self: project.doc_count = Attachment.search_count([ '|', '&', ('res_model', '=', 'project.project'), ('res_id',", "= False for task in self.filtered(lambda t: t.recurring_task and t._is_recurrence_valid()):", "= _('By saving this change, the customer phone number will", "vals.get('recurring_task'): self.recurrence_id.unlink() tasks = self recurrence_update = vals.pop('recurrence_update', 'this') if", "the kanban view when there are no records in that", "subtype_ids=None): \"\"\" Add the users subscribed to allowed portal users", "= [p.id for p in self.env['mail.thread']._mail_find_partner_from_emails(email_list, records=task, force_create=False) if p]", "Attachments\", help=\"Attachment that don't come from message.\") # In the", "for vals in 
vals_list: project_id = vals.get('project_id') or self.env.context.get('default_project_id') if", ").default_get(['stage_id']).get('stage_id') vals[\"stage_id\"] = default_stage[project_id] # user_id change: update date_assign if", "default value displayed for the done state for kanban selection,", "in init_values: return self.env.ref('project.mt_task_stage') return super(Task, self)._track_subtype(init_values) def _notify_get_groups(self, msg_vals=None):", "archive recurring tasks. Please, disable the recurrence first.')) # stage", "'December'), ], compute='_compute_repeat', readonly=False) repeat_show_dow = fields.Boolean(compute='_compute_repeat_visibility') repeat_show_day = fields.Boolean(compute='_compute_repeat_visibility')", "= {} for task in self.env['project.task'].browse(task_ids): # preserve task name", "this step.\") auto_validation_kanban_state = fields.Boolean('Automatic kanban status', default=False, help=\"Automatically modify", "UserError, AccessError, ValidationError, RedirectWarning from odoo.tools.misc import format_date, get_lang from", "task.partner_id.phone if will_write_email and will_write_phone: task.ribbon_message = _('By saving this", "self: task.email_from = task.partner_id.email or ((task.partner_id or task.parent_id) and task.email_from)", "customers' in order to make it accessible by the recipient(s).\")", "if p] self.message_subscribe(partner_ids) return super(Task, self).message_update(msg, update_vals=update_vals) def _message_get_suggested_recipients(self): recipients", "self: if self.env.user in project.favorite_user_ids: favorite_projects |= project else: not_fav_projects", "allowed_users - project.allowed_portal_user_ids def _compute_access_url(self): super(Project, self)._compute_access_url() for project in", "customers. 
If they are notified they should probably have access", "in that stage.') mail_template_id = fields.Many2one( 'mail.template', string='Email Template', domain=[('model',", "repeat_day = fields.Selection([ (str(i), str(i)) for i in range(1, 32)", "empty_list_help_id=self.env.context.get('default_project_id'), empty_list_help_model='project.project', empty_list_help_document_name=tname, ) return super(Task, self).get_empty_list_help(help) def message_subscribe(self, partner_ids=None,", "allow_recurring_tasks = fields.Boolean(related='project_id.allow_recurring_tasks') recurring_task = fields.Boolean(string=\"Recurrent\") recurring_count = fields.Integer(string=\"Tasks in", "copyright and licensing details. import ast from datetime import timedelta,", "not in default: self.map_tasks(project.id) return project @api.model def create(self, vals):", "parent to the duplicated task defaults['parent_id'] = old_to_new_tasks.get(task.parent_id.id, False) new_task", "= allowed_users.get(project) - project.allowed_user_ids allowed_portal_users_removed = permission_removed.filtered('share') project.message_unsubscribe(allowed_portal_users_removed.partner_id.commercial_partner_id.ids) for task", "= fields.Boolean(compute='_compute_repeat_visibility') repeat_show_month = fields.Boolean(compute='_compute_repeat_visibility') @api.model def _get_recurrence_fields(self): return ['repeat_interval',", "Email') if task.partner_id.email else _('Customer') task._message_add_suggested_recipient(recipients, partner=task.partner_id, reason=reason) elif task.email_from:", "readonly=True) allow_subtasks = fields.Boolean(string=\"Allow Sub-tasks\", related=\"project_id.allow_subtasks\", readonly=True) subtask_count = fields.Integer(\"Sub-task", "'forever' or len(recurring_dates) > 5: task.recurrence_message += '<li>...</li>' task.recurrence_message +=", "== 'after' else 5) delta = task.repeat_interval if task.repeat_unit ==", "_('Blocked'), translate=True, required=True, help='Override the 
default value displayed for the", "fields.Html(string='Description') priority = fields.Selection([ ('0', 'Normal'), ('1', 'Important'), ], default='0',", "token=None, feedback=None, subtype_xmlid=None): return super(Task, self).rating_apply(rate, token=token, feedback=feedback, subtype_xmlid=\"project.mt_task_rating\") def", "tasks).write(vals) # rating on stage if 'stage_id' in vals and", "as its parent task.\")) if 'active' in vals and not", "Progress'), ('done', 'Ready'), ('blocked', 'Blocked')], string='Kanban State', copy=False, default='normal', required=True)", "'res_id': self.parent_id.id, 'type': 'ir.actions.act_window', 'context': dict(self._context, create=False) } def action_subtask(self):", "in self.message_follower_ids: project.message_subscribe(partner_ids=follower.partner_id.ids, subtype_ids=follower.subtype_ids.ids) if 'tasks' not in default: self.map_tasks(project.id)", "('res_model', '=', 'project.project'), ('res_id', 'in', self.ids), '&', ('res_model', '=', 'project.task'),", "readonly=False, store=True, copy=False) ribbon_message = fields.Char('Ribbon message', compute='_compute_ribbon_message') partner_city =", "the rating of the project and activate default filters\"\"\" action", "if any. 
\"\"\" aliases = self.sudo().mapped('project_id')._notify_get_reply_to(default=default, records=None, company=company, doc_names=None) res", "revenue on your project.\") favorite_user_ids = fields.Many2many( 'res.users', 'project_favorite_user_rel', 'project_id',", "is represented as a list of commands so we used", "want to explicitly set user_id to False; however we do", "fields.Float(\"Initially Planned Hours\", help='Time planned to achieve this task (including", "\"stage_id\" not in vals: # 1) Allows keeping the batch", "dict(self.env.context) context['stage_view'] = stage_view return { 'name': _('Delete Stage'), 'view_mode':", "'until' or self.repeat_until and self.repeat_until > fields.Date.today()) @api.depends('recurrence_id') def _compute_recurring_count(self):", "self.alias_name = False def _compute_alias_enabled(self): for project in self: project.alias_enabled", "already an alias aliases = self.mapped('project_id.alias_name') return [x for x", "Email', readonly=True, related_sudo=False) attachment_ids = fields.One2many('ir.attachment', compute='_compute_attachment_ids', string=\"Main Attachments\", help=\"Attachment", "the same. \"\"\" for task in self: if task.partner_id: if", "the document. \"\"\" groups = super(Task, self)._notify_get_groups(msg_vals=msg_vals) local_msg_vals = dict(msg_vals", "in task_data) for project in self: project.task_count = result.get(project.id, 0)", "in self.filtered(lambda project: project.privacy_visibility == 'portal'): project.allowed_user_ids |= project.partner_id.user_ids return", "return self.env['project.task.type'].search(search_domain, order=order, limit=1).id # ------------------------------------------------ # CRUD overrides #", "new_task = task.copy(defaults) old_to_new_tasks[task.id] = new_task.id tasks += new_task return", "'portal' for group_name, group_method, group_data in groups: if group_name in", "if not self._check_recursion(): raise ValidationError(_('Error! 
You cannot create recursive hierarchy", "portal_users elif task.project_id.privacy_visibility != 'followers': task.allowed_user_ids -= internal_users @api.depends('create_date', 'date_end',", "customer when the task reaches this step.\") auto_validation_kanban_state = fields.Boolean('Automatic", "readonly=False, copy=False) project_privacy_visibility = fields.Selection(related='project_id.privacy_visibility', string=\"Project Visibility\") # Computed field", "# rating on stage if 'stage_id' in vals and vals.get('stage_id'):", "the gateway user to be responsible if no other responsible", "customer means that it was created through the chatter using", "string='Working days to close', store=True, group_operator=\"avg\") # customer portal: include", "'title': _('I take it')}] new_group[2]['actions'] = project_actions groups = [new_group]", "-= permission_removed if 'allow_recurring_tasks' in vals and not vals.get('allow_recurring_tasks'): self.env['project.task'].search([('project_id',", "== 'until': task.recurrence_message += _('<p><em>Number of tasks: %(tasks_count)s</em></p>') % {'tasks_count':", "task.allowed_user_ids.filtered('share') if portal_users: user_names = ', '.join(portal_users[:10].mapped('name')) raise ValidationError(_(\"The project", "fields.Selection([ ('this', 'This task'), ('subsequent', 'This and following tasks'), ('all',", "vals['recurrence_id'] = recurrence.id tasks = super().create(vals_list) for task in tasks:", "Kanban Label', default=lambda s: _('In Progress'), translate=True, required=True, help='Override the", "self: rating_template = task.stage_id.rating_template_id if rating_template: task.rating_send_request(rating_template, lang=task.partner_id.lang, force_send=force_send) def", "_get_all_subtasks(self, depth=0): children = self.mapped('child_ids').filtered(lambda children: children.active) if not children:", "self: task.subtask_planned_hours = sum(child_task.planned_hours + child_task.subtask_planned_hours for child_task in 
task.child_ids)", "compute='_compute_repeat', readonly=False) repeat_month = fields.Selection([ ('january', 'January'), ('february', 'February'), ('march',", "_compute_stage_id(self): for task in self: if task.project_id: if task.project_id not", "fields.Char(string='Use Tasks as', default='Tasks', help=\"Label used for the tasks of", "task.kanban_state == 'blocked': task.kanban_state_label = task.legend_blocked else: task.kanban_state_label = task.legend_done", "not project.analytic_account_id.line_ids: analytic_accounts_to_delete |= project.analytic_account_id result = super(Project, self).unlink() analytic_accounts_to_delete.unlink()", "change_default=True) planned_hours = fields.Float(\"Initially Planned Hours\", help='Time planned to achieve", "self.env.company.id, }) action['context'] = ctx return action def action_recurring_tasks(self): return", "readonly=False) repeat_weekday = fields.Selection([ ('mon', 'Monday'), ('tue', 'Tuesday'), ('wed', 'Wednesday'),", "project and tasks followed by\\n\" \" them or by someone", "'project.delete.wizard', 'views': [(self.env.ref('project.project_delete_wizard_form').id, 'form')], 'type': 'ir.actions.act_window', 'res_id': wizard.id, 'target': 'new',", "self.project_id = False @api.depends('project_id.company_id') def _compute_company_id(self): for task in self.filtered(lambda", "@api.depends('parent_id.partner_id', 'project_id.partner_id') def _compute_partner_id(self): \"\"\" If a task has no", "and incoming emails in communication history website_message_ids = fields.One2many(domain=lambda self:", "in self: if task.partner_id and task.partner_email != task.partner_id.email: task.partner_id.email =", "is in that stage.') mail_template_id = fields.Many2one( 'mail.template', string='Email Template',", "self._get_recurrence_fields() defaults = self.default_get(rec_fields) for task in self: for f", "= task.project_id.resource_calendar_id.get_work_duration_data(dt_create_date, dt_date_end, compute_leaves=True) 
task.working_hours_close = duration_data['hours'] task.working_days_close = duration_data['days']", "self.id: values['alias_defaults'] = defaults = ast.literal_eval(self.alias_defaults or \"{}\") defaults['project_id'] =", "self} leftover = self.filtered(lambda rec: not rec.project_id) if leftover: res.update(super(Task,", "empty_list_help_model='project.project', empty_list_help_document_name=tname, ) return super(Task, self).get_empty_list_help(help) def message_subscribe(self, partner_ids=None, channel_ids=None,", "self.mapped('tasks').message_unsubscribe(partner_ids=partner_ids, channel_ids=channel_ids) return super(Project, self).message_unsubscribe(partner_ids=partner_ids, channel_ids=channel_ids) def _alias_get_creation_values(self): values =", "subtask_count = fields.Integer(\"Sub-task count\", compute='_compute_subtask_count') email_from = fields.Char(string='Email From', help=\"These", "Incoming emails are automatically synchronized \" \"with Tasks (or optionally", "view when there are no records in that stage to", "% project.id def _compute_access_warning(self): super(Project, self)._compute_access_warning() for project in self.filtered(lambda", "False), ('is_closed', '=', False)]) @api.model def _default_company_id(self): if self._context.get('default_project_id'): return", "if the project's rating configuration is 'Rating when changing stage',", "assign tasks and create new one directly from notification emails.", "and vals['parent_id'] in self.ids: raise UserError(_(\"Sorry. 
You can't set a", "else: task.kanban_state_label = task.legend_done def _compute_access_url(self): super(Task, self)._compute_access_url() for task", "('quarterly', 'Quarterly'), ('yearly', 'Yearly')], 'Rating Frequency', required=True, default='monthly') _sql_constraints =", "not subtype_ids or task_subtypes: self.mapped('tasks').message_subscribe( partner_ids=partner_ids, channel_ids=channel_ids, subtype_ids=task_subtypes) if partner_ids:", "|= self.env['project.task'].search(recurrence_domain) result = super(Task, tasks).write(vals) # rating on stage", "force_create=False) if p] task.message_subscribe(partner_ids) return task def message_update(self, msg, update_vals=None):", "def _send_task_rating_mail(self, force_send=False): for task in self: rating_template = task.stage_id.rating_template_id", "tasks += new_task return project.write({'tasks': [(6, 0, tasks.ids)]}) @api.returns('self', lambda", "(project_subtypes.mapped('parent_id') | project_subtypes.filtered(lambda sub: sub.internal or sub.default)).ids if project_subtypes else", "self.company_id: self.project_id = False @api.depends('project_id.company_id') def _compute_company_id(self): for task in", "task.partner_id.phone: task.partner_id.phone = task.partner_phone @api.depends('partner_email', 'partner_phone', 'partner_id') def _compute_ribbon_message(self): for", "vals['active']: self.env['project.task'].search([('stage_id', 'in', self.ids)]).write({'active': False}) return super(ProjectTaskType, self).write(vals) @api.depends('project_ids', 'project_ids.rating_active')", "'=', False)]) color = fields.Integer(string='Color Index') user_id = fields.Many2one('res.users', string='Project", "set, stages must be default stages \"\"\" # collect all", "task_subtypes: self.mapped('tasks').message_subscribe( partner_ids=partner_ids, channel_ids=channel_ids, subtype_ids=task_subtypes) if partner_ids: all_users = self.env['res.partner'].browse(partner_ids).user_ids", "'=', False)]).write({'partner_id': 
new_partner.id}) return super(Task, self)._message_post_after_hook(message, msg_vals) def action_assign_to_me(self): self.write({'user_id':", "rating configuration is 'Rating when changing stage', then an email", "= Attachment.search_count([ '|', '&', ('res_model', '=', 'project.project'), ('res_id', '=', project.id),", "now # recurrence fields rec_fields = vals.keys() & self._get_recurrence_fields() if", "update_date_end(self, stage_id): project_task_type = self.env['project.task.type'].browse(stage_id) if project_task_type.fold or project_task_type.is_closed: return", "'Day of the Year'), ], default='date', compute='_compute_repeat', readonly=False) mon =", "p.rating_active) if disabled_projects: stage.disabled_rating_warning = '\\n'.join('- %s' % p.name for", "must be lower than project end-date.') ] @api.depends('partner_id.email') def _compute_partner_email(self):", "in self: if task.partner_id and task.partner_id.email != task.partner_email: task.partner_email =", "task in self: if task.recurrence_id: task.recurrence_id.write(rec_values) elif vals.get('recurring_task'): rec_values['next_recurrence_date'] =", "project in self} # directly compute is_favorite to dodge allow", "@api.depends('project_id.allowed_user_ids', 'project_id.privacy_visibility') def _compute_allowed_user_ids(self): for task in self: portal_users =", "not self.displayed_image_id: image_attachments = message.attachment_ids.filtered(lambda a: a.mimetype == 'image') if", "UserError(_('You cannot archive recurring tasks. 
Please, disable the recurrence first.'))", "index=True, copy=False) date_assign = fields.Datetime(string='Assigning Date', index=True, copy=False, readonly=True) date_deadline", "users: employees may see everything.\" \" Portal users may see", "_is_recurrence_valid(self): self.ensure_one() return self.repeat_interval > 0 and\\ (not self.repeat_show_dow or", "= fields.Selection([ ('followers', 'Invited internal users'), ('employees', 'All internal users'),", "if any, or else the parent task partner_id. Once the", "the privacy of the project is too restricted. Set the", "self.env['project.task.type'].browse(stage_id) if project_task_type.fold or project_task_type.is_closed: return {'date_end': fields.Datetime.now()} return {'date_end':", "def _compute_elapsed(self): task_linked_to_calendar = self.filtered( lambda task: task.project_id.resource_calendar_id and task.create_date", "Portal Users\", domain=[('share', '=', True)]) doc_count = fields.Integer(compute='_compute_attached_docs_count', string=\"Number of", "= fields.Char(compute='_compute_kanban_state_label', string='Kanban State Label', tracking=True) create_date = fields.Datetime(\"Created On\",", "self.env.context.get('default_project_id', False) if project_id: name = self.env['project.project'].browse(project_id).label_tasks if name: tname", "in vals: # 1) Allows keeping the batch creation of", "string='Rating Email Template', domain=[('model', '=', 'project.task')], help=\"If set and if", "active = fields.Boolean('Active', default=True) name = fields.Char(string='Stage Name', required=True, translate=True)", "project.name, 'company_id': project.company_id.id, 'partner_id': project.partner_id.id, 'active': True, }) project.write({'analytic_account_id': analytic_account.id})", "Date', index=True, copy=False, readonly=True) date_deadline = fields.Date(string='Deadline', index=True, copy=False, tracking=True)", "allow portal users to see the project's tasks. 
(%s)\", user_names))", "can either archive it or first delete all of its", "= self.name action_context.pop('group_by', None) return dict(action, context=action_context) # --------------------------------------------------- #", "in vals: vals['kanban_state'] = 'normal' # user_id change: update date_assign", "def message_new(self, msg, custom_values=None): \"\"\" Overrides mail_thread message_new that is", "context, } def write(self, vals): if 'active' in vals and", "copy=False, readonly=True) project_id = fields.Many2one('project.project', string='Project', compute='_compute_project_id', store=True, readonly=False, index=True,", "set the parent to the duplicated task defaults['parent_id'] = old_to_new_tasks.get(task.parent_id.id,", "fields.Char( 'Green Kanban Label', default=lambda s: _('Ready'), translate=True, required=True, help='Override", "fields.Char('Ribbon message', compute='_compute_ribbon_message') partner_city = fields.Char(related='partner_id.city', readonly=False) manager_id = fields.Many2one('res.users',", "= '<ul>' for date in recurring_dates[:5]: task.recurrence_message += '<li>%s</li>' %", "Recurrencies', compute='_compute_recurrence_message') repeat_interval = fields.Integer(string='Repeat Every', default=1, compute='_compute_repeat', readonly=False) repeat_unit", "this change, the customer email and phone number will also", "in vals: vals[\"company_id\"] = self.env[\"project.project\"].browse( project_id ).company_id.id or self.env.company.id if", "fields.Many2one('project.task', string='Parent Task', index=True) child_ids = fields.One2many('project.task', 'parent_id', string=\"Sub-tasks\", context={'active_test':", "Template', domain=[('model', '=', 'project.task')], help=\"If set an email will be", "if partner_ids: all_users = self.env['res.partner'].browse(partner_ids).user_ids portal_users = all_users.filtered('share') internal_users =", "task.allowed_user_ids |= task.project_id.allowed_portal_user_ids if task.project_id.privacy_visibility != 
'portal': task.allowed_user_ids -= portal_users", "= fields.Many2one('ir.attachment', domain=\"[('res_model', '=', 'project.task'), ('res_id', '=', id), ('mimetype', 'ilike',", "vals_list): default_stage = dict() for vals in vals_list: project_id =", "project's tasks \"\"\" _name = \"project.tags\" _description = \"Project Tags\"", "]) action['context'] = \"{'default_res_model': '%s','default_res_id': %d}\" % (self._name, self.id) return", "domain=\"[('res_model', '=', 'project.task'), ('res_id', '=', id), ('mimetype', 'ilike', 'image')]\", string='Cover", "# 2) Ensure the defaults are correct (and computed once", "mon = fields.Boolean(string=\"Mon\", compute='_compute_repeat', readonly=False) tue = fields.Boolean(string=\"Tue\", compute='_compute_repeat', readonly=False)", "and task.repeat_on_month == 'day') or (task.repeat_unit == 'year' and task.repeat_on_year", "= fields.Selection([ ('january', 'January'), ('february', 'February'), ('march', 'March'), ('april', 'April'),", "portal users to see the project's tasks. 
(%s)\", user_names)) def", "analytic_accounts_to_delete = self.env['account.analytic.account'] for project in self: if project.analytic_account_id and", "in self: if self.env.user in project.favorite_user_ids: favorite_projects |= project else:", "task.legend_done def _compute_access_url(self): super(Task, self)._compute_access_url() for task in self: task.access_url", "if h] current_objects.insert(0, 'project.project-%s, ' % self.project_id.id) headers['X-Odoo-Objects'] = ','.join(current_objects)", "super(Task, self)._compute_access_warning() for task in self.filtered(lambda x: x.project_id.privacy_visibility != 'portal'):", "@api.constrains('allowed_user_ids') def _check_no_portal_allowed(self): for task in self.filtered(lambda t: t.project_id.privacy_visibility !=", "_default_company_id(self): if self._context.get('default_project_id'): return self.env['project.project'].browse(self._context['default_project_id']).company_id return self.env.company @api.model def _read_group_stage_ids(self,", "for task in self: if task.partner_id and task.partner_email != task.partner_id.email:", "!= project.partner_id.phone: project.partner_id.phone = project.partner_phone @api.onchange('alias_enabled') def _onchange_alias_name(self): if not", "\"\"\" Tags of project's tasks \"\"\" _name = \"project.tags\" _description", "children to third generation # If depth <= 0, return", "= fields.Many2one('res.currency', related=\"company_id.currency_id\", string=\"Currency\", readonly=True) analytic_account_id = fields.Many2one('account.analytic.account', string=\"Analytic Account\",", "tracking=True, check_company=True, change_default=True) planned_hours = fields.Float(\"Initially Planned Hours\", help='Time planned", "project creation self = self.with_context(mail_create_nosubscribe=True) project = super(Project, self).create(vals) if", "it.\") sequence = fields.Integer(default=10, help=\"Gives the sequence order when displaying", "contains all field in its view if 
self._context.get('default_project_id'): default_project =", "msg_vals) def action_assign_to_me(self): self.write({'user_id': self.env.user.id}) # If depth == 1,", "for project in readgroup] + self.project_ids.ids)) wizard = self.with_context(project_ids=project_ids).env['project.task.type.delete.wizard'].create({ 'project_ids':", "vals.get('privacy_visibility'): for project in self.filtered(lambda project: project.privacy_visibility == 'portal'): project.allowed_user_ids", "self: users = project.allowed_internal_user_ids | project.allowed_portal_user_ids project.allowed_user_ids = users def", "Explanation', readonly=True, related_sudo=False) legend_normal = fields.Char(related='stage_id.legend_normal', string='Kanban Ongoing Explanation', readonly=True,", "to False, it will allow you to hide the project", "= self.env['project.task'].with_context(active_test=False).search([('project_id', '=', self.id)], order='parent_id').ids old_to_new_tasks = {} for task", "image_attachments[0] if self.email_from and not self.partner_id: # we consider that", "Set the privacy of the project to 'Visible by following", "dict(self.env.context or {}) create_context['default_user_id'] = False if custom_values is None:", "False; however we do not # want the gateway user", "elif 'stage_id' in init_values: return self.env.ref('project.mt_task_stage') return super(Task, self)._track_subtype(init_values) def", "'=', self.env.context['default_project_id'])] + search_domain stage_ids = stages._search(search_domain, order=order, access_rights_uid=SUPERUSER_ID) return", "section_id: if set, stages must belong to this section or", "a document without customer means that it was created through", "for data in task_data) for project in self: project.task_count =", "tracking=True) subtask_project_id = fields.Many2one('project.project', string='Sub-task Project', ondelete=\"restrict\", help=\"Project in which", "this change, the customer phone number will also be updated.')", "allowed_user_ids 
new_group = ('group_project_user', group_func, {}) if not self.user_id and", "task_data = self.env['project.task'].read_group([('project_id', 'in', self.ids), '|', '&', ('stage_id.is_closed', '=', False),", "change: update date_assign if vals.get('user_id'): vals['date_assign'] = fields.Datetime.now() # Stage", "task(s).')) @api.model def get_empty_list_help(self, help): tname = _(\"task\") project_id =", "def _compute_partner_phone(self): for task in self: if task.partner_id and task.partner_phone", "all project and tasks.\\n\" \"- Invited portal and all internal", "close', store=True, group_operator=\"avg\") working_days_open = fields.Float(compute='_compute_elapsed', string='Working days to assign',", "if self.project_id: current_objects = [h for h in headers.get('X-Odoo-Objects', '').split(',')", "'project_id', string='Projects', default=_get_default_project_ids) legend_blocked = fields.Char( 'Red Kanban Label', default=lambda", "= sum(child_task.planned_hours + child_task.subtask_planned_hours for child_task in task.child_ids) @api.depends('child_ids') def", "and task.repeat_on_year == 'day') task.repeat_show_dow = task.recurring_task and task.repeat_unit ==", "'project.task.type' _description = 'Task Stage' _order = 'sequence, id' def", "task partner_id has been set: 1) if the project partner_id", "for the tasks of the project.\", translate=True) tasks = fields.One2many('project.task',", "value.id) def copy(self, default=None): if default is None: default =", "x.project_id.privacy_visibility != 'portal'): task.access_warning = _( \"The task cannot be", "in rec_fields} rec_values['next_recurrence_date'] = fields.Datetime.today() recurrence = self.env['project.task.recurrence'].create(rec_values) vals['recurrence_id'] =", "groups = super(Task, self)._notify_get_groups(msg_vals=msg_vals) local_msg_vals = dict(msg_vals or {}) self.ensure_one()", "('group_project_user', group_func, {}) if not self.user_id and not self.stage_id.fold: take_action", "False}) return 
super(ProjectTaskType, self).write(vals) @api.depends('project_ids', 'project_ids.rating_active') def _compute_disabled_rating_warning(self): for stage", "]) for project in projects: project.task_ids._send_task_rating_mail() project._compute_rating_request_deadline() self.env.cr.commit() class Task(models.Model):", "self recurrence_update = vals.pop('recurrence_update', 'this') if recurrence_update != 'this': recurrence_domain", "if self.project_id.company_id != self.company_id: self.project_id = False @api.depends('project_id.company_id') def _compute_company_id(self):", "(task.repeat_unit == 'month' and task.repeat_on_month == 'date') or (task.repeat_unit ==", "('normal', 'In Progress'), ('done', 'Ready'), ('blocked', 'Blocked')], string='Kanban State', copy=False,", "with a specified recipient (not a follower, a specific one)", "up the mail templates on the stages for which you", "default=10, help=\"Gives the sequence order when displaying a list of", "and stage, normally altered during copy defaults = self._map_tasks_default_valeus(task, project)", "the task partner_id remains the same. \"\"\" for task in", "'November'), ('december', 'December'), ], compute='_compute_repeat', readonly=False) repeat_show_dow = fields.Boolean(compute='_compute_repeat_visibility') repeat_show_day", "['portal.mixin', 'mail.alias.mixin', 'mail.thread', 'rating.parent.mixin'] _order = \"sequence, name, id\" _rating_satisfaction_days", "according to the email. 
\"\"\" # remove default author when", "self)._track_subtype(init_values) def _notify_get_groups(self, msg_vals=None): \"\"\" Handle project users and managers", "= fields.Datetime(compute='_compute_rating_request_deadline', store=True) rating_active = fields.Boolean('Customer Ratings', default=lambda self: self.env.user.has_group('project.group_project_rating'))", "'in', recurring_tasks.recurrence_id.ids)], ['id'], 'recurrence_id') tasks_count = {c.get('recurrence_id')[0]: c.get('recurrence_id_count') for c", "task.project_id): task.company_id = task.project_id.company_id @api.depends('project_id') def _compute_stage_id(self): for task in", "stage_view return { 'name': _('Delete Stage'), 'view_mode': 'form', 'res_model': 'project.task.type.delete.wizard',", "help): tname = _(\"task\") project_id = self.env.context.get('default_project_id', False) if project_id:", "task.partner_phone != task.partner_id.phone if will_write_email and will_write_phone: task.ribbon_message = _('By", "in task_linked_to_calendar: dt_create_date = fields.Datetime.from_string(task.create_date) if task.date_assign: dt_date_assign = fields.Datetime.from_string(task.date_assign)", "required=True) kanban_state_label = fields.Char(compute='_compute_kanban_state_label', string='Kanban State Label', tracking=True) create_date =", "Kanban', help='This stage is folded in the kanban view when", "in another stage.\\n\" \"- Periodical Rating: email will be sent", "the copied task on project duplication \"\"\" return { 'stage_id':", "action def action_view_account_analytic_line(self): \"\"\" return the action to see all", "and\\ (not self.repeat_show_dow or self._get_weekdays()) and\\ (self.repeat_type != 'after' or", "than project end-date.') ] @api.depends('partner_id.email') def _compute_partner_email(self): for project in", "'res.company', string='Company', compute='_compute_company_id', store=True, readonly=False, required=True, copy=True, default=_default_company_id) color =", 
"fields.Datetime(\"Last Updated On\", readonly=True, index=True) date_end = fields.Datetime(string='Ending Date', index=True,", "image_attachments = message.attachment_ids.filtered(lambda a: a.mimetype == 'image') if image_attachments: self.displayed_image_id", "_mail_post_access = 'read' _order = \"priority desc, sequence, id desc\"", "default=lambda s: _('In Progress'), translate=True, required=True, help='Override the default value", "if task.project_id.privacy_visibility != 'portal': task.allowed_user_ids -= portal_users elif task.project_id.privacy_visibility !=", "all default values as 'quick_create' does not contains all field", "fields.Float(compute='_compute_elapsed', string='Working days to assign', store=True, group_operator=\"avg\") working_days_close = fields.Float(compute='_compute_elapsed',", "related_sudo=False) legend_done = fields.Char(related='stage_id.legend_done', string='Kanban Valid Explanation', readonly=True, related_sudo=False) legend_normal", "we consider that posting a message with a specified recipient", "'=', 'periodic'), ('rating_request_deadline', '<=', fields.Datetime.now()) ]) for project in projects:", "'views': [(self.env.ref('project.view_project_task_type_delete_wizard').id, 'form')], 'type': 'ir.actions.act_window', 'res_id': wizard.id, 'target': 'new', 'context':", "and project.partner_phone != project.partner_id.phone: project.partner_phone = project.partner_id.phone def _inverse_partner_phone(self): for", "string='Kanban Blocked Explanation', readonly=True, related_sudo=False) legend_done = fields.Char(related='stage_id.legend_done', string='Kanban Valid", "self.message_subscribe(partner_ids) return super(Task, self).message_update(msg, update_vals=update_vals) def _message_get_suggested_recipients(self): recipients = super(Task,", "and\\ (self.repeat_type != 'after' or self.repeat_number) and\\ (self.repeat_type != 'until'", "['id'], 'recurrence_id') tasks_count = {c.get('recurrence_id')[0]: c.get('recurrence_id_count') 
for c in count}", "project_id = self.env.context.get('default_project_id') if not project_id: return False return self.stage_find(project_id,", "= self.env[\"project.project\"].browse( project_id ).company_id.id or self.env.company.id if project_id and \"stage_id\"", "in default_stage: default_stage[project_id] = self.with_context( default_project_id=project_id ).default_get(['stage_id']).get('stage_id') vals[\"stage_id\"] = default_stage[project_id]", "its tasks.')) # Delete the empty related analytic account analytic_accounts_to_delete", "fields.Many2many('res.users', string=\"Visible to\", groups='project.group_project_manager', compute='_compute_allowed_user_ids', store=True, readonly=False, copy=False) project_privacy_visibility =", "], default=\"forever\", string=\"Until\", compute='_compute_repeat', readonly=False) repeat_until = fields.Date(string=\"End Date\", compute='_compute_repeat',", "task.repeat_show_month = task.recurring_task and task.repeat_unit == 'year' @api.depends('recurring_task') def _compute_repeat(self):", "of the Year'), ], default='date', compute='_compute_repeat', readonly=False) mon = fields.Boolean(string=\"Mon\",", "True return groups def _notify_get_reply_to(self, default=None, records=None, company=None, doc_names=None): \"\"\"", "recurring tasks. Please, disable the recurrence first.')) return super().unlink() #", "('company_id', '=', False), ('company_id', '=', company_id)]\") partner_is_company = fields.Boolean(related='partner_id.is_company', readonly=True)", "], default='date', compute='_compute_repeat', readonly=False) repeat_on_year = fields.Selection([ ('date', 'Date of", "is # found. 
create_context = dict(self.env.context or {}) create_context['default_user_id'] =", "= fields.Many2many('res.users', 'project_allowed_internal_users_rel', string=\"Allowed Internal Users\", default=lambda self: self.env.user, domain=[('share',", "self.env['ir.model.data'].xmlid_to_res_id('mail.mt_note'), 'email_layout_xmlid': 'mail.mail_notification_light' }) return res def _creation_subtype(self): return self.env.ref('project.mt_task_new')", "project in self: if project.partner_id and project.partner_email != project.partner_id.email: project.partner_id.email", "stage_ids = stages._search(search_domain, order=order, access_rights_uid=SUPERUSER_ID) return stages.browse(stage_ids) active = fields.Boolean(default=True)", "if portal_users: user_names = ', '.join(portal_users[:10].mapped('name')) raise ValidationError(_(\"The project visibility", "'tue', 'wed', 'thu', 'fri', 'sat', 'sun', 'repeat_day', 'repeat_week', 'repeat_month', 'repeat_weekday')", "tasks def write(self, vals): now = fields.Datetime.now() if 'parent_id' in", "not self.stage_id.fold: take_action = self._notify_get_action_link('assign', **local_msg_vals) project_actions = [{'url': take_action,", "action = self.env[\"ir.actions.actions\"]._for_xml_id(\"project.project_task_action_sub_task\") # display all subtasks of current task", "if rating_template: task.rating_send_request(rating_template, lang=task.partner_id.lang, force_send=force_send) def rating_get_partner_id(self): res = super(Task,", "Override to set alias of tasks to their project if", "copied task on project duplication \"\"\" return { 'stage_id': task.stage_id.id,", "feedbacks.\") rating_status_period = fields.Selection([ ('daily', 'Daily'), ('weekly', 'Weekly'), ('bimonthly', 'Twice", "string='Sub-task Project', readonly=True) allow_subtasks = fields.Boolean(string=\"Allow Sub-tasks\", related=\"project_id.allow_subtasks\", readonly=True) subtask_count", "in self: disabled_projects = stage.project_ids.filtered(lambda p: not 
p.rating_active) if disabled_projects:", "== 'portal': task.allowed_user_ids |= task.project_id.allowed_portal_user_ids if task.project_id.privacy_visibility != 'portal': task.allowed_user_ids", "== 'date') task.repeat_show_week = task.recurring_task and (task.repeat_unit == 'month' and", "help='Time planned to achieve this task (including its sub-tasks).', tracking=True)", "{} for task in self.env['project.task'].browse(task_ids): # preserve task name and", "child_task.subtask_planned_hours for child_task in task.child_ids) @api.depends('child_ids') def _compute_subtask_count(self): for task", "False)]) @api.model def _default_company_id(self): if self._context.get('default_project_id'): return self.env['project.project'].browse(self._context['default_project_id']).company_id return self.env.company", "class ProjectTaskType(models.Model): _name = 'project.task.type' _description = 'Task Stage' _order", "Prevent double project creation self = self.with_context(mail_create_nosubscribe=True) project = super(Project,", "or _(\"No Subject\"), 'email_from': msg.get('from'), 'planned_hours': 0.0, 'partner_id': msg.get('author_id') }", "self: if task.partner_id and task.partner_phone != task.partner_id.phone: task.partner_phone = task.partner_id.phone", "= ('group_project_user', group_func, {}) if not self.user_id and not self.stage_id.fold:", "stages.ids)] if 'default_project_id' in self.env.context: search_domain = ['|', ('project_ids', '=',", "creation and assignation/closing. 
working_hours_open = fields.Float(compute='_compute_elapsed', string='Working hours to assign',", "self: self.env.user, domain=[('share', '=', False)]) allowed_portal_user_ids = fields.Many2many('res.users', 'project_allowed_portal_users_rel', string=\"Allowed", "account \"\"\" action = self.env[\"ir.actions.actions\"]._for_xml_id(\"analytic.account_analytic_line_action\") action['context'] = {'default_account_id': self.analytic_account_id.id} action['domain']", "= self.project_id.allowed_portal_user_ids.partner_id.ids groups.insert(0, ( 'allowed_portal_users', lambda pdata: pdata['type'] == 'portal'", "project.is_favorite = self.env.user in project.favorite_user_ids def _inverse_is_favorite(self): favorite_projects = not_fav_projects", "= self.with_context( empty_list_help_id=self.env.context.get('default_project_id'), empty_list_help_model='project.project', empty_list_help_document_name=tname, ) return super(Task, self).get_empty_list_help(help) def", "domain=\"['|', ('company_id', '=', False), ('company_id', '=', company_id)]\") partner_email = fields.Char(", "display all subtasks of current task action['domain'] = [('id', 'child_of',", "= fields.Date.today() + timedelta(days=7) if 'repeat_weekday' in default_fields: vals['repeat_weekday'] =", "email_list = tools.email_split((msg.get('to') or '') + ',' + (msg.get('cc') or", "company.\") allowed_user_ids = fields.Many2many('res.users', compute='_compute_allowed_users', inverse='_inverse_allowed_user') allowed_internal_user_ids = fields.Many2many('res.users', 'project_allowed_internal_users_rel',", "k, v in ctx.items() if not k.startswith('search_default_')} ctx.update({ 'default_name': self.env.context.get('name',", "= dict(msg_vals or {}) self.ensure_one() project_user_group_id = self.env.ref('project.group_project_user').id group_func =", "project.partner_email = project.partner_id.email def _inverse_partner_email(self): for project in self: if", "JS. 
new_partner = message.partner_ids.filtered(lambda partner: partner.email == self.email_from) if new_partner:", "in recurring_dates[:5]: task.recurrence_message += '<li>%s</li>' % date.strftime(date_format) if task.repeat_type ==", "full copyright and licensing details. import ast from datetime import", "children # If depth == 3, return children to third", "task.kanban_state_label = task.legend_done def _compute_access_url(self): super(Task, self)._compute_access_url() for task in", "and tasks.\\n\" \"- All internal users: employees may see all", "'name': 'Tasks in Recurrence', 'type': 'ir.actions.act_window', 'res_model': 'project.task', 'view_mode': 'tree,form',", "date = fields.Date.today() number_occurrences = min(5, task.repeat_number if task.repeat_type ==", "\" * A good feedback from the customer will update", "days): vals[days[week_start]] = True if 'repeat_day' in default_fields: vals['repeat_day'] =", "if all(d in default_fields for d in days): vals[days[week_start]] =", "subtype_ids else None task_subtypes = (project_subtypes.mapped('parent_id') | project_subtypes.filtered(lambda sub: sub.internal", "'|', '&', ('res_model', '=', 'project.project'), ('res_id', 'in', self.ids), '&', ('res_model',", "for task in self: if task.recurrence_id: task.recurrence_id.write(rec_values) elif vals.get('recurring_task'): rec_values['next_recurrence_date']", "and pdata['id'] in allowed_user_ids, {} )) portal_privacy = self.project_id.privacy_visibility ==", "project = super(Project, self).create(vals) if not vals.get('subtask_project_id'): project.subtask_project_id = project.id", "index=True, copy=False, readonly=True) date_deadline = fields.Date(string='Deadline', index=True, copy=False, tracking=True) date_last_stage_update", "default stage; if not set, stages must be default stages", "[(4, self.env.uid)]}) favorite_projects.write({'favorite_user_ids': [(3, self.env.uid)]}) def action_view_tasks(self): action = self.with_context(active_id=self.id,", "email_list = 
task.email_split(msg) partner_ids = [p.id for p in self.env['mail.thread']._mail_find_partner_from_emails(email_list,", "the domain of displayed_image_id, we couln't use attachment_ids because a", "elif task.kanban_state == 'blocked': task.kanban_state_label = task.legend_blocked else: task.kanban_state_label =", "index=True, compute='_compute_email_from', store=\"True\", readonly=False) allowed_user_ids = fields.Many2many('res.users', string=\"Visible to\", groups='project.group_project_manager',", "Project User has no write access for project. not_fav_projects.write({'favorite_user_ids': [(4,", "= stage.project_ids.filtered(lambda p: not p.rating_active) if disabled_projects: stage.disabled_rating_warning = '\\n'.join('-", "Of The Week', compute='_compute_repeat', readonly=False) repeat_month = fields.Selection([ ('january', 'January'),", "'fri', 'sat', 'sun', 'repeat_day', 'repeat_week', 'repeat_month', 'repeat_weekday') def _compute_recurrence_message(self): self.recurrence_message", "'context': dict(self._context, create=False) } def action_subtask(self): action = self.env[\"ir.actions.actions\"]._for_xml_id(\"project.project_task_action_sub_task\") #", "and project.partner_id.email != project.partner_email: project.partner_email = project.partner_id.email def _inverse_partner_email(self): for", "Index') user_email = fields.Char(related='user_id.email', string='User Email', readonly=True, related_sudo=False) attachment_ids =", "\"\"\" # collect all section_ids section_ids = [] if section_id:", "task has no partner_id, use the project partner_id if any,", "data['project_id_count']) for data in task_data) for project in self: project.task_count", "is_favorite to dodge allow write access right if 'is_favorite' in", "string='Parent Task', index=True) child_ids = fields.One2many('project.task', 'parent_id', string=\"Sub-tasks\", context={'active_test': False})", "Project on dashboard', help=\"Whether this project should be displayed on", "task in 
self.filtered(lambda task: task.project_id): task.company_id = task.project_id.company_id @api.depends('project_id') def", "Rating') ], 'Customer Ratings Status', default=\"stage\", required=True, help=\"How to get", "return super(Project, self).message_unsubscribe(partner_ids=partner_ids, channel_ids=channel_ids) def _alias_get_creation_values(self): values = super(Project, self)._alias_get_creation_values()", "return [x for x in email_list if x.split('@')[0] not in", "self = self.with_context( empty_list_help_id=self.env.context.get('default_project_id'), empty_list_help_model='project.project', empty_list_help_document_name=tname, ) return super(Task, self).get_empty_list_help(help)", "('done', 'Ready'), ('blocked', 'Blocked')], string='Kanban State', copy=False, default='normal', required=True) kanban_state_label", "fields.Float(compute='_compute_elapsed', string='Working hours to close', store=True, group_operator=\"avg\") working_days_open = fields.Float(compute='_compute_elapsed',", "fields.Many2one( 'mail.template', string='Rating Email Template', domain=[('model', '=', 'project.task')], help=\"If set", "None active = fields.Boolean('Active', default=True) name = fields.Char(string='Stage Name', required=True,", "\"\"\" action = self.env[\"ir.actions.actions\"]._for_xml_id(\"analytic.account_analytic_line_action\") action['context'] = {'default_account_id': self.analytic_account_id.id} action['domain'] =", "customer email and phone number will also be updated.') elif", "\"project.task\" _description = \"Task\" _date_name = \"date_assign\" _inherit = ['portal.mixin',", "cost and revenue on your project.\") favorite_user_ids = fields.Many2many( 'res.users',", "# In the domain of displayed_image_id, we couln't use attachment_ids", "not in vals: # 1) Allows keeping the batch creation", "task.recurrence_id: task.recurrence_id.write(rec_values) elif vals.get('recurring_task'): rec_values['next_recurrence_date'] = fields.Datetime.today() recurrence = 
self.env['project.task.recurrence'].create(rec_values)", "= task.project_id.company_id @api.depends('project_id') def _compute_stage_id(self): for task in self: if", "= False # takes all existing ratings _check_company_auto = True", "project_actions groups = [new_group] + groups if self.project_id.privacy_visibility == 'portal':", "_compute_recurrence_message(self): self.recurrence_message = False for task in self.filtered(lambda t: t.recurring_task", "and self.repeat_until > fields.Date.today()) @api.depends('recurrence_id') def _compute_recurring_count(self): self.recurring_count = 0", "if custom_values is None: custom_values = {} defaults = {", "task.repeat_until, task.repeat_on_month, task.repeat_on_year, task._get_weekdays(WEEKS.get(task.repeat_week)), task.repeat_day, task.repeat_week, task.repeat_month, count=number_occurrences) date_format =", "fields.Float(compute='_compute_elapsed', string='Working hours to assign', store=True, group_operator=\"avg\") working_hours_close = fields.Float(compute='_compute_elapsed',", "default_project else self.env.company.id, }) action['context'] = ctx return action def", "+= list(domain) # perform search, return the first found return", "= project.partner_email @api.depends('partner_id.phone') def _compute_partner_phone(self): for project in self: if", "fields.Selection([ ('january', 'January'), ('february', 'February'), ('march', 'March'), ('april', 'April'), ('may',", "= self[0] if 'stage_id' in changes and test_task.stage_id.mail_template_id: res['stage_id'] =", "they should probably have access to the document. 
\"\"\" groups", "pdata['id'] in allowed_user_ids, {} )) portal_privacy = self.project_id.privacy_visibility == 'portal'", "= now # reset kanban state when changing stage if", "def _message_get_suggested_recipients(self): recipients = super(Task, self)._message_get_suggested_recipients() for task in self:", "\"Project\" _inherit = ['portal.mixin', 'mail.alias.mixin', 'mail.thread', 'rating.parent.mixin'] _order = \"sequence,", "portal_users: user_names = ', '.join(portal_users[:10].mapped('name')) raise ValidationError(_(\"The project visibility setting", "\"\"\" self.mapped('tasks').message_unsubscribe(partner_ids=partner_ids, channel_ids=channel_ids) return super(Project, self).message_unsubscribe(partner_ids=partner_ids, channel_ids=channel_ids) def _alias_get_creation_values(self): values", "'auto_delete_message': True, 'subtype_id': self.env['ir.model.data'].xmlid_to_res_id('mail.mt_note'), 'email_layout_xmlid': 'mail.mail_notification_light' }) return res def", "Progress'), translate=True, required=True, help='Override the default value displayed for the", "the parent to the duplicated task defaults['parent_id'] = old_to_new_tasks.get(task.parent_id.id, False)", "active tasks when subscribing to a project And add the", "task in self: if task.partner_id and task.partner_phone != task.partner_id.phone: task.partner_phone", "for task in self: task.repeat_show_day = task.recurring_task and (task.repeat_unit ==", "self[0] if 'stage_id' in changes and test_task.stage_id.mail_template_id: res['stage_id'] = (test_task.stage_id.mail_template_id,", "fields.Many2one('account.analytic.account', string=\"Analytic Account\", copy=False, ondelete='set null', domain=\"['|', ('company_id', '=', False),", "False return self.stage_find(project_id, [('fold', '=', False), ('is_closed', '=', False)]) @api.model", "first.')) # stage change: update date_last_stage_update if 'stage_id' in vals:", "readonly=False) repeat_number = fields.Integer(string=\"Repetitions\", default=1, 
compute='_compute_repeat', readonly=False) repeat_on_month = fields.Selection([", "= 'project.task.type' _description = 'Task Stage' _order = 'sequence, id'", "to assign', store=True, group_operator=\"avg\") working_days_close = fields.Float(compute='_compute_elapsed', string='Working days to", "in tasks: if task.project_id.privacy_visibility == 'portal': task._portal_ensure_token() return tasks def", "Recurrence\", compute='_compute_recurring_count') recurrence_id = fields.Many2one('project.task.recurrence', copy=False) recurrence_update = fields.Selection([ ('this',", "duration_data['days'] else: task.working_hours_open = 0.0 task.working_days_open = 0.0 if task.date_end:", "lang=task.partner_id.lang, force_send=force_send) def rating_get_partner_id(self): res = super(Task, self).rating_get_partner_id() if not", "timedelta(days=periods.get(project.rating_status_period, 0)) @api.model def _map_tasks_default_valeus(self, task, project): \"\"\" get the", "the email. \"\"\" # remove default author when going through", "== 'blocked': task.kanban_state_label = task.legend_blocked else: task.kanban_state_label = task.legend_done def", "self.ids), '|', '&', ('stage_id.is_closed', '=', False), ('stage_id.fold', '=', False), ('stage_id',", "= fields.Many2one('res.company', string='Company', required=True, default=lambda self: self.env.company) currency_id = fields.Many2one('res.currency',", "('last', 'Last'), ], default='first', compute='_compute_repeat', readonly=False) repeat_weekday = fields.Selection([ ('mon',", "'parent_id.email_from') def _compute_email_from(self): for task in self: task.email_from = task.partner_id.email", "= fields.Date(string=\"End Date\", compute='_compute_repeat', readonly=False) repeat_number = fields.Integer(string=\"Repetitions\", default=1, compute='_compute_repeat',", "recurrence raise UserError(_('You cannot archive recurring tasks. 
Please, disable the", "{ 'name': 'Tasks in Recurrence', 'type': 'ir.actions.act_window', 'res_model': 'project.task', 'view_mode':", "project does it on its tasks, too self.with_context(active_test=False).mapped('tasks').write({'active': vals['active']}) if", "task.recurrence_message += '<li>...</li>' task.recurrence_message += '</ul>' if task.repeat_type == 'until':", "!= task.partner_id.email: task.partner_id.email = task.partner_email @api.depends('partner_id.phone') def _compute_partner_phone(self): for task", "portal_privacy: group_data['has_button_access'] = False elif group_name == 'portal_customer' and portal_privacy:", "perform search, return the first found return self.env['project.task.type'].search(search_domain, order=order, limit=1).id", "self.with_context(active_test=False).env['project.task'].read_group([('stage_id', 'in', self.ids)], ['project_id'], ['project_id']) project_ids = list(set([project['project_id'][0] for project", "_track_subtype(self, init_values): self.ensure_one() if 'kanban_state_label' in init_values and self.kanban_state ==", "self.id)] # update context, with all default values as 'quick_create'", "1: return children return children + children._get_all_subtasks(depth - 1) def", "_order = 'sequence, id' def _get_default_project_ids(self): default_project_id = self.env.context.get('default_project_id') return", "in self: if task.partner_id: reason = _('Customer Email') if task.partner_id.email", "changed also. 
2) if the parent task partner_id changes, the", "!= task.partner_id.phone if will_write_email and will_write_phone: task.ribbon_message = _('By saving", "def action_recurring_tasks(self): return { 'name': 'Tasks in Recurrence', 'type': 'ir.actions.act_window',", "'type_id', 'project_id', string='Projects', default=_get_default_project_ids) legend_blocked = fields.Char( 'Red Kanban Label',", "if vals else True if allowed_users_changed: for project in self:", "!= self.company_id: self.project_id = False @api.depends('project_id.company_id') def _compute_company_id(self): for task", "feedback=feedback, subtype_xmlid=\"project.mt_task_rating\") def _rating_get_parent_field_name(self): return 'project_id' class ProjectTags(models.Model): \"\"\" Tags", "repeat_weekday = fields.Selection([ ('mon', 'Monday'), ('tue', 'Tuesday'), ('wed', 'Wednesday'), ('thu',", "about working time elapsed between record creation and assignation/closing. working_hours_open", "index=True) child_ids = fields.One2many('project.task', 'parent_id', string=\"Sub-tasks\", context={'active_test': False}) subtask_project_id =", "False), ('email_from', '=', new_partner.email), ('stage_id.fold', '=', False)]).write({'partner_id': new_partner.id}) return super(Task,", "least 1 task in that stage # a task can", "False), ('is_closed', '=', False)]) else: task.stage_id = False @api.returns('self', lambda", "= self.env['project.task.recurrence'].create(rec_values) vals['recurrence_id'] = recurrence.id tasks = super().create(vals_list) for task", "fields.Boolean(string=\"Sat\", compute='_compute_repeat', readonly=False) sun = fields.Boolean(string=\"Sun\", compute='_compute_repeat', readonly=False) repeat_day =", "self).default_get(default_fields) days = list(DAYS.keys()) week_start = fields.Datetime.today().weekday() if all(d in", "= self.env['project.task.type'].browse(stage_id) if project_task_type.fold or project_task_type.is_closed: return {'date_end': fields.Datetime.now()} return", "= 
fields.One2many(domain=lambda self: [('model', '=', self._name), ('message_type', 'in', ['email', 'comment'])])", "of project's tasks \"\"\" _name = \"project.tags\" _description = \"Project", "'user' and project_user_group_id in pdata['groups'] if self.project_id.privacy_visibility == 'followers': allowed_user_ids", "will be sent periodically.\\n\\n\" \"Don't forget to set up the", "False elif group_name == 'portal_customer' and portal_privacy: group_data['has_button_access'] = True", "= fields.Datetime(string='Assigning Date', index=True, copy=False, readonly=True) date_deadline = fields.Date(string='Deadline', index=True,", "= fields.Datetime.today() recurrence = self.env['project.task.recurrence'].create(rec_values) task.recurrence_id = recurrence.id if 'recurring_task'", "task._portal_ensure_token() return tasks def write(self, vals): now = fields.Datetime.now() if", "self.subtask_project_id == self: project.subtask_project_id = project for follower in self.message_follower_ids:", "default get (instead of _get_default_stage_id or _stage_find), if project_id not", "to make it accessible by the recipient(s).\") @api.depends('child_ids.planned_hours') def _compute_subtask_planned_hours(self):", "self.ensure_one() if self.repeat_unit == 'week': return [fn(n) for day, fn", "('forever', 'Forever'), ('until', 'End Date'), ('after', 'Number of Repetitions'), ],", "import randint from odoo import api, fields, models, tools, SUPERUSER_ID,", "* A medium or a bad feedback will set the", "== 'after' and task.repeat_number > 5 or task.repeat_type == 'forever'", "configuration is 'Rating when changing stage', then an email will", "in self: rating_template = task.stage_id.rating_template_id if rating_template: task.rating_send_request(rating_template, lang=task.partner_id.lang, force_send=force_send)", "in that stage # a task can be in a", "analytic account to record cost and revenue on your project.\")", "and phone number will also be updated.') elif will_write_email: 
task.ribbon_message", "task in project.task_ids: task.allowed_user_ids -= permission_removed if 'allow_recurring_tasks' in vals", "for task in self: task.access_url = '/my/task/%s' % task.id def", "string='User Email', readonly=True, related_sudo=False) attachment_ids = fields.One2many('ir.attachment', compute='_compute_attachment_ids', string=\"Main Attachments\",", "represented as a list of commands so we used res_model", "this stage.\\n\" \" * A good feedback from the customer", "default value displayed for the blocked state for kanban selection,", "task in self: if not task.project_id: task.project_id = task.parent_id.project_id.subtask_project_id #", "Issues if the Issue Tracker module is installed).\") privacy_visibility =", "legend_done = fields.Char(related='stage_id.legend_done', string='Kanban Valid Explanation', readonly=True, related_sudo=False) legend_normal =", "= \"project.tags\" _description = \"Project Tags\" def _get_default_color(self): return randint(1,", "tasks.\") stage_id = fields.Many2one('project.task.type', string='Stage', compute='_compute_stage_id', store=True, readonly=False, ondelete='restrict', tracking=True,", "change, the customer email will also be updated.') elif will_write_phone:", "in recurring_tasks: task.recurring_count = tasks_count.get(task.recurrence_id.id, 0) @api.depends('partner_id.email') def _compute_partner_email(self): for", "stage_view=False): self = self.with_context(active_test=False) # retrieves all the projects with", "action['context'] = \"{'default_res_model': '%s','default_res_id': %d}\" % (self._name, self.id) return action", "to 'ready for the new stage' (green bullet).\\n\" \" *", "x.privacy_visibility != 'portal'): project.access_warning = _( \"The project cannot be", "else _('Customer') task._message_add_suggested_recipient(recipients, partner=task.partner_id, reason=reason) elif task.email_from: task._message_add_suggested_recipient(recipients, email=task.email_from, reason=_('Customer", 
"ondelete=\"restrict\", required=True, help=\"Internal email associated with this project. Incoming emails", "res and self.project_id.partner_id: return self.project_id.partner_id return res def rating_apply(self, rate,", "values): analytic_account = self.env['account.analytic.account'].create({ 'name': values.get('name', _('Unknown Analytic Account')), 'company_id':", "or first delete all of its tasks.')) # Delete the", "project_ids, 'stage_ids': self.ids }) context = dict(self.env.context) context['stage_view'] = stage_view", "-= portal_users elif task.project_id.privacy_visibility == 'portal': task.allowed_user_ids |= task.project_id.allowed_portal_user_ids if", "vals[\"company_id\"] = self.env[\"project.project\"].browse( project_id ).company_id.id or self.env.company.id if project_id and", "else self.env.company.id, }) action['context'] = ctx return action def action_recurring_tasks(self):", "res = super(Task, self)._track_template(changes) test_task = self[0] if 'stage_id' in", "stage, normally altered during copy defaults = self._map_tasks_default_valeus(task, project) if", "= 'allowed_portal_user_ids' in vals or 'allowed_internal_user_ids' in vals if allowed_users_changed:", "= fields.Char( compute='_compute_partner_phone', inverse='_inverse_partner_phone', string=\"Phone\", readonly=False, store=True, copy=False) company_id =", "'End Date'), ('after', 'Number of Repetitions'), ], default=\"forever\", string=\"Until\", compute='_compute_repeat',", "action_context['search_default_parent_res_name'] = self.name action_context.pop('group_by', None) return dict(action, context=action_context) # ---------------------------------------------------", "and pdata['id'] in allowed_user_ids new_group = ('group_project_user', group_func, {}) if", "not self.user_id and not self.stage_id.fold: take_action = self._notify_get_action_link('assign', **local_msg_vals) project_actions", "== 'user' and project_user_group_id in pdata['groups'] and pdata['id'] in 
allowed_user_ids", "for task in self.filtered(lambda x: x.project_id.privacy_visibility != 'portal'): task.access_warning =", "task.email_from: task._message_add_suggested_recipient(recipients, email=task.email_from, reason=_('Customer Email')) return recipients def _notify_email_header_dict(self): headers", "fields.Selection([ ('daily', 'Daily'), ('weekly', 'Weekly'), ('bimonthly', 'Twice a Month'), ('monthly',", "_(\"No Subject\"), 'email_from': msg.get('from'), 'planned_hours': 0.0, 'partner_id': msg.get('author_id') } defaults.update(custom_values)", "vals['date_assign'] = fields.Datetime.now() # Stage change: Update date_end if folded", "Tags\" def _get_default_color(self): return randint(1, 11) name = fields.Char('Name', required=True)", "the customer phone number will also be updated.') else: task.ribbon_message", "project's tasks. (%s)\", user_names)) def _compute_attachment_ids(self): for task in self:", "'project.task')], help=\"If set and if the project's rating configuration is", "can assign tasks and create new one directly from notification", "= task.stage_id.rating_template_id if rating_template: task.rating_send_request(rating_template, lang=task.partner_id.lang, force_send=force_send) def rating_get_partner_id(self): res", "change: Update date_end if folded stage and date_last_stage_update if vals.get('stage_id'):", "project and tasks.\\n\" \"- Invited portal and all internal users:", "task in tasks: if task.project_id.privacy_visibility == 'portal': task._portal_ensure_token() return tasks", "task.working_hours_open = duration_data['hours'] task.working_days_open = duration_data['days'] else: task.working_hours_open = 0.0", "mail_thread task.attachment_ids = [(6, 0, list(set(attachment_ids) - set(message_attachment_ids)))] @api.depends('project_id.allowed_user_ids', 'project_id.privacy_visibility')", "when unsubscribing from a project \"\"\" self.mapped('tasks').message_unsubscribe(partner_ids=partner_ids, channel_ids=channel_ids) return 
super(Project,", "string=\"Task Count\") task_ids = fields.One2many('project.task', 'project_id', string='Tasks', domain=['|', ('stage_id.fold', '=',", "if 'repeat_day' in default_fields: vals['repeat_day'] = str(fields.Datetime.today().day) if 'repeat_month' in", "string='Kanban State Label', tracking=True) create_date = fields.Datetime(\"Created On\", readonly=True, index=True)", "self.filtered(lambda task: task.project_id.privacy_visibility == 'portal') tasks.sudo().write({'allowed_user_ids': [(4, user.id) for user", "@api.depends('partner_id.phone') def _compute_partner_phone(self): for task in self: if task.partner_id and", "{project: project.allowed_user_ids for project in self} # directly compute is_favorite", "time planned of all the sub-tasks linked to this task.", "records=None, company=company, doc_names=doc_names)) return res def email_split(self, msg): email_list =", "= task.allowed_user_ids.filtered('share') internal_users = task.allowed_user_ids - portal_users if task.project_id.privacy_visibility ==", "timedelta, datetime from random import randint from odoo import api,", "self.env.company @api.model def _read_group_stage_ids(self, stages, domain, order): search_domain = [('id',", "subtype_xmlid=\"project.mt_task_rating\") def _rating_get_parent_field_name(self): return 'project_id' class ProjectTags(models.Model): \"\"\" Tags of", "A medium or a bad feedback will set the kanban", "= [ ('name_uniq', 'unique (name)', \"Tag name already exists!\"), ]", "_name = \"project.project\" _description = \"Project\" _inherit = ['portal.mixin', 'mail.alias.mixin',", "= \"project.task\" _description = \"Task\" _date_name = \"date_assign\" _inherit =", "related_sudo=False) legend_normal = fields.Char(related='stage_id.legend_normal', string='Kanban Ongoing Explanation', readonly=True, related_sudo=False) is_closed", "name.lower() self = self.with_context( empty_list_help_id=self.env.context.get('default_project_id'), empty_list_help_model='project.project', 
empty_list_help_document_name=tname, ) return super(Task,", "if 'stage_id' in vals: vals.update(self.update_date_end(vals['stage_id'])) vals['date_last_stage_update'] = now # reset", "vals['repeat_weekday'] = self._fields.get('repeat_weekday').selection[week_start][0] return vals @api.model_create_multi def create(self, vals_list): default_stage", "= self.recurrence_id.copy().id return super(Task, self).copy(default) @api.constrains('parent_id') def _check_parent_id(self): for task", "internal_users return res def message_unsubscribe(self, partner_ids=None, channel_ids=None): \"\"\" Unsubscribe from", "Date') date = fields.Date(string='Expiration Date', index=True, tracking=True) subtask_project_id = fields.Many2one('project.project',", "from old to new project \"\"\" project = self.browse(new_project_id) tasks", "Override to update the task according to the email. \"\"\"", "the action to see all the rating of the project", "task.partner_id and task.partner_phone != task.partner_id.phone: task.partner_phone = task.partner_id.phone def _inverse_partner_phone(self):", "displayed_image_id = fields.Many2one('ir.attachment', domain=\"[('res_model', '=', 'project.task'), ('res_id', '=', id), ('mimetype',", "is too restricted. 
Set the privacy of the project to", "rec_field in rec_fields} rec_values['next_recurrence_date'] = fields.Datetime.today() recurrence = self.env['project.task.recurrence'].create(rec_values) vals['recurrence_id']", "# --------------------------------------------------- # Rating business # --------------------------------------------------- # This method", "super().create(vals_list) for task in tasks: if task.project_id.privacy_visibility == 'portal': task._portal_ensure_token()", "[x for x in email_list if x.split('@')[0] not in aliases]", "task.allowed_user_ids -= portal_users elif task.project_id.privacy_visibility == 'portal': task.allowed_user_ids |= task.project_id.allowed_portal_user_ids", "= fields.Char(related='stage_id.legend_blocked', string='Kanban Blocked Explanation', readonly=True, related_sudo=False) legend_done = fields.Char(related='stage_id.legend_done',", "False) if project_id: name = self.env['project.project'].browse(project_id).label_tasks if name: tname =", "is in that stage.') legend_done = fields.Char( 'Green Kanban Label',", "for child_task in task.child_ids) @api.depends('child_ids') def _compute_subtask_count(self): for task in", "self.project_id.privacy_visibility == 'followers': allowed_user_ids = self.project_id.allowed_internal_user_ids.partner_id.ids group_func = lambda pdata:", "readonly=False) repeat_on_month = fields.Selection([ ('date', 'Date of the Month'), ('day',", "super(Project, self).create(vals) if not vals.get('subtask_project_id'): project.subtask_project_id = project.id if project.privacy_visibility", "= ['portal.mixin', 'mail.thread.cc', 'mail.activity.mixin', 'rating.mixin'] _mail_post_access = 'read' _order =", "self.with_context( default_project_id=project_id ).default_get(['stage_id']).get('stage_id') vals[\"stage_id\"] = default_stage[project_id] # user_id change: update", "and t._is_recurrence_valid()): date = fields.Date.today() number_occurrences = min(5, task.repeat_number if", "if 
self.project_id.privacy_visibility == 'portal': allowed_user_ids = self.project_id.allowed_portal_user_ids.partner_id.ids groups.insert(0, ( 'allowed_portal_users',", "fields.Selection([ ('normal', 'In Progress'), ('done', 'Ready'), ('blocked', 'Blocked')], string='Kanban State',", "0.0)) @api.depends('stage_id', 'kanban_state') def _compute_kanban_state_label(self): for task in self: if", "this step.\") fold = fields.Boolean(string='Folded in Kanban', help='This stage is", "count=number_occurrences) date_format = self.env['res.lang']._lang_get(self.env.user.lang).date_format task.recurrence_message = '<ul>' for date in", "self)._compute_access_warning() for task in self.filtered(lambda x: x.project_id.privacy_visibility != 'portal'): task.access_warning", "Override of the base.stage method Parameter of the stage search", "delete recurring tasks. Please, disable the recurrence first.')) return super().unlink()", "fields.Boolean('Automatic kanban status', default=False, help=\"Automatically modify the kanban state when", "not in task.stage_id.project_ids: task.stage_id = task.stage_find(task.project_id.id, [ ('fold', '=', False),", "because the privacy of the project is too restricted. 
Set", "related=\"company_id.currency_id\", string=\"Currency\", readonly=True) analytic_account_id = fields.Many2one('account.analytic.account', string=\"Analytic Account\", copy=False, ondelete='set", "False)]) color = fields.Integer(string='Color Index') user_id = fields.Many2one('res.users', string='Project Manager',", "compute='_compute_repeat', readonly=False) fri = fields.Boolean(string=\"Fri\", compute='_compute_repeat', readonly=False) sat = fields.Boolean(string=\"Sat\",", "lambda pdata: pdata['type'] == 'user' and project_user_group_id in pdata['groups'] if", "self).unlink() analytic_accounts_to_delete.unlink() return result def message_subscribe(self, partner_ids=None, channel_ids=None, subtype_ids=None): \"\"\"", "value: value.id) def copy(self, default=None): if default is None: default", "'portal'): project.allowed_user_ids |= project.partner_id.user_ids return res def action_unlink(self): wizard =", "def _send_rating_all(self): projects = self.search([ ('rating_active', '=', True), ('rating_status', '=',", "task[f] = defaults.get(f) else: task[f] = False def _get_weekdays(self, n=1):", "'Invited internal users'), ('employees', 'All internal users'), ('portal', 'Invited portal", "task.repeat_on_year == 'day') task.repeat_show_dow = task.recurring_task and task.repeat_unit == 'week'", "for project in self: project.doc_count = Attachment.search_count([ '|', '&', ('res_model',", "description = fields.Html() active = fields.Boolean(default=True, help=\"If the active field", "\" \"Use an analytic account to record cost and revenue", "self.env.ref('project.mt_task_stage') return super(Task, self)._track_subtype(init_values) def _notify_get_groups(self, msg_vals=None): \"\"\" Handle project", "copy=False, tracking=True) date_last_stage_update = fields.Datetime(string='Last Stage Update', index=True, copy=False, readonly=True)", "self.ids }) context = dict(self.env.context) context['stage_view'] = stage_view return {", "group_name == 'portal_customer' and 
portal_privacy: group_data['has_button_access'] = True return groups", "to all existing active tasks when subscribing to a project", "self.recurring_count = 0 recurring_tasks = self.filtered(lambda l: l.recurrence_id) count =", "a specified recipient (not a follower, a specific one) #", "or task.parent_id.email_from @api.depends('parent_id.project_id.subtask_project_id') def _compute_project_id(self): for task in self: if", "self: [('model', '=', self._name), ('message_type', 'in', ['email', 'comment'])]) # recurrence", "incoming emails in communication history website_message_ids = fields.One2many(domain=lambda self: [('model',", "= super(Task, self)._track_template(changes) test_task = self[0] if 'stage_id' in changes", "= task.allowed_user_ids.filtered('share') if portal_users: user_names = ', '.join(portal_users[:10].mapped('name')) raise ValidationError(_(\"The", "legend_normal = fields.Char( 'Grey Kanban Label', default=lambda s: _('In Progress'),", "portal_users self.allowed_internal_user_ids |= internal_users return res def message_unsubscribe(self, partner_ids=None, channel_ids=None):", "issue is in that stage.') legend_done = fields.Char( 'Green Kanban", "('january', 'January'), ('february', 'February'), ('march', 'March'), ('april', 'April'), ('may', 'May'),", "task or issue reaches this step.\") fold = fields.Boolean(string='Folded in", "planned_hours = fields.Float(\"Initially Planned Hours\", help='Time planned to achieve this", "task_count = fields.Integer(compute='_compute_task_count', string=\"Task Count\") task_ids = fields.One2many('project.task', 'project_id', string='Tasks',", "project.partner_email: project.partner_email = project.partner_id.email def _inverse_partner_email(self): for project in self:", "= ','.join(current_objects) if self.tag_ids: headers['X-Odoo-Tags'] = ','.join(self.tag_ids.mapped('name')) return headers def", "_compute_email_from(self): for task in self: task.email_from = task.partner_id.email or ((task.partner_id", "the 
customer when the task reaches this step.\") auto_validation_kanban_state =", "the blocked state for kanban selection, when the task or", "user_id = fields.Many2one('res.users', string='Assigned to', default=lambda self: self.env.uid, index=True, tracking=True)", "issue is in that stage.') legend_normal = fields.Char( 'Grey Kanban", "color = fields.Integer(string='Color', default=_get_default_color) _sql_constraints = [ ('name_uniq', 'unique (name)',", "'')) # check left-part is not already an alias aliases", "}) return res def _creation_subtype(self): return self.env.ref('project.mt_task_new') def _track_subtype(self, init_values):", "'sequence, id' def _get_default_project_ids(self): default_project_id = self.env.context.get('default_project_id') return [default_project_id] if", "and self.project_id.partner_id: return self.project_id.partner_id return res def rating_apply(self, rate, token=None,", "first found return self.env['project.task.type'].search(search_domain, order=order, limit=1).id # ------------------------------------------------ # CRUD", "super(Project, self).message_unsubscribe(partner_ids=partner_ids, channel_ids=channel_ids) def _alias_get_creation_values(self): values = super(Project, self)._alias_get_creation_values() values['alias_model_id']", "new project \"\"\" project = self.browse(new_project_id) tasks = self.env['project.task'] #", "in vals and vals['parent_id'] in self.ids: raise UserError(_(\"Sorry. 
You can't", "], default='week', compute='_compute_repeat', readonly=False) repeat_type = fields.Selection([ ('forever', 'Forever'), ('until',", "task.\") user_id = fields.Many2one('res.users', string='Assigned to', default=lambda self: self.env.uid, index=True,", "string=\"Sub-tasks\", context={'active_test': False}) subtask_project_id = fields.Many2one('project.project', related=\"project_id.subtask_project_id\", string='Sub-task Project', readonly=True)", "task.repeat_day, task.repeat_week, task.repeat_month, count=number_occurrences) date_format = self.env['res.lang']._lang_get(self.env.user.lang).date_format task.recurrence_message = '<ul>'", "financial management. \" \"Use an analytic account to record cost", "x: x.privacy_visibility != 'portal'): project.access_warning = _( \"The project cannot", "= fields.datetime.now() + timedelta(days=periods.get(project.rating_status_period, 0)) @api.model def _map_tasks_default_valeus(self, task, project):", "mail gateway. Indeed we # do not want to explicitly", "--------------------------------------------------- # Actions # --------------------------------------------------- def toggle_favorite(self): favorite_projects = not_fav_projects", "def _get_recurrence_fields(self): return ['repeat_interval', 'repeat_unit', 'repeat_type', 'repeat_until', 'repeat_number', 'repeat_on_month', 'repeat_on_year',", "write(self, vals): allowed_users_changed = 'allowed_portal_user_ids' in vals or 'allowed_internal_user_ids' in", "the recurrence raise UserError(_('You cannot archive recurring tasks. Please, disable", "Please, disable the recurrence first.')) # stage change: update date_last_stage_update", "help=\"Internal email associated with this project. 
Incoming emails are automatically", "return all children without depth limit def _get_all_subtasks(self, depth=0): children", "= self.env['ir.model']._get('project.task').id if self.id: values['alias_defaults'] = defaults = ast.literal_eval(self.alias_defaults or", "the customer will update the kanban state to 'ready for", "collect all section_ids section_ids = [] if section_id: section_ids.append(section_id) section_ids.extend(self.mapped('project_id').ids)", "of the project is too restricted. Set the privacy of", "attachment_ids = self.env['ir.attachment'].search([('res_id', '=', task.id), ('res_model', '=', 'project.task')]).ids message_attachment_ids =", "_get_weekdays(self, n=1): self.ensure_one() if self.repeat_unit == 'week': return [fn(n) for", "State', copy=False, default='normal', required=True) kanban_state_label = fields.Char(compute='_compute_kanban_state_label', string='Kanban State Label',", "in that stage.') legend_normal = fields.Char( 'Grey Kanban Label', default=lambda", "return [DAYS.get(self.repeat_weekday)(n)] @api.depends( 'recurring_task', 'repeat_interval', 'repeat_unit', 'repeat_type', 'repeat_until', 'repeat_number', 'repeat_on_month',", "self.env.user, domain=[('share', '=', False)]) allowed_portal_user_ids = fields.Many2many('res.users', 'project_allowed_portal_users_rel', string=\"Allowed Portal", "= self.with_context(mail_create_nosubscribe=True) project = super(Project, self).create(vals) if not vals.get('subtask_project_id'): project.subtask_project_id", "allowed_user_ids = self.project_id.allowed_internal_user_ids.partner_id.ids group_func = lambda pdata: pdata['type'] == 'user'", "group_data['has_button_access'] = False elif group_name == 'portal_customer' and portal_privacy: group_data['has_button_access']", "readgroup] + self.project_ids.ids)) wizard = self.with_context(project_ids=project_ids).env['project.task.type.delete.wizard'].create({ 'project_ids': project_ids, 'stage_ids': self.ids", "and portal customers. 
If they are notified they should probably", "in self: if not task.project_id: task.project_id = task.parent_id.project_id.subtask_project_id # ---------------------------------------------------", "if not self.user_id and not self.stage_id.fold: take_action = self._notify_get_action_link('assign', **local_msg_vals)", "a one2many is represented as a list of commands so", "'mail.alias.mixin', 'mail.thread', 'rating.parent.mixin'] _order = \"sequence, name, id\" _rating_satisfaction_days =", "the base.stage method Parameter of the stage search taken from", "self.with_context(active_test=False): if project.tasks: raise UserError(_('You cannot delete a project containing", "assign', store=True, group_operator=\"avg\") working_hours_close = fields.Float(compute='_compute_elapsed', string='Working hours to close',", "list(set(attachment_ids) - set(message_attachment_ids)))] @api.depends('project_id.allowed_user_ids', 'project_id.privacy_visibility') def _compute_allowed_user_ids(self): for task in", "task in self: recurrence_domain = OR([recurrence_domain, ['&', ('recurrence_id', '=', task.recurrence_id.id),", "= ','.join(self.tag_ids.mapped('name')) return headers def _message_post_after_hook(self, message, msg_vals): if message.attachment_ids", "s: _('Blocked'), translate=True, required=True, help='Override the default value displayed for", "super(Task, self).copy(default) @api.constrains('parent_id') def _check_parent_id(self): for task in self: if", "rating_request_deadline = fields.Datetime(compute='_compute_rating_request_deadline', store=True) rating_active = fields.Boolean('Customer Ratings', default=lambda self:", "phone number will also be updated.') else: task.ribbon_message = False", "duration_data['hours'] task.working_days_open = duration_data['days'] else: task.working_hours_open = 0.0 task.working_days_open =", "Unsubscribe from all tasks when unsubscribing from a project \"\"\"", "stage.') mail_template_id = fields.Many2one( 'mail.template', string='Email 
Template', domain=[('model', '=', 'project.task')],", "self: if task.partner_id and task.partner_phone != task.partner_id.phone: task.partner_id.phone = task.partner_phone", "in self: attachment_ids = self.env['ir.attachment'].search([('res_id', '=', task.id), ('res_model', '=', 'project.task')]).ids", "x: x.project_id.rating_active and x.project_id.rating_status == 'stage')._send_task_rating_mail(force_send=True) return result def update_date_end(self,", "generation # If depth <= 0, return all children without", "def _get_default_stage_id(self): \"\"\" Gives default stage_id \"\"\" project_id = self.env.context.get('default_project_id')", "copy=True, default=_default_company_id) color = fields.Integer(string='Color Index') user_email = fields.Char(related='user_id.email', string='User", "for project in self.filtered(lambda project: project.privacy_visibility == 'portal'): project.allowed_user_ids |=", "stage if 'kanban_state' not in vals: vals['kanban_state'] = 'normal' #", "by the recipient(s).\") @api.depends('child_ids.planned_hours') def _compute_subtask_planned_hours(self): for task in self:", "'wed', 'thu', 'fri', 'sat', 'sun', 'repeat_day', 'repeat_week', 'repeat_month', 'repeat_weekday'] @api.depends('recurring_task',", "wizard = self.with_context(project_ids=project_ids).env['project.task.type.delete.wizard'].create({ 'project_ids': project_ids, 'stage_ids': self.ids }) context =", "task.create_date)]]) else: recurrence_domain = [('recurrence_id', 'in', self.recurrence_id.ids)] tasks |= self.env['project.task'].search(recurrence_domain)", "is pulled in another stage.\\n\" \"- Periodical Rating: email will", "'active' in vals: # archiving/unarchiving a project does it on", "task[f] = False def _get_weekdays(self, n=1): self.ensure_one() if self.repeat_unit ==", "emails in communication history website_message_ids = fields.One2many(domain=lambda self: [('model', '=',", "string='Tasks Stages') task_count = fields.Integer(compute='_compute_task_count', 
string=\"Task Count\") task_ids = fields.One2many('project.task',", "default author when going through the mail gateway. Indeed we", "default_project_id else None active = fields.Boolean('Active', default=True) name = fields.Char(string='Stage", "self.ids), '&', ('res_model', '=', 'project.task'), ('res_id', 'in', self.task_ids.ids) ]) action['context']", "task in self: task.access_url = '/my/task/%s' % task.id def _compute_access_warning(self):", "'=', False), ('company_id', '=', company_id)]\") partner_is_company = fields.Boolean(related='partner_id.is_company', readonly=True) commercial_partner_id", "self.search([ ('rating_active', '=', True), ('rating_status', '=', 'periodic'), ('rating_request_deadline', '<=', fields.Datetime.now())", "store=True) rating_active = fields.Boolean('Customer Ratings', default=lambda self: self.env.user.has_group('project.group_project_rating')) rating_status =", "= task.recurring_task and task.repeat_unit == 'week' task.repeat_show_month = task.recurring_task and", "have access to the document. \"\"\" groups = super(Task, self)._notify_get_groups(msg_vals=msg_vals)", "directly from notification emails. 
Also give access button to portal", "permission_removed = allowed_users.get(project) - project.allowed_user_ids allowed_portal_users_removed = permission_removed.filtered('share') project.message_unsubscribe(allowed_portal_users_removed.partner_id.commercial_partner_id.ids) for", "= self.env['ir.attachment'] for project in self: project.doc_count = Attachment.search_count([ '|',", "the recurrence first.')) # stage change: update date_last_stage_update if 'stage_id'", "attached\") date_start = fields.Date(string='Start Date') date = fields.Date(string='Expiration Date', index=True,", "'mail.thread.cc', 'mail.activity.mixin', 'rating.mixin'] _mail_post_access = 'read' _order = \"priority desc,", "'date_end', 'date_assign') def _compute_elapsed(self): task_linked_to_calendar = self.filtered( lambda task: task.project_id.resource_calendar_id", "['project_id']) project_ids = list(set([project['project_id'][0] for project in readgroup] + self.project_ids.ids))", "issue is in that stage.') mail_template_id = fields.Many2one( 'mail.template', string='Email", "# --------------------------------------------------- def _send_task_rating_mail(self, force_send=False): for task in self: rating_template", "portal user subscribed to allowed portal users \"\"\" res =", "def _rating_get_parent_field_name(self): return 'project_id' class ProjectTags(models.Model): \"\"\" Tags of project's", "== 3, return children to third generation # If depth", "to stop the recurrence raise UserError(_('You cannot archive recurring tasks.", "'repeat_on_year', 'mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun', 'repeat_day', 'repeat_week',", "'|', '&', ('res_model', '=', 'project.project'), ('res_id', '=', project.id), '&', ('res_model',", "Count\") task_ids = fields.One2many('project.task', 'project_id', string='Tasks', domain=['|', ('stage_id.fold', '=', False),", "self.repeat_show_dow or self._get_weekdays()) and\\ (self.repeat_type != 'after' or self.repeat_number) and\\", "rec: not rec.project_id) 
if leftover: res.update(super(Task, leftover)._notify_get_reply_to(default=default, records=None, company=company, doc_names=doc_names))", "be default stages \"\"\" # collect all section_ids section_ids =", "'=', self.id)], order='parent_id').ids old_to_new_tasks = {} for task in self.env['project.task'].browse(task_ids):", "pdata: pdata['type'] == 'user' and project_user_group_id in pdata['groups'] if self.project_id.privacy_visibility", "def _inverse_partner_email(self): for task in self: if task.partner_id and task.partner_email", "ratings _check_company_auto = True def _compute_attached_docs_count(self): Attachment = self.env['ir.attachment'] for", "someone of their company.\") allowed_user_ids = fields.Many2many('res.users', compute='_compute_allowed_users', inverse='_inverse_allowed_user') allowed_internal_user_ids", "project.subtask_project_id = project.id if project.privacy_visibility == 'portal' and project.partner_id.user_ids: project.allowed_user_ids", "_('<p><em>Number of tasks: %(tasks_count)s</em></p>') % {'tasks_count': len(recurring_dates)} def _is_recurrence_valid(self): self.ensure_one()", "def rating_apply(self, rate, token=None, feedback=None, subtype_xmlid=None): return super(Task, self).rating_apply(rate, token=token,", "t.recurring_task and t._is_recurrence_valid()): date = fields.Date.today() number_occurrences = min(5, task.repeat_number", "to third generation # If depth <= 0, return all", "False, it will allow you to hide the project without", "# We want to copy archived task, but do not", "task or issue is in that stage.') legend_normal = fields.Char(", "stage are considered as closed.\") disabled_rating_warning = fields.Text(compute='_compute_disabled_rating_warning') def unlink_wizard(self,", "# will give default subtask field in `default_get` 'default_company_id': default_project.company_id.id", "fields.Char(\"Name\", index=True, required=True, tracking=True) description = fields.Html() active = fields.Boolean(default=True,", 
"task.working_days_close = 0.0 (self - task_linked_to_calendar).update(dict.fromkeys( ['working_hours_open', 'working_hours_close', 'working_days_open', 'working_days_close'],", "True: rec_values = {rec_field: vals[rec_field] for rec_field in rec_fields} rec_values['next_recurrence_date']", "project \"\"\" self.mapped('tasks').message_unsubscribe(partner_ids=partner_ids, channel_ids=channel_ids) return super(Project, self).message_unsubscribe(partner_ids=partner_ids, channel_ids=channel_ids) def _alias_get_creation_values(self):", "fields.Many2many('project.tags', string='Tags') kanban_state = fields.Selection([ ('normal', 'In Progress'), ('done', 'Ready'),", "'Friday'), ('sat', 'Saturday'), ('sun', 'Sunday'), ], string='Day Of The Week',", "= [(6, 0, list(set(attachment_ids) - set(message_attachment_ids)))] @api.depends('project_id.allowed_user_ids', 'project_id.privacy_visibility') def _compute_allowed_user_ids(self):", "!= task.partner_email: task.partner_email = task.partner_id.email def _inverse_partner_email(self): for task in", "`default_get` 'default_company_id': default_project.company_id.id if default_project else self.env.company.id, }) action['context'] =", "'ready for the new stage' (green bullet).\\n\" \" * A", "an alias aliases = self.mapped('project_id.alias_name') return [x for x in", "duration_data['hours'] task.working_days_close = duration_data['days'] else: task.working_hours_close = 0.0 task.working_days_close =", "compute='_compute_partner_email', inverse='_inverse_partner_email', string='Email', readonly=False, store=True, copy=False) partner_phone = fields.Char( compute='_compute_partner_phone',", "@api.model def _send_rating_all(self): projects = self.search([ ('rating_active', '=', True), ('rating_status',", "in self: if task.project_id: if task.project_id not in task.stage_id.project_ids: task.stage_id", "elif group_name == 'portal_customer' and portal_privacy: group_data['has_button_access'] = True return", ") return super(Task, 
self).get_empty_list_help(help) def message_subscribe(self, partner_ids=None, channel_ids=None, subtype_ids=None): \"\"\"", "project.analytic_account_id result = super(Project, self).unlink() analytic_accounts_to_delete.unlink() return result def message_subscribe(self,", "_send_task_rating_mail(self, force_send=False): for task in self: rating_template = task.stage_id.rating_template_id if", "= {project: project.allowed_user_ids for project in self} # directly compute", "tasks. Please, disable the recurrence first.')) return super().unlink() # ---------------------------------------------------", "# on a document without customer means that it was", "vals in vals_list: project_id = vals.get('project_id') or self.env.context.get('default_project_id') if project_id", "with a least 1 task in that stage # a", "tasks = self recurrence_update = vals.pop('recurrence_update', 'this') if recurrence_update !=", "of tasks to their project if any. \"\"\" aliases =", "working_hours_open = fields.Float(compute='_compute_elapsed', string='Working hours to assign', store=True, group_operator=\"avg\") working_hours_close", "def message_subscribe(self, partner_ids=None, channel_ids=None, subtype_ids=None): \"\"\" Subscribe to all existing", "tag_ids = fields.Many2many('project.tags', string='Tags') kanban_state = fields.Selection([ ('normal', 'In Progress'),", "= (test_task.stage_id.mail_template_id, { 'auto_delete_message': True, 'subtype_id': self.env['ir.model.data'].xmlid_to_res_id('mail.mt_note'), 'email_layout_xmlid': 'mail.mail_notification_light' })", "task.rating_send_request(rating_template, lang=task.partner_id.lang, force_send=force_send) def rating_get_partner_id(self): res = super(Task, self).rating_get_partner_id() if", "is too restricted. 
Set the privacy to 'Visible by following", "project) if task.parent_id: # set the parent to the duplicated", "= {} if not default.get('name'): default['name'] = _(\"%s (copy)\", self.name)", "recurrence_update != 'this': recurrence_domain = [] if recurrence_update == 'subsequent':", "string=\"Allowed Internal Users\", default=lambda self: self.env.user, domain=[('share', '=', False)]) allowed_portal_user_ids", "= project.alias_domain and project.alias_id.alias_name @api.depends('allowed_internal_user_ids', 'allowed_portal_user_ids') def _compute_allowed_users(self): for project", "email_from = fields.Char(string='Email From', help=\"These people will receive email.\", index=True,", "method Parameter of the stage search taken from the lead:", "force_send=False): for task in self: rating_template = task.stage_id.rating_template_id if rating_template:", "self.env.context.get('name', self.name) + ':', 'default_parent_id': self.id, # will give default", "== self: project.subtask_project_id = project for follower in self.message_follower_ids: project.message_subscribe(partner_ids=follower.partner_id.ids,", "default='first', compute='_compute_repeat', readonly=False) repeat_weekday = fields.Selection([ ('mon', 'Monday'), ('tue', 'Tuesday'),", "or ((task.partner_id or task.parent_id) and task.email_from) or task.parent_id.email_from @api.depends('parent_id.project_id.subtask_project_id') def", "if task.repeat_type == 'after' else 5) delta = task.repeat_interval if", "+ self.project_ids.ids)) wizard = self.with_context(project_ids=project_ids).env['project.task.type.delete.wizard'].create({ 'project_ids': project_ids, 'stage_ids': self.ids })", "= fields.Datetime.from_string(task.date_end) duration_data = task.project_id.resource_calendar_id.get_work_duration_data(dt_create_date, dt_date_end, compute_leaves=True) task.working_hours_close = duration_data['hours']", "also be updated.') elif will_write_phone: task.ribbon_message = _('By saving this", "'Customer Ratings 
Status', default=\"stage\", required=True, help=\"How to get customer feedback?\\n\"", "= ['portal.mixin', 'mail.alias.mixin', 'mail.thread', 'rating.parent.mixin'] _order = \"sequence, name, id\"", "= self._map_tasks_default_valeus(task, project) if task.parent_id: # set the parent to", "1) Allows keeping the batch creation of tasks # 2)", "stages.browse(stage_ids) active = fields.Boolean(default=True) name = fields.Char(string='Title', tracking=True, required=True, index=True)", "subscribed to allowed portal users \"\"\" res = super(Task, self).message_subscribe(partner_ids=partner_ids,", "# --------------------------------------------------- def _track_template(self, changes): res = super(Task, self)._track_template(changes) test_task", "want to copy archived task, but do not propagate an", "defaults = self.default_get(rec_fields) for task in self: for f in", "('weekly', 'Weekly'), ('bimonthly', 'Twice a Month'), ('monthly', 'Once a Month'),", "Index') user_id = fields.Many2one('res.users', string='Project Manager', default=lambda self: self.env.user, tracking=True)", "'form', 'res_model': 'project.task.type.delete.wizard', 'views': [(self.env.ref('project.view_project_task_type_delete_wizard').id, 'form')], 'type': 'ir.actions.act_window', 'res_id': wizard.id,", "msg): email_list = tools.email_split((msg.get('to') or '') + ',' + (msg.get('cc')", "}) return analytic_account def _create_analytic_account(self): for project in self: analytic_account", "if 'stage_id' in changes and test_task.stage_id.mail_template_id: res['stage_id'] = (test_task.stage_id.mail_template_id, {", "a message with a specified recipient (not a follower, a", "in self: portal_users = task.allowed_user_ids.filtered('share') internal_users = task.allowed_user_ids - portal_users", "self.with_context(project_ids=project_ids).env['project.task.type.delete.wizard'].create({ 'project_ids': project_ids, 'stage_ids': self.ids }) context = dict(self.env.context) context['stage_view']", "and 
project.alias_id.alias_name @api.depends('allowed_internal_user_ids', 'allowed_portal_user_ids') def _compute_allowed_users(self): for project in self:", "DAYS, WEEKS class ProjectTaskType(models.Model): _name = 'project.task.type' _description = 'Task", "subscribing to a project And add the portal user subscribed", "if vals.get('user_id'): vals['date_assign'] = fields.Datetime.now() # Stage change: Update date_end", "updated.') elif will_write_email: task.ribbon_message = _('By saving this change, the", "in self: for f in rec_fields: if task.recurrence_id: task[f] =", "to see all the analytic lines of the project's analytic", "email_list if x.split('@')[0] not in aliases] @api.model def message_new(self, msg,", "\"Don't forget to set up the mail templates on the", "= self.project_id.allowed_internal_user_ids.partner_id.ids group_func = lambda pdata: pdata['type'] == 'user' and", "def action_view_tasks(self): action = self.with_context(active_id=self.id, active_ids=self.ids) \\ .env.ref('project.act_project_project_2_project_task_all') \\ .sudo().read()[0]", "allowed_users = {project: project.allowed_user_ids for project in self} # directly", "the portal user subscribed to allowed portal users \"\"\" res", "for task in self.filtered(lambda task: task.project_id): task.company_id = task.project_id.company_id @api.depends('project_id')", "('blocked', 'Blocked')], string='Kanban State', copy=False, default='normal', required=True) kanban_state_label = fields.Char(compute='_compute_kanban_state_label',", "vals and vals['parent_id'] in self.ids: raise UserError(_(\"Sorry. 
You can't set", "# preserve task name and stage, normally altered during copy", "project.access_url = '/my/project/%s' % project.id def _compute_access_warning(self): super(Project, self)._compute_access_warning() for", "and (task.repeat_unit == 'month' and task.repeat_on_month == 'day') or (task.repeat_unit", "in self: if task.partner_id and task.partner_phone != task.partner_id.phone: task.partner_phone =", "self.env['account.analytic.account'].create({ 'name': values.get('name', _('Unknown Analytic Account')), 'company_id': values.get('company_id') or self.env.company.id,", "'default_company_id': default_project.company_id.id if default_project else self.env.company.id, }) action['context'] = ctx", "readonly=True) company_id = fields.Many2one( 'res.company', string='Company', compute='_compute_company_id', store=True, readonly=False, required=True,", "defaults.update(custom_values) task = super(Task, self.with_context(create_context)).message_new(msg, custom_values=defaults) email_list = task.email_split(msg) partner_ids", "or a bad feedback will set the kanban state to", "will receive email.\", index=True, compute='_compute_email_from', store=\"True\", readonly=False) allowed_user_ids = fields.Many2many('res.users',", "str(i)) for i in range(1, 32) ], compute='_compute_repeat', readonly=False) repeat_week", "associated with this project. 
Incoming emails are automatically synchronized \"", "for the new stage' (green bullet).\\n\" \" * A medium", "partner_ids = [p.id for p in self.env['mail.thread']._mail_find_partner_from_emails(email_list, records=self, force_create=False) if", "self.project_id.id) headers['X-Odoo-Objects'] = ','.join(current_objects) if self.tag_ids: headers['X-Odoo-Tags'] = ','.join(self.tag_ids.mapped('name')) return", "self.id)], order='parent_id').ids old_to_new_tasks = {} for task in self.env['project.task'].browse(task_ids): #", "the project is not assigned to the stage readgroup =", ".project_task_recurrence import DAYS, WEEKS class ProjectTaskType(models.Model): _name = 'project.task.type' _description", "portal and all internal users: employees may see everything.\" \"", "'this') if recurrence_update != 'this': recurrence_domain = [] if recurrence_update", "'kanban_state_label' in init_values and self.kanban_state == 'done': return self.env.ref('project.mt_task_ready') elif", "doc_names=None) res = {task.id: aliases.get(task.project_id.id) for task in self} leftover", "default_get(self, default_fields): vals = super(Task, self).default_get(default_fields) days = list(DAYS.keys()) week_start", "('february', 'February'), ('march', 'March'), ('april', 'April'), ('may', 'May'), ('june', 'June'),", "return self.env.ref('project.mt_task_new') def _track_subtype(self, init_values): self.ensure_one() if 'kanban_state_label' in init_values", "self.email_from and not self.partner_id: # we consider that posting a", "%d}\" % (self._name, self.id) return action def _compute_is_favorite(self): for project", "self.filtered(lambda t: t.recurring_task and t._is_recurrence_valid()): date = fields.Date.today() number_occurrences =", "fields.Selection([ ('first', 'First'), ('second', 'Second'), ('third', 'Third'), ('last', 'Last'), ],", "string='Email Template', domain=[('model', '=', 'project.task')], help=\"If set an email will", "in vals: 
vals.update(self.update_date_end(vals['stage_id'])) vals['date_last_stage_update'] = now # reset kanban state", "You can't set a task as its parent task.\")) if", "task: task.project_id): task.company_id = task.project_id.company_id @api.depends('project_id') def _compute_stage_id(self): for task", "'April'), ('may', 'May'), ('june', 'June'), ('july', 'July'), ('august', 'August'), ('september',", "stage', then an email will be sent to the customer", "feedback from the customer will update the kanban state to", "import api, fields, models, tools, SUPERUSER_ID, _ from odoo.exceptions import", "t: t.project_id.privacy_visibility != 'portal'): portal_users = task.allowed_user_ids.filtered('share') if portal_users: user_names", "'user') or group_name == 'portal_customer' and not portal_privacy: group_data['has_button_access'] =", "of Odoo. See LICENSE file for full copyright and licensing", "'done': return self.env.ref('project.mt_task_ready') elif 'stage_id' in init_values: return self.env.ref('project.mt_task_stage') return", "and test_task.stage_id.mail_template_id: res['stage_id'] = (test_task.stage_id.mail_template_id, { 'auto_delete_message': True, 'subtype_id': self.env['ir.model.data'].xmlid_to_res_id('mail.mt_note'),", "Gives default stage_id \"\"\" project_id = self.env.context.get('default_project_id') if not project_id:", "= fields.Many2many('project.tags', string='Tags') kanban_state = fields.Selection([ ('normal', 'In Progress'), ('done',", "'Number of Repetitions'), ], default=\"forever\", string=\"Until\", compute='_compute_repeat', readonly=False) repeat_until =", "self.project_id ctx = dict(self.env.context) ctx = {k: v for k,", "a Month'), ('monthly', 'Once a Month'), ('quarterly', 'Quarterly'), ('yearly', 'Yearly')],", "id), ('mimetype', 'ilike', 'image')]\", string='Cover Image') legend_blocked = fields.Char(related='stage_id.legend_blocked', string='Kanban", "channel_ids=channel_ids, subtype_ids=subtype_ids) project_subtypes = 
self.env['mail.message.subtype'].browse(subtype_ids) if subtype_ids else None task_subtypes", "search_domain.append(('project_ids', '=', section_id)) search_domain += list(domain) # perform search, return", "related=\"project_id.allow_subtasks\", readonly=True) subtask_count = fields.Integer(\"Sub-task count\", compute='_compute_subtask_count') email_from = fields.Char(string='Email", "fri = fields.Boolean(string=\"Fri\", compute='_compute_repeat', readonly=False) sat = fields.Boolean(string=\"Sat\", compute='_compute_repeat', readonly=False)", "in self: project.task_count = result.get(project.id, 0) def attachment_tree_view(self): action =", "must be default stages \"\"\" # collect all section_ids section_ids", "_alias_get_creation_values(self): values = super(Project, self)._alias_get_creation_values() values['alias_model_id'] = self.env['ir.model']._get('project.task').id if self.id:", "fields.Selection(related='project_id.privacy_visibility', string=\"Project Visibility\") # Computed field about working time elapsed", "self._fields.get('repeat_month').selection[fields.Datetime.today().month - 1][0] if 'repeat_until' in default_fields: vals['repeat_until'] = fields.Date.today()", "def _get_all_subtasks(self, depth=0): children = self.mapped('child_ids').filtered(lambda children: children.active) if not", "[('stage', 'Rating when changing stage'), ('periodic', 'Periodical Rating') ], 'Customer", "the recurrence raise UserError(_('You cannot delete recurring tasks. Please, disable", "fields.Boolean(default=True, help=\"If the active field is set to False, it", "of all the sub-tasks linked to this task. 
Usually less", "'=', self.analytic_account_id.id)] return action def action_view_all_rating(self): \"\"\" return the action", "self: self.env.user.has_group('project.group_subtask_project')) allow_recurring_tasks = fields.Boolean('Recurring Tasks', default=lambda self: self.env.user.has_group('project.group_project_recurring_tasks')) #", "section_ids: search_domain = [('|')] * (len(section_ids) - 1) for section_id", "with the recipient(s) because the privacy of the project is", "for stage in self: disabled_projects = stage.project_ids.filtered(lambda p: not p.rating_active)", "users def _inverse_allowed_user(self): for project in self: allowed_users = project.allowed_user_ids", "Date\", compute='_compute_repeat', readonly=False) repeat_number = fields.Integer(string=\"Repetitions\", default=1, compute='_compute_repeat', readonly=False) repeat_on_month", "email. \"\"\" # remove default author when going through the", "and portal_privacy: group_data['has_button_access'] = True return groups def _notify_get_reply_to(self, default=None,", "project_task_type = self.env['project.task.type'].browse(stage_id) if project_task_type.fold or project_task_type.is_closed: return {'date_end': fields.Datetime.now()}", "p in self.env['mail.thread']._mail_find_partner_from_emails(email_list, records=self, force_create=False) if p] self.message_subscribe(partner_ids) return super(Task,", "if folded stage and date_last_stage_update if vals.get('stage_id'): vals.update(self.update_date_end(vals['stage_id'])) vals['date_last_stage_update'] =", "fields.Boolean(string=\"Mon\", compute='_compute_repeat', readonly=False) tue = fields.Boolean(string=\"Tue\", compute='_compute_repeat', readonly=False) wed =", "tasks = fields.One2many('project.task', 'project_id', string=\"Task Activities\") resource_calendar_id = fields.Many2one( 'resource.calendar',", "len(task._get_all_subtasks()) @api.onchange('company_id') def _onchange_task_company(self): if self.project_id.company_id != self.company_id: 
self.project_id =", "users \"\"\" res = super(Task, self).message_subscribe(partner_ids=partner_ids, channel_ids=channel_ids, subtype_ids=subtype_ids) if partner_ids:", "aliases = self.mapped('project_id.alias_name') return [x for x in email_list if", "readonly=True, related_sudo=False) legend_normal = fields.Char(related='stage_id.legend_normal', string='Kanban Ongoing Explanation', readonly=True, related_sudo=False)", "'Periodical Rating') ], 'Customer Ratings Status', default=\"stage\", required=True, help=\"How to", "self.repeat_interval > 0 and\\ (not self.repeat_show_dow or self._get_weekdays()) and\\ (self.repeat_type", "view if self._context.get('default_project_id'): default_project = self.env['project.project'].browse(self.env.context['default_project_id']) else: default_project = self.project_id.subtask_project_id", "= fields.Selection(related='project_id.privacy_visibility', string=\"Project Visibility\") # Computed field about working time", "{'daily': 1, 'weekly': 7, 'bimonthly': 15, 'monthly': 30, 'quarterly': 90,", "== 'date') or (task.repeat_unit == 'year' and task.repeat_on_year == 'date')", "= task.project_id.partner_id or task.parent_id.partner_id @api.depends('partner_id.email', 'parent_id.email_from') def _compute_email_from(self): for task", "task.repeat_show_dow = task.recurring_task and task.repeat_unit == 'week' task.repeat_show_month = task.recurring_task", "ctx.items() if not k.startswith('search_default_')} ctx.update({ 'default_name': self.env.context.get('name', self.name) + ':',", "hacks in JS. 
new_partner = message.partner_ids.filtered(lambda partner: partner.email == self.email_from)", "following customers' in order to make it accessible by the", "('thu', 'Thursday'), ('fri', 'Friday'), ('sat', 'Saturday'), ('sun', 'Sunday'), ], string='Day", "self._context.get('default_project_id'): default_project = self.env['project.project'].browse(self.env.context['default_project_id']) else: default_project = self.project_id.subtask_project_id or self.project_id", "a specific one) # on a document without customer means", "partner_id if any, or else the parent task partner_id. Once", "None task_subtypes = (project_subtypes.mapped('parent_id') | project_subtypes.filtered(lambda sub: sub.internal or sub.default)).ids", "compute='_compute_stage_id', store=True, readonly=False, ondelete='restrict', tracking=True, index=True, default=_get_default_stage_id, group_expand='_read_group_stage_ids', domain=\"[('project_ids', '=',", "get the default value for the copied task on project", "self.env['project.task.type'].search(search_domain, order=order, limit=1).id # ------------------------------------------------ # CRUD overrides # ------------------------------------------------", "\"\"\" aliases = self.sudo().mapped('project_id')._notify_get_reply_to(default=default, records=None, company=company, doc_names=None) res = {task.id:", "0, tasks.ids)]}) @api.returns('self', lambda value: value.id) def copy(self, default=None): if", "string=\"Phone\", readonly=False, store=True, copy=False) company_id = fields.Many2one('res.company', string='Company', required=True, default=lambda", "self: attachment_ids = self.env['ir.attachment'].search([('res_id', '=', task.id), ('res_model', '=', 'project.task')]).ids message_attachment_ids", "'&', ('res_model', '=', 'project.project'), ('res_id', '=', project.id), '&', ('res_model', '=',", "action_context.pop('group_by', None) return dict(action, context=action_context) # --------------------------------------------------- # Business 
Methods", "task.repeat_type == 'until': task.recurrence_message += _('<p><em>Number of tasks: %(tasks_count)s</em></p>') %", "tools, SUPERUSER_ID, _ from odoo.exceptions import UserError, AccessError, ValidationError, RedirectWarning", "('id', '!=', self.id)] # update context, with all default values", "tracking=True) description = fields.Html() active = fields.Boolean(default=True, help=\"If the active", "super(Project, self).write(vals) if vals else True if allowed_users_changed: for project", "string='Show Project on dashboard', help=\"Whether this project should be displayed", "= fields.Boolean(string=\"Sun\", compute='_compute_repeat', readonly=False) repeat_day = fields.Selection([ (str(i), str(i)) for", "False)]) else: task.stage_id = False @api.returns('self', lambda value: value.id) def", "if not children: return self.env['project.task'] if depth == 1: return", "self.recurrence_id: default['recurrence_id'] = self.recurrence_id.copy().id return super(Task, self).copy(default) @api.constrains('parent_id') def _check_parent_id(self):", "== 'month' and task.repeat_on_month == 'day') or (task.repeat_unit == 'year'", "readonly=True, index=True) write_date = fields.Datetime(\"Last Updated On\", readonly=True, index=True) date_end", "self.user_id and not self.stage_id.fold: take_action = self._notify_get_action_link('assign', **local_msg_vals) project_actions =", "in self.with_context(active_test=False): if project.tasks: raise UserError(_('You cannot delete a project", "And add the portal user subscribed to allowed portal users", "(task.repeat_unit == 'month' and task.repeat_on_month == 'day') or (task.repeat_unit ==", "readonly=False) wed = fields.Boolean(string=\"Wed\", compute='_compute_repeat', readonly=False) thu = fields.Boolean(string=\"Thu\", compute='_compute_repeat',", "self: self.env.user.has_group('project.group_project_recurring_tasks')) # rating fields rating_request_deadline = fields.Datetime(compute='_compute_rating_request_deadline', store=True) 
rating_active", "date_last_stage_update = fields.Datetime(string='Last Stage Update', index=True, copy=False, readonly=True) project_id =", "take it')}] new_group[2]['actions'] = project_actions groups = [new_group] + groups", "self._name), ('message_type', 'in', ['email', 'comment'])]) # recurrence fields allow_recurring_tasks =", "values # --------------------------------------------------- # Actions # --------------------------------------------------- def toggle_favorite(self): favorite_projects", "'Wednesday'), ('thu', 'Thursday'), ('fri', 'Friday'), ('sat', 'Saturday'), ('sun', 'Sunday'), ],", "and vals.get('recurring_task') is True: rec_values = {rec_field: vals[rec_field] for rec_field", "stages must belong to this section or be a default", "_inverse_allowed_user(self): for project in self: allowed_users = project.allowed_user_ids project.allowed_portal_user_ids =", "and task.partner_email != task.partner_id.email: task.partner_id.email = task.partner_email @api.depends('partner_id.phone') def _compute_partner_phone(self):", "(red bullet).\\n\") is_closed = fields.Boolean('Closing Stage', help=\"Tasks in this stage", "also be updated.') elif will_write_email: task.ribbon_message = _('By saving this", "for kanban selection, when the task or issue is in", "initially time planned of this task.\") user_id = fields.Many2one('res.users', string='Assigned", "change: update date_assign if vals.get('user_id') and 'date_assign' not in vals:", "see everything.\" \" Portal users may see project and tasks", "('stage_id.fold', '=', False), ('stage_id', '=', False)], ['project_id'], ['project_id']) result =", "all field in its view if self._context.get('default_project_id'): default_project = self.env['project.project'].browse(self.env.context['default_project_id'])", "if set, stages must belong to this section or be", "values as 'quick_create' does not contains all field in its", "rec.project_id) if leftover: res.update(super(Task, 
leftover)._notify_get_reply_to(default=default, records=None, company=company, doc_names=doc_names)) return res", "if not vals.get('subtask_project_id'): project.subtask_project_id = project.id if project.privacy_visibility == 'portal'", "'&', ('stage_id.is_closed', '=', False), ('stage_id.fold', '=', False), ('stage_id', '=', False)],", "limit def _get_all_subtasks(self, depth=0): children = self.mapped('child_ids').filtered(lambda children: children.active) if", "self.env['project.delete.wizard'].create({ 'project_ids': self.ids }) return { 'name': _('Confirmation'), 'view_mode': 'form',", "0, return all children without depth limit def _get_all_subtasks(self, depth=0):", "data in task_data) for project in self: project.task_count = result.get(project.id,", "when a task is pulled in another stage.\\n\" \"- Periodical", "help=\"If the active field is set to False, it will", "!= 'after' or self.repeat_number) and\\ (self.repeat_type != 'until' or self.repeat_until", "res = super(Task, self).message_subscribe(partner_ids=partner_ids, channel_ids=channel_ids, subtype_ids=subtype_ids) if partner_ids: new_allowed_users =", "'week': return [fn(n) for day, fn in DAYS.items() if self[day]]", "allow_subtasks = fields.Boolean('Sub-tasks', default=lambda self: self.env.user.has_group('project.group_subtask_project')) allow_recurring_tasks = fields.Boolean('Recurring Tasks',", "be lower than project end-date.') ] @api.depends('partner_id.email') def _compute_partner_email(self): for", "= task.project_id.partner_id else: task.partner_id = task.project_id.partner_id or task.parent_id.partner_id @api.depends('partner_id.email', 'parent_id.email_from')", "= { 'name': msg.get('subject') or _(\"No Subject\"), 'email_from': msg.get('from'), 'planned_hours':", "if task.partner_id and task.partner_phone != task.partner_id.phone: task.partner_id.phone = task.partner_phone @api.depends('partner_email',", "depth limit def _get_all_subtasks(self, depth=0): children = 
self.mapped('child_ids').filtered(lambda children: children.active)", "_description = 'Task Stage' _order = 'sequence, id' def _get_default_project_ids(self):", "action_assign_to_me(self): self.write({'user_id': self.env.user.id}) # If depth == 1, return only", "readonly=True, related_sudo=False) attachment_ids = fields.One2many('ir.attachment', compute='_compute_attachment_ids', string=\"Main Attachments\", help=\"Attachment that", "{'tasks_count': len(recurring_dates)} def _is_recurrence_valid(self): self.ensure_one() return self.repeat_interval > 0 and\\", "and not self.displayed_image_id: image_attachments = message.attachment_ids.filtered(lambda a: a.mimetype == 'image')", "Business Methods # --------------------------------------------------- @api.model def _create_analytic_account_from_values(self, values): analytic_account =", "date_assign if vals.get('user_id'): vals['date_assign'] = fields.Datetime.now() # Stage change: Update", "project_id ).company_id.id or self.env.company.id if project_id and \"stage_id\" not in", "_sql_constraints = [ ('project_date_greater', 'check(date >= date_start)', 'Error! project start-date", "project containing tasks. 
You can either archive it or first", "self.recurrence_id.ids)] tasks |= self.env['project.task'].search(recurrence_domain) result = super(Task, tasks).write(vals) # rating", "project.message_unsubscribe(allowed_portal_users_removed.partner_id.commercial_partner_id.ids) for task in project.task_ids: task.allowed_user_ids -= permission_removed if 'allow_recurring_tasks'", "False), ('company_id', '=', company_id)]\", check_company=True, help=\"Analytic account to which this", "default_project = self.env['project.project'].browse(self.env.context['default_project_id']) else: default_project = self.project_id.subtask_project_id or self.project_id ctx", "'project_id', 'user_id', default=_get_default_favorite_user_ids, string='Members') is_favorite = fields.Boolean(compute='_compute_is_favorite', inverse='_inverse_is_favorite', string='Show Project", "users'), ('employees', 'All internal users'), ('portal', 'Invited portal users and", "rec_fields} rec_values['next_recurrence_date'] = fields.Datetime.today() recurrence = self.env['project.task.recurrence'].create(rec_values) vals['recurrence_id'] = recurrence.id", "task.recurrence_message += _('<p><em>Number of tasks: %(tasks_count)s</em></p>') % {'tasks_count': len(recurring_dates)} def", "from mail_thread task.attachment_ids = [(6, 0, list(set(attachment_ids) - set(message_attachment_ids)))] @api.depends('project_id.allowed_user_ids',", "project.task_count = result.get(project.id, 0) def attachment_tree_view(self): action = self.env['ir.actions.act_window']._for_xml_id('base.action_attachment') action['domain']", "class Project(models.Model): _name = \"project.project\" _description = \"Project\" _inherit =", "'project_allowed_portal_users_rel', string=\"Allowed Portal Users\", domain=[('share', '=', True)]) doc_count = fields.Integer(compute='_compute_attached_docs_count',", "if message.attachment_ids and not self.displayed_image_id: image_attachments = message.attachment_ids.filtered(lambda a: a.mimetype", 
"readonly=True) date_deadline = fields.Date(string='Deadline', index=True, copy=False, tracking=True) date_last_stage_update = fields.Datetime(string='Last", "= fields.Many2one('mail.alias', string='Alias', ondelete=\"restrict\", required=True, help=\"Internal email associated with this", "search_domain = [('id', 'in', stages.ids)] if 'default_project_id' in self.env.context: search_domain", "the done state for kanban selection, when the task or", "user in new_allowed_users]}) return res # ---------------------------------------- # Case management", "action_unlink(self): wizard = self.env['project.delete.wizard'].create({ 'project_ids': self.ids }) return { 'name':", "project.company_id.id, } def map_tasks(self, new_project_id): \"\"\" copy and map tasks", "0.0 task.working_days_close = 0.0 (self - task_linked_to_calendar).update(dict.fromkeys( ['working_hours_open', 'working_hours_close', 'working_days_open',", "raise ValidationError(_('Error! You cannot create recursive hierarchy of tasks.')) @api.constrains('allowed_user_ids')", "task in self: if task.partner_id: if task.project_id.partner_id: task.partner_id = task.project_id.partner_id", "however we do not # want the gateway user to", "1) if the project partner_id changes, the task partner_id is", "if task.partner_id and task.partner_id.email != task.partner_email: task.partner_email = task.partner_id.email def", "* (len(section_ids) - 1) for section_id in section_ids: search_domain.append(('project_ids', '=',", "vals.get('project_id') or self.env.context.get('default_project_id') if project_id and not \"company_id\" in vals:", "'=', section_id)) search_domain += list(domain) # perform search, return the", "('stage_id.fold', '=', False), ('stage_id', '=', False)]) color = fields.Integer(string='Color Index')", "= self.env[\"ir.actions.actions\"]._for_xml_id(\"analytic.account_analytic_line_action\") action['context'] = {'default_account_id': self.analytic_account_id.id} action['domain'] = [('account_id', 
'=',", "@api.depends('create_date', 'date_end', 'date_assign') def _compute_elapsed(self): task_linked_to_calendar = self.filtered( lambda task:", "want to get the customer's feedbacks.\") rating_status_period = fields.Selection([ ('daily',", "return super(Task, self)._message_post_after_hook(message, msg_vals) def action_assign_to_me(self): self.write({'user_id': self.env.user.id}) # If", "for project in self.filtered(lambda x: x.privacy_visibility != 'portal'): project.access_warning =", "def _compute_partner_email(self): for task in self: if task.partner_id and task.partner_id.email", "help=\"Project in which sub-tasks of the current project will be", "> fields.Date.today()) @api.depends('recurrence_id') def _compute_recurring_count(self): self.recurring_count = 0 recurring_tasks =", "task.email_from = task.partner_id.email or ((task.partner_id or task.parent_id) and task.email_from) or", "to the stage readgroup = self.with_context(active_test=False).env['project.task'].read_group([('stage_id', 'in', self.ids)], ['project_id'], ['project_id'])", "def _compute_access_warning(self): super(Project, self)._compute_access_warning() for project in self.filtered(lambda x: x.privacy_visibility", "through message_process. This override updates the document according to the", "that is called by the mailgateway through message_process. This override", "# collect all section_ids section_ids = [] if section_id: section_ids.append(section_id)", "task partner_id changes, the task partner_id remains the same. \"\"\"", "elif vals.get('recurring_task'): rec_values['next_recurrence_date'] = fields.Datetime.today() recurrence = self.env['project.task.recurrence'].create(rec_values) task.recurrence_id =", "remains the same. 
\"\"\" for task in self: if task.partner_id:", "required=True, tracking=True) description = fields.Html() active = fields.Boolean(default=True, help=\"If the", "vals and not vals['active']: self.env['project.task'].search([('stage_id', 'in', self.ids)]).write({'active': False}) return super(ProjectTaskType,", "= fields.Char( compute='_compute_partner_phone', inverse='_inverse_partner_phone', string=\"Phone\", readonly=False, store=True, copy=False) ribbon_message =", "group_method, group_data in groups: if group_name in ('customer', 'user') or", "portal_users if task.project_id.privacy_visibility == 'followers': task.allowed_user_ids |= task.project_id.allowed_internal_user_ids task.allowed_user_ids -=", "message.partner_ids.filtered(lambda partner: partner.email == self.email_from) if new_partner: self.search([ ('partner_id', '=',", "'sat', 'sun', 'repeat_day', 'repeat_week', 'repeat_month', 'repeat_weekday'] @api.depends('recurring_task', 'repeat_unit', 'repeat_on_month', 'repeat_on_year')", "elif 'kanban_state_label' in init_values and self.kanban_state == 'done': return self.env.ref('project.mt_task_ready')", "write(self, vals): if 'active' in vals and not vals['active']: self.env['project.task'].search([('stage_id',", "stage search taken from the lead: - section_id: if set,", "\" \"with Tasks (or optionally Issues if the Issue Tracker", "index=True) description = fields.Html(string='Description') priority = fields.Selection([ ('0', 'Normal'), ('1',", "if task.date_end: dt_date_end = fields.Datetime.from_string(task.date_end) duration_data = task.project_id.resource_calendar_id.get_work_duration_data(dt_create_date, dt_date_end, compute_leaves=True)", "!= 'this': recurrence_domain = [] if recurrence_update == 'subsequent': for", "displayed_image_id, we couln't use attachment_ids because a one2many is represented", "project_id)]\", copy=False) tag_ids = fields.Many2many('project.tags', string='Tags') kanban_state = fields.Selection([ ('normal',", "return 
project.write({'tasks': [(6, 0, tasks.ids)]}) @api.returns('self', lambda value: value.id) def", "tasks to their project if any. \"\"\" aliases = self.sudo().mapped('project_id')._notify_get_reply_to(default=default,", "action['display_name'] = self.name return action def action_view_account_analytic_line(self): \"\"\" return the", "= 'read' _order = \"priority desc, sequence, id desc\" _check_company_auto", "= fields.Boolean(string=\"Recurrent\") recurring_count = fields.Integer(string=\"Tasks in Recurrence\", compute='_compute_recurring_count') recurrence_id =", "('is_closed', '=', False)]) else: task.stage_id = False @api.returns('self', lambda value:", "section_id: section_ids.append(section_id) section_ids.extend(self.mapped('project_id').ids) search_domain = [] if section_ids: search_domain =", "belong to this section or be a default stage; if", "default_fields: vals['repeat_month'] = self._fields.get('repeat_month').selection[fields.Datetime.today().month - 1][0] if 'repeat_until' in default_fields:", "(task.repeat_unit == 'year' and task.repeat_on_year == 'day') task.repeat_show_dow = task.recurring_task", "_compute_subtask_planned_hours(self): for task in self: task.subtask_planned_hours = sum(child_task.planned_hours + child_task.subtask_planned_hours", "res def message_unsubscribe(self, partner_ids=None, channel_ids=None): \"\"\" Unsubscribe from all tasks", "task.repeat_type == 'after' else 5) delta = task.repeat_interval if task.repeat_unit", "one) # on a document without customer means that it", "!= task.partner_id.phone: task.partner_phone = task.partner_id.phone def _inverse_partner_phone(self): for task in", "A good feedback from the customer will update the kanban", "'Visible by following customers' in order to make it accessible", "saving this change, the customer email and phone number will", "task.access_url = '/my/task/%s' % task.id def _compute_access_warning(self): super(Task, self)._compute_access_warning() for", "self: task.repeat_show_day 
= task.recurring_task and (task.repeat_unit == 'month' and task.repeat_on_month", "-= portal_users elif task.project_id.privacy_visibility != 'followers': task.allowed_user_ids -= internal_users @api.depends('create_date',", "the mail gateway. Indeed we # do not want to", "'project.project'), ('res_id', '=', project.id), '&', ('res_model', '=', 'project.task'), ('res_id', 'in',", "channel_ids=None, subtype_ids=None): \"\"\" Subscribe to all existing active tasks when", "auto_validation_kanban_state = fields.Boolean('Automatic kanban status', default=False, help=\"Automatically modify the kanban", "date_end if folded stage and date_last_stage_update if vals.get('stage_id'): vals.update(self.update_date_end(vals['stage_id'])) vals['date_last_stage_update']", "project duplication \"\"\" return { 'stage_id': task.stage_id.id, 'name': task.name, 'company_id':", "to get the customer's feedbacks.\") rating_status_period = fields.Selection([ ('daily', 'Daily'),", "= super(Project, self).write(vals) if vals else True if allowed_users_changed: for" ]
[ "APP_SETTINGS environment variable :return The config class instance :rtype: Config", "Staging Config is for... staging things \"\"\" DEBUG = True", "config_imports[-1] config_module = import_module('.'.join(config_imports[:-1])) config_class = getattr(config_module, config_class_name, None) if", "class. \"\"\" pass def get_config(): \"\"\" Get the Config Class", "<gh_stars>1-10 # -*- coding: utf-8 -*- \"\"\" Config File for", "Test Config... You should be testing right now instead reading", "os.environ['APP_SETTINGS'].split('.') config_class_name = config_imports[-1] config_module = import_module('.'.join(config_imports[:-1])) config_class = getattr(config_module,", "utf-8 -*- \"\"\" Config File for enviroment variables \"\"\" import", "TEST' class ConfigClassNotFound(Exception): \"\"\" Raises when the APP_SETTINGS environment variable", "= import_module('.'.join(config_imports[:-1])) config_class = getattr(config_module, config_class_name, None) if not config_class:", "\"\"\" Staging Config is for... staging things \"\"\" DEBUG =", "ConfigClassNotFound('Unable to find a config class in {}'.format(os.environ['APP_SETTINGS'])) return config_class()", "right now instead reading docs!!! \"\"\" TESTING = True KEY_ON_TEST", "instance defined in APP_SETTINGS environment variable :return The config class", "variable have a value which does not point to an", "def get_config(): \"\"\" Get the Config Class instance defined in", "Config(object): \"\"\" Base class for all config variables \"\"\" DEBUG", "now instead reading docs!!! 
\"\"\" TESTING = True KEY_ON_TEST =", "= os.environ['APP_SETTINGS'].split('.') config_class_name = config_imports[-1] config_module = import_module('.'.join(config_imports[:-1])) config_class =", "\"\"\" Get the Config Class instance defined in APP_SETTINGS environment", "Config File for enviroment variables \"\"\" import os from importlib", "pass def get_config(): \"\"\" Get the Config Class instance defined", "import_module class Config(object): \"\"\" Base class for all config variables", "False CSRF_ENABLED = True SQLALCHEMY_DATABASE_URI = os.environ['DATABASE_URL'] SECRET_KEY = os.environ['SECRET_KEY']", "DEVELOPMENT = False CSRF_ENABLED = True SQLALCHEMY_DATABASE_URI = os.environ['DATABASE_URL'] SECRET_KEY", "Development Config... this is your home developer! \"\"\" DEVELOPMENT =", "TESTING = False DEVELOPMENT = False CSRF_ENABLED = True SQLALCHEMY_DATABASE_URI", "\"\"\" TESTING = True KEY_ON_TEST = 'KEY ON TEST' class", "ProductionConfig(Config): \"\"\" Production Config... this is the real thing \"\"\"", "staging things \"\"\" DEBUG = True class DevelopmentConfig(Config): \"\"\" Development", "environment variable :return The config class instance :rtype: Config \"\"\"", "\"\"\" Config File for enviroment variables \"\"\" import os from", "developer! \"\"\" DEVELOPMENT = True DEBUG = True class TestingConfig(Config):", "Production Config... this is the real thing \"\"\" DEBUG =", "False class StagingConfig(Config): \"\"\" Staging Config is for... staging things", "False TESTING = False DEVELOPMENT = False CSRF_ENABLED = True", "= False class StagingConfig(Config): \"\"\" Staging Config is for... staging", "\"\"\" DEVELOPMENT = True DEBUG = True class TestingConfig(Config): \"\"\"", "class TestingConfig(Config): \"\"\" Test Config... 
You should be testing right", "class for all config variables \"\"\" DEBUG = False TESTING", "variables \"\"\" import os from importlib import import_module class Config(object):", "None) if not config_class: raise ConfigClassNotFound('Unable to find a config", "from importlib import import_module class Config(object): \"\"\" Base class for", "= getattr(config_module, config_class_name, None) if not config_class: raise ConfigClassNotFound('Unable to", "a value which does not point to an uninstantiable class.", "ConfigClassNotFound(Exception): \"\"\" Raises when the APP_SETTINGS environment variable have a", "TestingConfig(Config): \"\"\" Test Config... You should be testing right now", "reading docs!!! \"\"\" TESTING = True KEY_ON_TEST = 'KEY ON", "Config is for... staging things \"\"\" DEBUG = True class", "should be testing right now instead reading docs!!! \"\"\" TESTING", "= False CSRF_ENABLED = True SQLALCHEMY_DATABASE_URI = os.environ['DATABASE_URL'] SECRET_KEY =", "Config Class instance defined in APP_SETTINGS environment variable :return The", "getattr(config_module, config_class_name, None) if not config_class: raise ConfigClassNotFound('Unable to find", "= False TESTING = False DEVELOPMENT = False CSRF_ENABLED =", "= config_imports[-1] config_module = import_module('.'.join(config_imports[:-1])) config_class = getattr(config_module, config_class_name, None)", "have a value which does not point to an uninstantiable", "is the real thing \"\"\" DEBUG = False class StagingConfig(Config):", "KEY_ON_TEST = 'KEY ON TEST' class ConfigClassNotFound(Exception): \"\"\" Raises when", "\"\"\" Raises when the APP_SETTINGS environment variable have a value", "DEBUG = True class DevelopmentConfig(Config): \"\"\" Development Config... 
this is", "= os.environ['DATABASE_URL'] SECRET_KEY = os.environ['SECRET_KEY'] class ProductionConfig(Config): \"\"\" Production Config...", "The config class instance :rtype: Config \"\"\" config_imports = os.environ['APP_SETTINGS'].split('.')", "class StagingConfig(Config): \"\"\" Staging Config is for... staging things \"\"\"", "for enviroment variables \"\"\" import os from importlib import import_module", "True class DevelopmentConfig(Config): \"\"\" Development Config... this is your home", "= True class TestingConfig(Config): \"\"\" Test Config... You should be", "= True KEY_ON_TEST = 'KEY ON TEST' class ConfigClassNotFound(Exception): \"\"\"", "True SQLALCHEMY_DATABASE_URI = os.environ['DATABASE_URL'] SECRET_KEY = os.environ['SECRET_KEY'] class ProductionConfig(Config): \"\"\"", "\"\"\" DEBUG = True class DevelopmentConfig(Config): \"\"\" Development Config... this", "\"\"\" Test Config... You should be testing right now instead", "import_module('.'.join(config_imports[:-1])) config_class = getattr(config_module, config_class_name, None) if not config_class: raise", "= True class DevelopmentConfig(Config): \"\"\" Development Config... this is your", "to an uninstantiable class. \"\"\" pass def get_config(): \"\"\" Get", "is your home developer! \"\"\" DEVELOPMENT = True DEBUG =", "uninstantiable class. \"\"\" pass def get_config(): \"\"\" Get the Config", "be testing right now instead reading docs!!! \"\"\" TESTING =", "import os from importlib import import_module class Config(object): \"\"\" Base", "\"\"\" DEBUG = False TESTING = False DEVELOPMENT = False", "False DEVELOPMENT = False CSRF_ENABLED = True SQLALCHEMY_DATABASE_URI = os.environ['DATABASE_URL']", "class instance :rtype: Config \"\"\" config_imports = os.environ['APP_SETTINGS'].split('.') config_class_name =", "for... staging things \"\"\" DEBUG = True class DevelopmentConfig(Config): \"\"\"", "DEBUG = False class StagingConfig(Config): \"\"\" Staging Config is for...", "Config... 
You should be testing right now instead reading docs!!!", "DevelopmentConfig(Config): \"\"\" Development Config... this is your home developer! \"\"\"", "StagingConfig(Config): \"\"\" Staging Config is for... staging things \"\"\" DEBUG", "get_config(): \"\"\" Get the Config Class instance defined in APP_SETTINGS", "= 'KEY ON TEST' class ConfigClassNotFound(Exception): \"\"\" Raises when the", "class DevelopmentConfig(Config): \"\"\" Development Config... this is your home developer!", "class ConfigClassNotFound(Exception): \"\"\" Raises when the APP_SETTINGS environment variable have", "Config... this is your home developer! \"\"\" DEVELOPMENT = True", "config variables \"\"\" DEBUG = False TESTING = False DEVELOPMENT", "Base class for all config variables \"\"\" DEBUG = False", "your home developer! \"\"\" DEVELOPMENT = True DEBUG = True", "os.environ['SECRET_KEY'] class ProductionConfig(Config): \"\"\" Production Config... this is the real", "= os.environ['SECRET_KEY'] class ProductionConfig(Config): \"\"\" Production Config... this is the", "Config... this is the real thing \"\"\" DEBUG = False", "value which does not point to an uninstantiable class. \"\"\"", "is for... staging things \"\"\" DEBUG = True class DevelopmentConfig(Config):", "\"\"\" import os from importlib import import_module class Config(object): \"\"\"", "this is your home developer! \"\"\" DEVELOPMENT = True DEBUG", "APP_SETTINGS environment variable have a value which does not point", "True KEY_ON_TEST = 'KEY ON TEST' class ConfigClassNotFound(Exception): \"\"\" Raises", "docs!!! \"\"\" TESTING = True KEY_ON_TEST = 'KEY ON TEST'", "defined in APP_SETTINGS environment variable :return The config class instance", "point to an uninstantiable class. \"\"\" pass def get_config(): \"\"\"", "config_imports = os.environ['APP_SETTINGS'].split('.') config_class_name = config_imports[-1] config_module = import_module('.'.join(config_imports[:-1])) config_class", "\"\"\" Production Config... 
this is the real thing \"\"\" DEBUG", "does not point to an uninstantiable class. \"\"\" pass def", "instead reading docs!!! \"\"\" TESTING = True KEY_ON_TEST = 'KEY", "SECRET_KEY = os.environ['SECRET_KEY'] class ProductionConfig(Config): \"\"\" Production Config... this is", "home developer! \"\"\" DEVELOPMENT = True DEBUG = True class", "all config variables \"\"\" DEBUG = False TESTING = False", "DEBUG = False TESTING = False DEVELOPMENT = False CSRF_ENABLED", "if not config_class: raise ConfigClassNotFound('Unable to find a config class", "True class TestingConfig(Config): \"\"\" Test Config... You should be testing", "TESTING = True KEY_ON_TEST = 'KEY ON TEST' class ConfigClassNotFound(Exception):", "config_module = import_module('.'.join(config_imports[:-1])) config_class = getattr(config_module, config_class_name, None) if not", "which does not point to an uninstantiable class. \"\"\" pass", "in APP_SETTINGS environment variable :return The config class instance :rtype:", "variable :return The config class instance :rtype: Config \"\"\" config_imports", "= True DEBUG = True class TestingConfig(Config): \"\"\" Test Config...", "DEVELOPMENT = True DEBUG = True class TestingConfig(Config): \"\"\" Test", "'KEY ON TEST' class ConfigClassNotFound(Exception): \"\"\" Raises when the APP_SETTINGS", "\"\"\" DEBUG = False class StagingConfig(Config): \"\"\" Staging Config is", ":rtype: Config \"\"\" config_imports = os.environ['APP_SETTINGS'].split('.') config_class_name = config_imports[-1] config_module", "environment variable have a value which does not point to", "config_class_name, None) if not config_class: raise ConfigClassNotFound('Unable to find a", "an uninstantiable class. \"\"\" pass def get_config(): \"\"\" Get the", "testing right now instead reading docs!!! 
\"\"\" TESTING = True", "File for enviroment variables \"\"\" import os from importlib import", "= False DEVELOPMENT = False CSRF_ENABLED = True SQLALCHEMY_DATABASE_URI =", "config class instance :rtype: Config \"\"\" config_imports = os.environ['APP_SETTINGS'].split('.') config_class_name", "\"\"\" pass def get_config(): \"\"\" Get the Config Class instance", "= True SQLALCHEMY_DATABASE_URI = os.environ['DATABASE_URL'] SECRET_KEY = os.environ['SECRET_KEY'] class ProductionConfig(Config):", "\"\"\" config_imports = os.environ['APP_SETTINGS'].split('.') config_class_name = config_imports[-1] config_module = import_module('.'.join(config_imports[:-1]))", "import import_module class Config(object): \"\"\" Base class for all config", "the APP_SETTINGS environment variable have a value which does not", "not point to an uninstantiable class. \"\"\" pass def get_config():", "os from importlib import import_module class Config(object): \"\"\" Base class", "variables \"\"\" DEBUG = False TESTING = False DEVELOPMENT =", "CSRF_ENABLED = True SQLALCHEMY_DATABASE_URI = os.environ['DATABASE_URL'] SECRET_KEY = os.environ['SECRET_KEY'] class", "things \"\"\" DEBUG = True class DevelopmentConfig(Config): \"\"\" Development Config...", "True DEBUG = True class TestingConfig(Config): \"\"\" Test Config... You", "for all config variables \"\"\" DEBUG = False TESTING =", "importlib import import_module class Config(object): \"\"\" Base class for all", "coding: utf-8 -*- \"\"\" Config File for enviroment variables \"\"\"", "thing \"\"\" DEBUG = False class StagingConfig(Config): \"\"\" Staging Config", "config_class_name = config_imports[-1] config_module = import_module('.'.join(config_imports[:-1])) config_class = getattr(config_module, config_class_name,", "the real thing \"\"\" DEBUG = False class StagingConfig(Config): \"\"\"", "-*- coding: utf-8 -*- \"\"\" Config File for enviroment variables", "\"\"\" Development Config... this is your home developer! 
\"\"\" DEVELOPMENT", "Raises when the APP_SETTINGS environment variable have a value which", "config_class: raise ConfigClassNotFound('Unable to find a config class in {}'.format(os.environ['APP_SETTINGS']))", "config_class = getattr(config_module, config_class_name, None) if not config_class: raise ConfigClassNotFound('Unable", "raise ConfigClassNotFound('Unable to find a config class in {}'.format(os.environ['APP_SETTINGS'])) return", "Config \"\"\" config_imports = os.environ['APP_SETTINGS'].split('.') config_class_name = config_imports[-1] config_module =", "the Config Class instance defined in APP_SETTINGS environment variable :return", "SQLALCHEMY_DATABASE_URI = os.environ['DATABASE_URL'] SECRET_KEY = os.environ['SECRET_KEY'] class ProductionConfig(Config): \"\"\" Production", "class Config(object): \"\"\" Base class for all config variables \"\"\"", ":return The config class instance :rtype: Config \"\"\" config_imports =", "class ProductionConfig(Config): \"\"\" Production Config... this is the real thing", "-*- \"\"\" Config File for enviroment variables \"\"\" import os", "DEBUG = True class TestingConfig(Config): \"\"\" Test Config... You should", "not config_class: raise ConfigClassNotFound('Unable to find a config class in", "enviroment variables \"\"\" import os from importlib import import_module class", "ON TEST' class ConfigClassNotFound(Exception): \"\"\" Raises when the APP_SETTINGS environment", "Class instance defined in APP_SETTINGS environment variable :return The config", "instance :rtype: Config \"\"\" config_imports = os.environ['APP_SETTINGS'].split('.') config_class_name = config_imports[-1]", "\"\"\" Base class for all config variables \"\"\" DEBUG =", "this is the real thing \"\"\" DEBUG = False class", "You should be testing right now instead reading docs!!! 
\"\"\"", "when the APP_SETTINGS environment variable have a value which does", "# -*- coding: utf-8 -*- \"\"\" Config File for enviroment", "real thing \"\"\" DEBUG = False class StagingConfig(Config): \"\"\" Staging", "Get the Config Class instance defined in APP_SETTINGS environment variable", "os.environ['DATABASE_URL'] SECRET_KEY = os.environ['SECRET_KEY'] class ProductionConfig(Config): \"\"\" Production Config... this" ]
[ "from postgres_dao.ddl_dao import DdlDao from postgres_dao.dml_dao import DmlDao as PsqlDmlDao", "DdlDao from postgres_dao.dml_dao import DmlDao as PsqlDmlDao psql_ddl_dao = DdlDao()", "zipcode_data = mysql_select_dao.select_all_zipcodes() psql_dml_dao.insert_zipcode(zipcode_data) data = mysql_select_dao.select_full_addr_month_rpt() psql_dml_dao.trunc_addr_month_rpt() psql_dml_dao.insert_addr_month_rpt(data) data", "= mysql_select_dao.select_all_counties() psql_dml_dao.insert_county(county_data) city_data = mysql_select_dao.select_all_cities() psql_dml_dao.insert_city(city_data) zipcode_data = mysql_select_dao.select_all_zipcodes()", "mysql_select_dao.select_all_cities() psql_dml_dao.insert_city(city_data) zipcode_data = mysql_select_dao.select_all_zipcodes() psql_dml_dao.insert_zipcode(zipcode_data) data = mysql_select_dao.select_full_addr_month_rpt() psql_dml_dao.trunc_addr_month_rpt()", "county_data = mysql_select_dao.select_all_counties() psql_dml_dao.insert_county(county_data) city_data = mysql_select_dao.select_all_cities() psql_dml_dao.insert_city(city_data) zipcode_data =", "MysqlSelectDao() psql_dml_dao = PsqlDmlDao() psql_ddl_dao.create_tables() county_data = mysql_select_dao.select_all_counties() psql_dml_dao.insert_county(county_data) city_data", "psql_ddl_dao = DdlDao() mysql_select_dao = MysqlSelectDao() psql_dml_dao = PsqlDmlDao() psql_ddl_dao.create_tables()", "PsqlDmlDao psql_ddl_dao = DdlDao() mysql_select_dao = MysqlSelectDao() psql_dml_dao = PsqlDmlDao()", "PsqlDmlDao() psql_ddl_dao.create_tables() county_data = mysql_select_dao.select_all_counties() psql_dml_dao.insert_county(county_data) city_data = mysql_select_dao.select_all_cities() psql_dml_dao.insert_city(city_data)", "DmlDao as PsqlDmlDao psql_ddl_dao = DdlDao() mysql_select_dao = MysqlSelectDao() psql_dml_dao", "psql_dml_dao = PsqlDmlDao() psql_ddl_dao.create_tables() county_data = mysql_select_dao.select_all_counties() psql_dml_dao.insert_county(county_data) city_data =", 
"mysql_select_dao.select_all_counties() psql_dml_dao.insert_county(county_data) city_data = mysql_select_dao.select_all_cities() psql_dml_dao.insert_city(city_data) zipcode_data = mysql_select_dao.select_all_zipcodes() psql_dml_dao.insert_zipcode(zipcode_data)", "data = mysql_select_dao.select_full_addr_month_rpt() psql_dml_dao.trunc_addr_month_rpt() psql_dml_dao.insert_addr_month_rpt(data) data = mysql_select_dao.select_full_mls_daily_rpt() psql_dml_dao.trunc_mls_rpt() psql_dml_dao.insert_mls_rpt(data)", "city_data = mysql_select_dao.select_all_cities() psql_dml_dao.insert_city(city_data) zipcode_data = mysql_select_dao.select_all_zipcodes() psql_dml_dao.insert_zipcode(zipcode_data) data =", "psql_dml_dao.insert_zipcode(zipcode_data) data = mysql_select_dao.select_full_addr_month_rpt() psql_dml_dao.trunc_addr_month_rpt() psql_dml_dao.insert_addr_month_rpt(data) data = mysql_select_dao.select_full_mls_daily_rpt() psql_dml_dao.trunc_mls_rpt()", "mysql_dao.select_dao import SelectDao as MysqlSelectDao from postgres_dao.ddl_dao import DdlDao from", "as MysqlSelectDao from postgres_dao.ddl_dao import DdlDao from postgres_dao.dml_dao import DmlDao", "postgres_dao.ddl_dao import DdlDao from postgres_dao.dml_dao import DmlDao as PsqlDmlDao psql_ddl_dao", "SelectDao as MysqlSelectDao from postgres_dao.ddl_dao import DdlDao from postgres_dao.dml_dao import", "as PsqlDmlDao psql_ddl_dao = DdlDao() mysql_select_dao = MysqlSelectDao() psql_dml_dao =", "postgres_dao.dml_dao import DmlDao as PsqlDmlDao psql_ddl_dao = DdlDao() mysql_select_dao =", "mysql_select_dao.select_full_addr_month_rpt() psql_dml_dao.trunc_addr_month_rpt() psql_dml_dao.insert_addr_month_rpt(data) data = mysql_select_dao.select_full_mls_daily_rpt() psql_dml_dao.trunc_mls_rpt() psql_dml_dao.insert_mls_rpt(data) mysql_select_dao.close() psql_dml_dao.close()", "from postgres_dao.dml_dao import DmlDao as PsqlDmlDao psql_ddl_dao = DdlDao() mysql_select_dao", "= mysql_select_dao.select_all_cities() 
psql_dml_dao.insert_city(city_data) zipcode_data = mysql_select_dao.select_all_zipcodes() psql_dml_dao.insert_zipcode(zipcode_data) data = mysql_select_dao.select_full_addr_month_rpt()", "psql_ddl_dao.create_tables() county_data = mysql_select_dao.select_all_counties() psql_dml_dao.insert_county(county_data) city_data = mysql_select_dao.select_all_cities() psql_dml_dao.insert_city(city_data) zipcode_data", "psql_dml_dao.insert_city(city_data) zipcode_data = mysql_select_dao.select_all_zipcodes() psql_dml_dao.insert_zipcode(zipcode_data) data = mysql_select_dao.select_full_addr_month_rpt() psql_dml_dao.trunc_addr_month_rpt() psql_dml_dao.insert_addr_month_rpt(data)", "= mysql_select_dao.select_all_zipcodes() psql_dml_dao.insert_zipcode(zipcode_data) data = mysql_select_dao.select_full_addr_month_rpt() psql_dml_dao.trunc_addr_month_rpt() psql_dml_dao.insert_addr_month_rpt(data) data =", "psql_dml_dao.insert_county(county_data) city_data = mysql_select_dao.select_all_cities() psql_dml_dao.insert_city(city_data) zipcode_data = mysql_select_dao.select_all_zipcodes() psql_dml_dao.insert_zipcode(zipcode_data) data", "= DdlDao() mysql_select_dao = MysqlSelectDao() psql_dml_dao = PsqlDmlDao() psql_ddl_dao.create_tables() county_data", "import DmlDao as PsqlDmlDao psql_ddl_dao = DdlDao() mysql_select_dao = MysqlSelectDao()", "= mysql_select_dao.select_full_addr_month_rpt() psql_dml_dao.trunc_addr_month_rpt() psql_dml_dao.insert_addr_month_rpt(data) data = mysql_select_dao.select_full_mls_daily_rpt() psql_dml_dao.trunc_mls_rpt() psql_dml_dao.insert_mls_rpt(data) mysql_select_dao.close()", "MysqlSelectDao from postgres_dao.ddl_dao import DdlDao from postgres_dao.dml_dao import DmlDao as", "= PsqlDmlDao() psql_ddl_dao.create_tables() county_data = mysql_select_dao.select_all_counties() psql_dml_dao.insert_county(county_data) city_data = mysql_select_dao.select_all_cities()", "<reponame>hongyuanChrisLi/RealEstateDBConvert from mysql_dao.select_dao import SelectDao as 
MysqlSelectDao from postgres_dao.ddl_dao import", "from mysql_dao.select_dao import SelectDao as MysqlSelectDao from postgres_dao.ddl_dao import DdlDao", "import DdlDao from postgres_dao.dml_dao import DmlDao as PsqlDmlDao psql_ddl_dao =", "= MysqlSelectDao() psql_dml_dao = PsqlDmlDao() psql_ddl_dao.create_tables() county_data = mysql_select_dao.select_all_counties() psql_dml_dao.insert_county(county_data)", "mysql_select_dao.select_all_zipcodes() psql_dml_dao.insert_zipcode(zipcode_data) data = mysql_select_dao.select_full_addr_month_rpt() psql_dml_dao.trunc_addr_month_rpt() psql_dml_dao.insert_addr_month_rpt(data) data = mysql_select_dao.select_full_mls_daily_rpt()", "DdlDao() mysql_select_dao = MysqlSelectDao() psql_dml_dao = PsqlDmlDao() psql_ddl_dao.create_tables() county_data =", "import SelectDao as MysqlSelectDao from postgres_dao.ddl_dao import DdlDao from postgres_dao.dml_dao", "mysql_select_dao = MysqlSelectDao() psql_dml_dao = PsqlDmlDao() psql_ddl_dao.create_tables() county_data = mysql_select_dao.select_all_counties()" ]
[ "bucket in rest.get_buckets(): self.log.info(\"*****************************\\ bucket %s: memUsed %s\\ ****************************\" %", "Memory\" error (UI) \"\"\" def test_load_memory(self): num_items = self.quota *", "False) number = 2**degree first = ['james', 'sharon'] template =", "template, [number,], first, start=0, end=self.num_items) self.log.info(\"create %s documents...\" % (self.num_items))", "for Couchbase should be approx. 75% (12G) of total RAM.", "items\") remain_keys, _ = bucket_to_load.kvs[1].key_set() last_key_to_expire = remain_keys[0.9 * len(remain_keys)][4:]", "****************************\" % (bucket.name, bucket.stats.memUsed)) self.log.info(\"Expire/Delete/update random items (ratio \\ of", "compression=self.sdk_compression) load.result() self.log.info(\"Insert new items or update existing items across", "if error: self.log.info(\"Unable to create documents as expected: %s\" %", "load = self.cluster.async_load_gen_docs(self.master, bucket_to_load.name, gen_expire, bucket_to_load.kvs[1], 'update', exp=1, compression=self.sdk_compression) load.result()", "0) self._verify_stats_all_buckets([self.master]) except Exception as e: if error: self.log.info(\"Unable to", "Memcached Buckets. 
2) Total memory quota allocated for Couchbase should", "'update', exp=1, compression=self.sdk_compression)) tasks.append(self.cluster.async_load_gen_docs(self.master, bucket_to_load.name, gen_delete, bucket_to_load.kvs[1], 'delete', compression=self.sdk_compression)) for", "= DocumentGenerator('test_docs', template, [number,], first, start=0, end=self.num_items) self.log.info(\"create %s documents...\"", "6) Expire/Delete/update random items (ratio of expiration vs delete ~=", "6600) gen_delete = BlobGenerator('mike', 'mike-', self.value_size, start=current_num + 6600, end=current_num", "items or update existing items across buckets 10) See if", "self.buckets: if bucket.type != 'memcached': bucket_to_load = bucket break new_num_items", "new items upto high_wat_mark (75% of memory quota)\") for bucket", "compression=self.sdk_compression) load.result() end_time = time.time() + 60*60*3 while time.time() <", "bucket_to_load.name, gen_delete, bucket_to_load.kvs[1], 'delete', compression=self.sdk_compression)) for task in tasks: task.result()", "gen_update = BlobGenerator('mike', 'mike-', self.value_size, start=current_num, end=current_num + 5000) gen_expire", "test_load_memory(self): num_items = self.quota * 1024 * 0.6 / self.value_size", "gen_load, \"create\", 0) self._verify_stats_all_buckets([self.master]) except Exception as e: if error:", "RAM. 
3) Load initial data on all buckets upto 60%", "\\ of expiration vs delete ~= 8:2)\") current_num = 0", "and do the following (5) to (8) 5) Insert new", "buckets 10) See if we can run into \"Hard out", "= bucket break new_num_items = self.quota * 1024 * 0.15", "wait_task = self.cluster.async_wait_for_stats(self.servers[:self.nodes_init], bucket_to_load, 'all', 'ep_total_del_items', '==', num_items * 3)", "self).tearDown() def test_docs_int_big_values(self): degree = self.input.param(\"degree\", 53) error = self.input.param(\"error\",", "'update', exp=1, compression=self.sdk_compression) load.result() self.log.info(\"Insert new items or update existing", "0 wait_task = self.cluster.async_wait_for_stats(self.servers[:self.nodes_init], bucket_to_load, 'all', 'ep_total_del_items', '==', num_items *", "import BaseTestCase from couchbase_helper.documentgenerator import DocumentGenerator from membase.api.rest_client import RestConnection", "quota\") gen_load = BlobGenerator('mike', 'mike-', self.value_size, start=0, end=num_items) self._load_all_buckets(self.master, gen_load,", "Configure a cluster with 4 Couchbase Buckets and 1 Memcached", "allocated for Couchbase should be approx. 
75% (12G) of total", "for bucket in self.buckets: if bucket.type != 'memcached': bucket_to_load =", "/ self.value_size num_items = num_items / len(self.buckets) self.log.info(\"Load initial data", "items across buckets\") gen_load = BlobGenerator('mike', 'mike-', self.value_size, start=new_num_items +", "gen_delete, bucket_to_load.kvs[1], 'delete', compression=self.sdk_compression)) for task in tasks: task.result() current_num", "tasks.append(self.cluster.async_load_gen_docs(self.master, bucket_to_load.name, gen_delete, bucket_to_load.kvs[1], 'delete', compression=self.sdk_compression)) for task in tasks:", "(8) 5) Insert new items upto high_wat_mark (75% of memory", "Insert new items or update existing items across buckets 10)", "start=current_num + 5000, end=current_num + 6600) gen_delete = BlobGenerator('mike', 'mike-',", "= RestConnection(self.master) for bucket in rest.get_buckets(): self.log.info(\"*****************************\\ bucket %s: memUsed", "Buckets. 2) Total memory quota allocated for Couchbase should be", "we can run into \"Hard out of Memory\" error (UI)", "60% of each memory quota\") gen_load = BlobGenerator('mike', 'mike-', self.value_size,", "7) Repeat (6) until \"ep_total_del_items\" is ~= (3 X #", "def test_docs_int_big_values(self): degree = self.input.param(\"degree\", 53) error = self.input.param(\"error\", False)", "gen_load = BlobGenerator('mike', 'mike-', self.value_size, start=0, end=num_items) self._load_all_buckets(self.master, gen_load, \"create\",", "current_num = 0 wait_task = self.cluster.async_wait_for_stats(self.servers[:self.nodes_init], bucket_to_load, 'all', 'ep_total_del_items', '==',", "% str(number)) #docs.docs.DocsTests.test_load_memory,nodes_init=3,standard_buckets=3,memcached_buckets=1,replicas=2,quota_percent=75 \"\"\" 1) Configure a cluster with 4", "end=last_key_to_expire) load = self.cluster.async_load_gen_docs(self.master, bucket_to_load.name, gen_expire, bucket_to_load.kvs[1], 'update', exp=1, 
compression=self.sdk_compression)", "self.input.param(\"degree\", 53) error = self.input.param(\"error\", False) number = 2**degree first", "couchbase_helper.documentgenerator import BlobGenerator class DocsTests(BaseTestCase): def setUp(self): super(DocsTests, self).setUp() def", "for task in tasks: task.result() current_num += 7000 self.log.info(\"Expire 90%", "self.cluster.async_load_gen_docs(self.master, bucket_to_load.name, gen_expire, bucket_to_load.kvs[1], 'update', exp=1, compression=self.sdk_compression) load.result() self.log.info(\"Insert new", "of memory quota)\") for bucket in self.buckets: if bucket.type !=", "documents...\" % (self.num_items)) try: self._load_all_buckets(self.master, gen_load, \"create\", 0) self._verify_stats_all_buckets([self.master]) except", "update existing items across buckets 10) See if we can", "= ['james', 'sharon'] template = '{{ \"number\": {0}, \"first_name\": \"{1}\"", "one bucket and do the following (5) to (8) 5)", "}}' gen_load = DocumentGenerator('test_docs', template, [number,], first, start=0, end=self.num_items) self.log.info(\"create", "1024 * 0.6 / self.value_size num_items = num_items / len(self.buckets)", "quota 4) Pick one bucket and do the following (5)", "error: self.log.info(\"Unable to create documents as expected: %s\" % str(e))", "self.cluster.async_load_gen_docs(self.master, bucket_to_load.name, gen_load, bucket_to_load.kvs[1], 'create', compression=self.sdk_compression) load.result() end_time = time.time()", "Exception as e: if error: self.log.info(\"Unable to create documents as", "quota)\") for bucket in self.buckets: if bucket.type != 'memcached': bucket_to_load", "self.log.info(\"Unable to create documents as expected: %s\" % str(e)) else:", "= time.time() + 60*60*3 while time.time() < end_time: self.log.info(\"check memUsed\")", "'{{ \"number\": {0}, \"first_name\": \"{1}\" }}' gen_load = DocumentGenerator('test_docs', template,", "6600, end=current_num + 7000) tasks = [] 
tasks.append(self.cluster.async_load_gen_docs(self.master, bucket_to_load.name, gen_update,", "BlobGenerator('mike', 'mike-', self.value_size, start=current_num + 6600, end=current_num + 7000) tasks", "2) Total memory quota allocated for Couchbase should be approx.", "membase.api.rest_client import RestConnection from couchbase_helper.documentgenerator import BlobGenerator class DocsTests(BaseTestCase): def", "items 9) Insert new items or update existing items across", "buckets upto 60% of each memory quota\") gen_load = BlobGenerator('mike',", "and 1 Memcached Buckets. 2) Total memory quota allocated for", "0.6 / self.value_size num_items = num_items / len(self.buckets) self.log.info(\"Load initial", "= BlobGenerator('mike', 'mike-', self.value_size, start=0, end=last_key_to_expire) load = self.cluster.async_load_gen_docs(self.master, bucket_to_load.name,", "#docs.docs.DocsTests.test_load_memory,nodes_init=3,standard_buckets=3,memcached_buckets=1,replicas=2,quota_percent=75 \"\"\" 1) Configure a cluster with 4 Couchbase Buckets", "% str(e)) else: raise e else: if error: self.fail(\"Able to", "quota allocated for Couchbase should be approx. 
75% (12G) of", "\"create\", 0) self.log.info(\"Insert new items upto high_wat_mark (75% of memory", "\"\"\" 1) Configure a cluster with 4 Couchbase Buckets and", "gen_load = BlobGenerator('mike', 'mike-', self.value_size, start=num_items, end=new_num_items + num_items) load", "bucket_to_load.name, gen_load, bucket_to_load.kvs[1], 'create', compression=self.sdk_compression) load.result() end_time = time.time() +", "(75% of memory quota)\") for bucket in self.buckets: if bucket.type", "end=self.num_items) self.log.info(\"create %s documents...\" % (self.num_items)) try: self._load_all_buckets(self.master, gen_load, \"create\",", "gen_load, bucket_to_load.kvs[1], 'create', compression=self.sdk_compression) load.result() end_time = time.time() + 60*60*3", "= self.quota * 1024 * 0.15 / self.value_size gen_load =", "self._load_all_buckets(self.master, gen_load, \"create\", 0) self.log.info(\"Insert new items upto high_wat_mark (75%", "gen_expire, bucket_to_load.kvs[1], 'update', exp=1, compression=self.sdk_compression)) tasks.append(self.cluster.async_load_gen_docs(self.master, bucket_to_load.name, gen_delete, bucket_to_load.kvs[1], 'delete',", "start=current_num + 6600, end=current_num + 7000) tasks = [] tasks.append(self.cluster.async_load_gen_docs(self.master,", "self.quota * 1024 * 0.6 / self.value_size num_items = num_items", "out of Memory\" error (UI) \"\"\" def test_load_memory(self): num_items =", "with 4 Couchbase Buckets and 1 Memcached Buckets. 2) Total", "a cluster with 4 Couchbase Buckets and 1 Memcached Buckets.", "1 Memcached Buckets. 
2) Total memory quota allocated for Couchbase", "update existing items across buckets\") gen_load = BlobGenerator('mike', 'mike-', self.value_size,", "e else: if error: self.fail(\"Able to create documents with value:", "DocsTests(BaseTestCase): def setUp(self): super(DocsTests, self).setUp() def tearDown(self): super(DocsTests, self).tearDown() def", "load.result() end_time = time.time() + 60*60*3 while time.time() < end_time:", "should be approx. 75% (12G) of total RAM. 3) Load", "\"number\": {0}, \"first_name\": \"{1}\" }}' gen_load = DocumentGenerator('test_docs', template, [number,],", "create documents with value: %s\" % str(number)) #docs.docs.DocsTests.test_load_memory,nodes_init=3,standard_buckets=3,memcached_buckets=1,replicas=2,quota_percent=75 \"\"\" 1)", "end=new_num_items + num_items) load = self.cluster.async_load_gen_docs(self.master, bucket_to_load.name, gen_load, bucket_to_load.kvs[1], 'create',", "self.value_size, start=0, end=last_key_to_expire) load = self.cluster.async_load_gen_docs(self.master, bucket_to_load.name, gen_expire, bucket_to_load.kvs[1], 'update',", "self.cluster.async_wait_for_stats(self.servers[:self.nodes_init], bucket_to_load, 'all', 'ep_total_del_items', '==', num_items * 3) while wait_task.state", "4) Pick one bucket and do the following (5) to", "following (5) to (8) 5) Insert new items upto high_wat_mark", "_ = bucket_to_load.kvs[1].key_set() last_key_to_expire = remain_keys[0.9 * len(remain_keys)][4:] gen_expire =", "(self.num_items)) try: self._load_all_buckets(self.master, gen_load, \"create\", 0) self._verify_stats_all_buckets([self.master]) except Exception as", "~= 8:2)\") current_num = 0 wait_task = self.cluster.async_wait_for_stats(self.servers[:self.nodes_init], bucket_to_load, 'all',", "\"create\", 0) self._verify_stats_all_buckets([self.master]) except Exception as e: if error: self.log.info(\"Unable", "run into \"Hard out of Memory\" error (UI) \"\"\" def", "'ep_total_del_items', '==', num_items * 3) while 
wait_task.state != \"FINISHED\": gen_update", "'mike-', self.value_size, start=num_items, end=new_num_items + num_items) load = self.cluster.async_load_gen_docs(self.master, bucket_to_load.name,", "of total RAM. 3) Load initial data on all buckets", "memory quota allocated for Couchbase should be approx. 75% (12G)", "across buckets 10) See if we can run into \"Hard", "end_time: self.log.info(\"check memUsed\") rest = RestConnection(self.master) for bucket in rest.get_buckets():", "4 Couchbase Buckets and 1 Memcached Buckets. 2) Total memory", "!= \"FINISHED\": gen_update = BlobGenerator('mike', 'mike-', self.value_size, start=current_num, end=current_num +", "tasks: task.result() current_num += 7000 self.log.info(\"Expire 90% of remaining items\")", "self.value_size, start=current_num + 6600, end=current_num + 7000) tasks = []", "[number,], first, start=0, end=self.num_items) self.log.info(\"create %s documents...\" % (self.num_items)) try:", "from couchbase_helper.documentgenerator import BlobGenerator class DocsTests(BaseTestCase): def setUp(self): super(DocsTests, self).setUp()", "\"first_name\": \"{1}\" }}' gen_load = DocumentGenerator('test_docs', template, [number,], first, start=0,", "memory quota 4) Pick one bucket and do the following", "0) self.log.info(\"Insert new items upto high_wat_mark (75% of memory quota)\")", "first, start=0, end=self.num_items) self.log.info(\"create %s documents...\" % (self.num_items)) try: self._load_all_buckets(self.master,", "gen_delete = BlobGenerator('mike', 'mike-', self.value_size, start=current_num + 6600, end=current_num +", "str(e)) else: raise e else: if error: self.fail(\"Able to create", "new items upto high_wat_mark (75% of memory quota) 6) Expire/Delete/update", "len(self.buckets) self.log.info(\"Load initial data on all buckets upto 60% of", "\"Hard out of Memory\" error (UI) \"\"\" def test_load_memory(self): num_items", "being loaded in (3)) 8) Expire 90% of remaining items", "if we can run into \"Hard out of 
Memory\" error", "3) while wait_task.state != \"FINISHED\": gen_update = BlobGenerator('mike', 'mike-', self.value_size,", "BlobGenerator class DocsTests(BaseTestCase): def setUp(self): super(DocsTests, self).setUp() def tearDown(self): super(DocsTests,", "import time import logger from basetestcase import BaseTestCase from couchbase_helper.documentgenerator", "end=num_items) self._load_all_buckets(self.master, gen_load, \"create\", 0) self.log.info(\"Insert new items upto high_wat_mark", "error = self.input.param(\"error\", False) number = 2**degree first = ['james',", "or update existing items across buckets\") gen_load = BlobGenerator('mike', 'mike-',", "= self.cluster.async_wait_for_stats(self.servers[:self.nodes_init], bucket_to_load, 'all', 'ep_total_del_items', '==', num_items * 3) while", "documents with value: %s\" % str(number)) #docs.docs.DocsTests.test_load_memory,nodes_init=3,standard_buckets=3,memcached_buckets=1,replicas=2,quota_percent=75 \"\"\" 1) Configure", "self.log.info(\"Expire/Delete/update random items (ratio \\ of expiration vs delete ~=", "basetestcase import BaseTestCase from couchbase_helper.documentgenerator import DocumentGenerator from membase.api.rest_client import", "upto high_wat_mark (75% of memory quota) 6) Expire/Delete/update random items", "8:2)\") current_num = 0 wait_task = self.cluster.async_wait_for_stats(self.servers[:self.nodes_init], bucket_to_load, 'all', 'ep_total_del_items',", "cluster with 4 Couchbase Buckets and 1 Memcached Buckets. 
2)", "start=current_num, end=current_num + 5000) gen_expire = BlobGenerator('mike', 'mike-', self.value_size, start=current_num", "BlobGenerator('mike', 'mike-', self.value_size, start=0, end=last_key_to_expire) load = self.cluster.async_load_gen_docs(self.master, bucket_to_load.name, gen_expire,", "with value: %s\" % str(number)) #docs.docs.DocsTests.test_load_memory,nodes_init=3,standard_buckets=3,memcached_buckets=1,replicas=2,quota_percent=75 \"\"\" 1) Configure a", "setUp(self): super(DocsTests, self).setUp() def tearDown(self): super(DocsTests, self).tearDown() def test_docs_int_big_values(self): degree", "documents as expected: %s\" % str(e)) else: raise e else:", "raise e else: if error: self.fail(\"Able to create documents with", "error (UI) \"\"\" def test_load_memory(self): num_items = self.quota * 1024", "end_time = time.time() + 60*60*3 while time.time() < end_time: self.log.info(\"check", "'memcached': bucket_to_load = bucket break new_num_items = self.quota * 1024", "self.value_size num_items = num_items / len(self.buckets) self.log.info(\"Load initial data on", "break new_num_items = self.quota * 1024 * 0.15 / self.value_size", "7000 self.log.info(\"Expire 90% of remaining items\") remain_keys, _ = bucket_to_load.kvs[1].key_set()", "bucket_to_load.name, gen_expire, bucket_to_load.kvs[1], 'update', exp=1, compression=self.sdk_compression) load.result() self.log.info(\"Insert new items", "or update existing items across buckets 10) See if we", "num_items = self.quota * 1024 * 0.6 / self.value_size num_items", "BlobGenerator('mike', 'mike-', self.value_size, start=num_items, end=new_num_items + num_items) load = self.cluster.async_load_gen_docs(self.master,", "task in tasks: task.result() current_num += 7000 self.log.info(\"Expire 90% of", "upto 60% of each memory quota 4) Pick one bucket", "'mike-', self.value_size, start=current_num + 5000, end=current_num + 6600) gen_delete =", "'mike-', self.value_size, start=0, end=num_items) 
self._load_all_buckets(self.master, gen_load, \"create\", 0) self.log.info(\"Insert new", "(5) to (8) 5) Insert new items upto high_wat_mark (75%", "number = 2**degree first = ['james', 'sharon'] template = '{{", "into \"Hard out of Memory\" error (UI) \"\"\" def test_load_memory(self):", "(3 X # of items being loaded in (3)) 8)", "bucket_to_load.kvs[1].key_set() last_key_to_expire = remain_keys[0.9 * len(remain_keys)][4:] gen_expire = BlobGenerator('mike', 'mike-',", "remain_keys[0.9 * len(remain_keys)][4:] gen_expire = BlobGenerator('mike', 'mike-', self.value_size, start=0, end=last_key_to_expire)", "%s\" % str(number)) #docs.docs.DocsTests.test_load_memory,nodes_init=3,standard_buckets=3,memcached_buckets=1,replicas=2,quota_percent=75 \"\"\" 1) Configure a cluster with", "in self.buckets: if bucket.type != 'memcached': bucket_to_load = bucket break", "num_items / len(self.buckets) self.log.info(\"Load initial data on all buckets upto", "%s: memUsed %s\\ ****************************\" % (bucket.name, bucket.stats.memUsed)) self.log.info(\"Expire/Delete/update random items", "len(remain_keys)][4:] gen_expire = BlobGenerator('mike', 'mike-', self.value_size, start=0, end=last_key_to_expire) load =", "'all', 'ep_total_del_items', '==', num_items * 3) while wait_task.state != \"FINISHED\":", "compression=self.sdk_compression)) tasks.append(self.cluster.async_load_gen_docs(self.master, bucket_to_load.name, gen_delete, bucket_to_load.kvs[1], 'delete', compression=self.sdk_compression)) for task in", "= self.cluster.async_load_gen_docs(self.master, bucket_to_load.name, gen_load, bucket_to_load.kvs[1], 'create', compression=self.sdk_compression) load.result() end_time =", "60*60*3 while time.time() < end_time: self.log.info(\"check memUsed\") rest = RestConnection(self.master)", "new items or update existing items across buckets 10) See", "from basetestcase import BaseTestCase from couchbase_helper.documentgenerator import DocumentGenerator from 
membase.api.rest_client", "Load initial data on all buckets upto 60% of each", "is ~= (3 X # of items being loaded in", "exp=1, compression=self.sdk_compression) load.result() self.log.info(\"Insert new items or update existing items", "vs delete ~= 8:2) 7) Repeat (6) until \"ep_total_del_items\" is", "in rest.get_buckets(): self.log.info(\"*****************************\\ bucket %s: memUsed %s\\ ****************************\" % (bucket.name,", "7000) tasks = [] tasks.append(self.cluster.async_load_gen_docs(self.master, bucket_to_load.name, gen_update, bucket_to_load.kvs[1], 'update', compression=self.sdk_compression))", "tasks = [] tasks.append(self.cluster.async_load_gen_docs(self.master, bucket_to_load.name, gen_update, bucket_to_load.kvs[1], 'update', compression=self.sdk_compression)) tasks.append(self.cluster.async_load_gen_docs(self.master,", "Expire/Delete/update random items (ratio of expiration vs delete ~= 8:2)", "end=current_num + 6600) gen_delete = BlobGenerator('mike', 'mike-', self.value_size, start=current_num +", "new items or update existing items across buckets\") gen_load =", "~= 8:2) 7) Repeat (6) until \"ep_total_del_items\" is ~= (3", "8) Expire 90% of remaining items 9) Insert new items", "loaded in (3)) 8) Expire 90% of remaining items 9)", "!= 'memcached': bucket_to_load = bucket break new_num_items = self.quota *", "= BlobGenerator('mike', 'mike-', self.value_size, start=0, end=num_items) self._load_all_buckets(self.master, gen_load, \"create\", 0)", "time import logger from basetestcase import BaseTestCase from couchbase_helper.documentgenerator import", "upto high_wat_mark (75% of memory quota)\") for bucket in self.buckets:", "= BlobGenerator('mike', 'mike-', self.value_size, start=current_num + 5000, end=current_num + 6600)", "self.value_size gen_load = BlobGenerator('mike', 'mike-', self.value_size, start=num_items, end=new_num_items + num_items)", "import logger from basetestcase import BaseTestCase from 
couchbase_helper.documentgenerator import DocumentGenerator", "bucket_to_load.kvs[1], 'delete', compression=self.sdk_compression)) for task in tasks: task.result() current_num +=", "0.15 / self.value_size gen_load = BlobGenerator('mike', 'mike-', self.value_size, start=num_items, end=new_num_items", "def test_load_memory(self): num_items = self.quota * 1024 * 0.6 /", "Expire 90% of remaining items 9) Insert new items or", "exp=1, compression=self.sdk_compression)) tasks.append(self.cluster.async_load_gen_docs(self.master, bucket_to_load.name, gen_delete, bucket_to_load.kvs[1], 'delete', compression=self.sdk_compression)) for task", "Insert new items upto high_wat_mark (75% of memory quota) 6)", "% (self.num_items)) try: self._load_all_buckets(self.master, gen_load, \"create\", 0) self._verify_stats_all_buckets([self.master]) except Exception", "rest.get_buckets(): self.log.info(\"*****************************\\ bucket %s: memUsed %s\\ ****************************\" % (bucket.name, bucket.stats.memUsed))", "'delete', compression=self.sdk_compression)) for task in tasks: task.result() current_num += 7000", "* 1024 * 0.6 / self.value_size num_items = num_items /", "else: raise e else: if error: self.fail(\"Able to create documents", "%s\" % str(e)) else: raise e else: if error: self.fail(\"Able", "* 0.15 / self.value_size gen_load = BlobGenerator('mike', 'mike-', self.value_size, start=num_items,", "(ratio \\ of expiration vs delete ~= 8:2)\") current_num =", "\"{1}\" }}' gen_load = DocumentGenerator('test_docs', template, [number,], first, start=0, end=self.num_items)", "upto 60% of each memory quota\") gen_load = BlobGenerator('mike', 'mike-',", "gen_expire = BlobGenerator('mike', 'mike-', self.value_size, start=current_num + 5000, end=current_num +", "bucket.stats.memUsed)) self.log.info(\"Expire/Delete/update random items (ratio \\ of expiration vs delete", "else: if error: self.fail(\"Able to create documents with value: %s\"", "items being loaded in (3)) 8) 
Expire 90% of remaining", "last_key_to_expire = remain_keys[0.9 * len(remain_keys)][4:] gen_expire = BlobGenerator('mike', 'mike-', self.value_size,", "on all buckets upto 60% of each memory quota 4)", "bucket_to_load.kvs[1], 'update', compression=self.sdk_compression)) tasks.append(self.cluster.async_load_gen_docs(self.master, bucket_to_load.name, gen_expire, bucket_to_load.kvs[1], 'update', exp=1, compression=self.sdk_compression))", "of each memory quota\") gen_load = BlobGenerator('mike', 'mike-', self.value_size, start=0,", "items upto high_wat_mark (75% of memory quota)\") for bucket in", "self.quota * 1024 * 0.15 / self.value_size gen_load = BlobGenerator('mike',", "BlobGenerator('mike', 'mike-', self.value_size, start=current_num + 5000, end=current_num + 6600) gen_delete", "self.log.info(\"create %s documents...\" % (self.num_items)) try: self._load_all_buckets(self.master, gen_load, \"create\", 0)", "< end_time: self.log.info(\"check memUsed\") rest = RestConnection(self.master) for bucket in", "value: %s\" % str(number)) #docs.docs.DocsTests.test_load_memory,nodes_init=3,standard_buckets=3,memcached_buckets=1,replicas=2,quota_percent=75 \"\"\" 1) Configure a cluster", "bucket_to_load, 'all', 'ep_total_del_items', '==', num_items * 3) while wait_task.state !=", "vs delete ~= 8:2)\") current_num = 0 wait_task = self.cluster.async_wait_for_stats(self.servers[:self.nodes_init],", "(UI) \"\"\" def test_load_memory(self): num_items = self.quota * 1024 *", "rest = RestConnection(self.master) for bucket in rest.get_buckets(): self.log.info(\"*****************************\\ bucket %s:", "initial data on all buckets upto 60% of each memory", "quota) 6) Expire/Delete/update random items (ratio of expiration vs delete", "(bucket.name, bucket.stats.memUsed)) self.log.info(\"Expire/Delete/update random items (ratio \\ of expiration vs", "template = '{{ \"number\": {0}, \"first_name\": \"{1}\" }}' gen_load =", "= self.quota * 1024 * 0.6 / self.value_size num_items =", 
"/ self.value_size gen_load = BlobGenerator('mike', 'mike-', self.value_size, start=num_items, end=new_num_items +", "+ 7000) tasks = [] tasks.append(self.cluster.async_load_gen_docs(self.master, bucket_to_load.name, gen_update, bucket_to_load.kvs[1], 'update',", "data on all buckets upto 60% of each memory quota\")", "(3)) 8) Expire 90% of remaining items 9) Insert new", "start=new_num_items + num_items, end=new_num_items * 2 + num_items) self._load_all_buckets(self.master, gen_load,", "for bucket in rest.get_buckets(): self.log.info(\"*****************************\\ bucket %s: memUsed %s\\ ****************************\"", "tearDown(self): super(DocsTests, self).tearDown() def test_docs_int_big_values(self): degree = self.input.param(\"degree\", 53) error", "new_num_items = self.quota * 1024 * 0.15 / self.value_size gen_load", "bucket_to_load = bucket break new_num_items = self.quota * 1024 *", "Repeat (6) until \"ep_total_del_items\" is ~= (3 X # of", "from membase.api.rest_client import RestConnection from couchbase_helper.documentgenerator import BlobGenerator class DocsTests(BaseTestCase):", "self.value_size, start=new_num_items + num_items, end=new_num_items * 2 + num_items) self._load_all_buckets(self.master,", "items (ratio \\ of expiration vs delete ~= 8:2)\") current_num", "existing items across buckets\") gen_load = BlobGenerator('mike', 'mike-', self.value_size, start=new_num_items", "'mike-', self.value_size, start=current_num + 6600, end=current_num + 7000) tasks =", "self.value_size, start=num_items, end=new_num_items + num_items) load = self.cluster.async_load_gen_docs(self.master, bucket_to_load.name, gen_load,", "bucket_to_load.kvs[1], 'update', exp=1, compression=self.sdk_compression) load.result() self.log.info(\"Insert new items or update", "90% of remaining items\") remain_keys, _ = bucket_to_load.kvs[1].key_set() last_key_to_expire =", "gen_expire = BlobGenerator('mike', 'mike-', self.value_size, start=0, end=last_key_to_expire) load = 
self.cluster.async_load_gen_docs(self.master,", "(75% of memory quota) 6) Expire/Delete/update random items (ratio of", "from couchbase_helper.documentgenerator import DocumentGenerator from membase.api.rest_client import RestConnection from couchbase_helper.documentgenerator", "try: self._load_all_buckets(self.master, gen_load, \"create\", 0) self._verify_stats_all_buckets([self.master]) except Exception as e:", "= 0 wait_task = self.cluster.async_wait_for_stats(self.servers[:self.nodes_init], bucket_to_load, 'all', 'ep_total_del_items', '==', num_items", "+ 5000) gen_expire = BlobGenerator('mike', 'mike-', self.value_size, start=current_num + 5000,", "total RAM. 3) Load initial data on all buckets upto", "items (ratio of expiration vs delete ~= 8:2) 7) Repeat", "on all buckets upto 60% of each memory quota\") gen_load", "Pick one bucket and do the following (5) to (8)", "import BlobGenerator class DocsTests(BaseTestCase): def setUp(self): super(DocsTests, self).setUp() def tearDown(self):", "Couchbase Buckets and 1 Memcached Buckets. 
2) Total memory quota", "5000) gen_expire = BlobGenerator('mike', 'mike-', self.value_size, start=current_num + 5000, end=current_num", "of Memory\" error (UI) \"\"\" def test_load_memory(self): num_items = self.quota", "tasks.append(self.cluster.async_load_gen_docs(self.master, bucket_to_load.name, gen_expire, bucket_to_load.kvs[1], 'update', exp=1, compression=self.sdk_compression)) tasks.append(self.cluster.async_load_gen_docs(self.master, bucket_to_load.name, gen_delete,", "as e: if error: self.log.info(\"Unable to create documents as expected:", "10) See if we can run into \"Hard out of", "= 2**degree first = ['james', 'sharon'] template = '{{ \"number\":", "bucket break new_num_items = self.quota * 1024 * 0.15 /", "= BlobGenerator('mike', 'mike-', self.value_size, start=current_num, end=current_num + 5000) gen_expire =", "* 1024 * 0.15 / self.value_size gen_load = BlobGenerator('mike', 'mike-',", "of each memory quota 4) Pick one bucket and do", "/ len(self.buckets) self.log.info(\"Load initial data on all buckets upto 60%", "'mike-', self.value_size, start=current_num, end=current_num + 5000) gen_expire = BlobGenerator('mike', 'mike-',", "'mike-', self.value_size, start=new_num_items + num_items, end=new_num_items * 2 + num_items)", "3) Load initial data on all buckets upto 60% of", "of memory quota) 6) Expire/Delete/update random items (ratio of expiration", "# of items being loaded in (3)) 8) Expire 90%", "bucket in self.buckets: if bucket.type != 'memcached': bucket_to_load = bucket", "except Exception as e: if error: self.log.info(\"Unable to create documents", "error: self.fail(\"Able to create documents with value: %s\" % str(number))", "be approx. 75% (12G) of total RAM. 
3) Load initial", "to create documents with value: %s\" % str(number)) #docs.docs.DocsTests.test_load_memory,nodes_init=3,standard_buckets=3,memcached_buckets=1,replicas=2,quota_percent=75 \"\"\"", "self._verify_stats_all_buckets([self.master]) except Exception as e: if error: self.log.info(\"Unable to create", "self.input.param(\"error\", False) number = 2**degree first = ['james', 'sharon'] template", "while time.time() < end_time: self.log.info(\"check memUsed\") rest = RestConnection(self.master) for", "bucket_to_load.kvs[1], 'create', compression=self.sdk_compression) load.result() end_time = time.time() + 60*60*3 while", "time.time() < end_time: self.log.info(\"check memUsed\") rest = RestConnection(self.master) for bucket", "end=current_num + 7000) tasks = [] tasks.append(self.cluster.async_load_gen_docs(self.master, bucket_to_load.name, gen_update, bucket_to_load.kvs[1],", "num_items = num_items / len(self.buckets) self.log.info(\"Load initial data on all", "X # of items being loaded in (3)) 8) Expire", "'sharon'] template = '{{ \"number\": {0}, \"first_name\": \"{1}\" }}' gen_load", "super(DocsTests, self).tearDown() def test_docs_int_big_values(self): degree = self.input.param(\"degree\", 53) error =", "can run into \"Hard out of Memory\" error (UI) \"\"\"", "each memory quota 4) Pick one bucket and do the", "buckets\") gen_load = BlobGenerator('mike', 'mike-', self.value_size, start=new_num_items + num_items, end=new_num_items", "delete ~= 8:2)\") current_num = 0 wait_task = self.cluster.async_wait_for_stats(self.servers[:self.nodes_init], bucket_to_load,", "remaining items\") remain_keys, _ = bucket_to_load.kvs[1].key_set() last_key_to_expire = remain_keys[0.9 *", "high_wat_mark (75% of memory quota) 6) Expire/Delete/update random items (ratio", "= self.input.param(\"error\", False) number = 2**degree first = ['james', 'sharon']", "bucket %s: memUsed %s\\ ****************************\" % (bucket.name, bucket.stats.memUsed)) 
self.log.info(\"Expire/Delete/update random", "self._load_all_buckets(self.master, gen_load, \"create\", 0) self._verify_stats_all_buckets([self.master]) except Exception as e: if", "start=num_items, end=new_num_items + num_items) load = self.cluster.async_load_gen_docs(self.master, bucket_to_load.name, gen_load, bucket_to_load.kvs[1],", "in (3)) 8) Expire 90% of remaining items 9) Insert", "wait_task.state != \"FINISHED\": gen_update = BlobGenerator('mike', 'mike-', self.value_size, start=current_num, end=current_num", "9) Insert new items or update existing items across buckets", "= BlobGenerator('mike', 'mike-', self.value_size, start=new_num_items + num_items, end=new_num_items * 2", "~= (3 X # of items being loaded in (3))", "as expected: %s\" % str(e)) else: raise e else: if", "bucket_to_load.kvs[1], 'update', exp=1, compression=self.sdk_compression)) tasks.append(self.cluster.async_load_gen_docs(self.master, bucket_to_load.name, gen_delete, bucket_to_load.kvs[1], 'delete', compression=self.sdk_compression))", "+ num_items, end=new_num_items * 2 + num_items) self._load_all_buckets(self.master, gen_load, \"create\",", "class DocsTests(BaseTestCase): def setUp(self): super(DocsTests, self).setUp() def tearDown(self): super(DocsTests, self).tearDown()", "existing items across buckets 10) See if we can run", "1024 * 0.15 / self.value_size gen_load = BlobGenerator('mike', 'mike-', self.value_size,", "degree = self.input.param(\"degree\", 53) error = self.input.param(\"error\", False) number =", "90% of remaining items 9) Insert new items or update", "of expiration vs delete ~= 8:2) 7) Repeat (6) until", "create documents as expected: %s\" % str(e)) else: raise e", "self.log.info(\"Expire 90% of remaining items\") remain_keys, _ = bucket_to_load.kvs[1].key_set() last_key_to_expire", "5) Insert new items upto high_wat_mark (75% of memory quota)", "e: if error: self.log.info(\"Unable to create documents as expected: %s\"", 
"tasks.append(self.cluster.async_load_gen_docs(self.master, bucket_to_load.name, gen_update, bucket_to_load.kvs[1], 'update', compression=self.sdk_compression)) tasks.append(self.cluster.async_load_gen_docs(self.master, bucket_to_load.name, gen_expire, bucket_to_load.kvs[1],", "num_items) load = self.cluster.async_load_gen_docs(self.master, bucket_to_load.name, gen_load, bucket_to_load.kvs[1], 'create', compression=self.sdk_compression) load.result()", "* len(remain_keys)][4:] gen_expire = BlobGenerator('mike', 'mike-', self.value_size, start=0, end=last_key_to_expire) load", "= BlobGenerator('mike', 'mike-', self.value_size, start=current_num + 6600, end=current_num + 7000)", "import DocumentGenerator from membase.api.rest_client import RestConnection from couchbase_helper.documentgenerator import BlobGenerator", "the following (5) to (8) 5) Insert new items upto", "to create documents as expected: %s\" % str(e)) else: raise", "items or update existing items across buckets\") gen_load = BlobGenerator('mike',", "self.fail(\"Able to create documents with value: %s\" % str(number)) #docs.docs.DocsTests.test_load_memory,nodes_init=3,standard_buckets=3,memcached_buckets=1,replicas=2,quota_percent=75", "items across buckets 10) See if we can run into", "self.log.info(\"Insert new items upto high_wat_mark (75% of memory quota)\") for", "all buckets upto 60% of each memory quota 4) Pick", "each memory quota\") gen_load = BlobGenerator('mike', 'mike-', self.value_size, start=0, end=num_items)", "time.time() + 60*60*3 while time.time() < end_time: self.log.info(\"check memUsed\") rest", "task.result() current_num += 7000 self.log.info(\"Expire 90% of remaining items\") remain_keys,", "do the following (5) to (8) 5) Insert new items", "2**degree first = ['james', 'sharon'] template = '{{ \"number\": {0},", "gen_load = BlobGenerator('mike', 'mike-', self.value_size, start=new_num_items + num_items, end=new_num_items *", "'update', compression=self.sdk_compression)) 
tasks.append(self.cluster.async_load_gen_docs(self.master, bucket_to_load.name, gen_expire, bucket_to_load.kvs[1], 'update', exp=1, compression=self.sdk_compression)) tasks.append(self.cluster.async_load_gen_docs(self.master,", "5000, end=current_num + 6600) gen_delete = BlobGenerator('mike', 'mike-', self.value_size, start=current_num", "+= 7000 self.log.info(\"Expire 90% of remaining items\") remain_keys, _ =", "RestConnection from couchbase_helper.documentgenerator import BlobGenerator class DocsTests(BaseTestCase): def setUp(self): super(DocsTests,", "def tearDown(self): super(DocsTests, self).tearDown() def test_docs_int_big_values(self): degree = self.input.param(\"degree\", 53)", "couchbase_helper.documentgenerator import DocumentGenerator from membase.api.rest_client import RestConnection from couchbase_helper.documentgenerator import", "compression=self.sdk_compression)) tasks.append(self.cluster.async_load_gen_docs(self.master, bucket_to_load.name, gen_expire, bucket_to_load.kvs[1], 'update', exp=1, compression=self.sdk_compression)) tasks.append(self.cluster.async_load_gen_docs(self.master, bucket_to_load.name,", "gen_update, bucket_to_load.kvs[1], 'update', compression=self.sdk_compression)) tasks.append(self.cluster.async_load_gen_docs(self.master, bucket_to_load.name, gen_expire, bucket_to_load.kvs[1], 'update', exp=1,", "super(DocsTests, self).setUp() def tearDown(self): super(DocsTests, self).tearDown() def test_docs_int_big_values(self): degree =", "Total memory quota allocated for Couchbase should be approx. 75%", "Couchbase should be approx. 75% (12G) of total RAM. 
3)", "+ 60*60*3 while time.time() < end_time: self.log.info(\"check memUsed\") rest =", "random items (ratio of expiration vs delete ~= 8:2) 7)", "= '{{ \"number\": {0}, \"first_name\": \"{1}\" }}' gen_load = DocumentGenerator('test_docs',", "self.value_size, start=current_num + 5000, end=current_num + 6600) gen_delete = BlobGenerator('mike',", "end=current_num + 5000) gen_expire = BlobGenerator('mike', 'mike-', self.value_size, start=current_num +", "+ 6600, end=current_num + 7000) tasks = [] tasks.append(self.cluster.async_load_gen_docs(self.master, bucket_to_load.name,", "self.value_size, start=0, end=num_items) self._load_all_buckets(self.master, gen_load, \"create\", 0) self.log.info(\"Insert new items", "bucket and do the following (5) to (8) 5) Insert", "= BlobGenerator('mike', 'mike-', self.value_size, start=num_items, end=new_num_items + num_items) load =", "= self.input.param(\"degree\", 53) error = self.input.param(\"error\", False) number = 2**degree", "of items being loaded in (3)) 8) Expire 90% of", "* 3) while wait_task.state != \"FINISHED\": gen_update = BlobGenerator('mike', 'mike-',", "'==', num_items * 3) while wait_task.state != \"FINISHED\": gen_update =", "if bucket.type != 'memcached': bucket_to_load = bucket break new_num_items =", "data on all buckets upto 60% of each memory quota", "= num_items / len(self.buckets) self.log.info(\"Load initial data on all buckets", "if error: self.fail(\"Able to create documents with value: %s\" %", "all buckets upto 60% of each memory quota\") gen_load =", "load.result() self.log.info(\"Insert new items or update existing items across buckets\")", "while wait_task.state != \"FINISHED\": gen_update = BlobGenerator('mike', 'mike-', self.value_size, start=current_num,", "compression=self.sdk_compression)) for task in tasks: task.result() current_num += 7000 self.log.info(\"Expire", "\"FINISHED\": gen_update = BlobGenerator('mike', 'mike-', self.value_size, start=current_num, end=current_num + 5000)", 
"'create', compression=self.sdk_compression) load.result() end_time = time.time() + 60*60*3 while time.time()", "RestConnection(self.master) for bucket in rest.get_buckets(): self.log.info(\"*****************************\\ bucket %s: memUsed %s\\", "memory quota\") gen_load = BlobGenerator('mike', 'mike-', self.value_size, start=0, end=num_items) self._load_all_buckets(self.master,", "75% (12G) of total RAM. 3) Load initial data on", "test_docs_int_big_values(self): degree = self.input.param(\"degree\", 53) error = self.input.param(\"error\", False) number", "\"ep_total_del_items\" is ~= (3 X # of items being loaded", "buckets upto 60% of each memory quota 4) Pick one", "self).setUp() def tearDown(self): super(DocsTests, self).tearDown() def test_docs_int_big_values(self): degree = self.input.param(\"degree\",", "60% of each memory quota 4) Pick one bucket and", "approx. 75% (12G) of total RAM. 3) Load initial data", "gen_load, \"create\", 0) self.log.info(\"Insert new items upto high_wat_mark (75% of", "remain_keys, _ = bucket_to_load.kvs[1].key_set() last_key_to_expire = remain_keys[0.9 * len(remain_keys)][4:] gen_expire", "items upto high_wat_mark (75% of memory quota) 6) Expire/Delete/update random", "start=0, end=num_items) self._load_all_buckets(self.master, gen_load, \"create\", 0) self.log.info(\"Insert new items upto", "memUsed %s\\ ****************************\" % (bucket.name, bucket.stats.memUsed)) self.log.info(\"Expire/Delete/update random items (ratio", "+ num_items) load = self.cluster.async_load_gen_docs(self.master, bucket_to_load.name, gen_load, bucket_to_load.kvs[1], 'create', compression=self.sdk_compression)", "(12G) of total RAM. 
3) Load initial data on all", "self.log.info(\"Load initial data on all buckets upto 60% of each", "random items (ratio \\ of expiration vs delete ~= 8:2)\")", "high_wat_mark (75% of memory quota)\") for bucket in self.buckets: if", "DocumentGenerator('test_docs', template, [number,], first, start=0, end=self.num_items) self.log.info(\"create %s documents...\" %", "start=0, end=self.num_items) self.log.info(\"create %s documents...\" % (self.num_items)) try: self._load_all_buckets(self.master, gen_load,", "See if we can run into \"Hard out of Memory\"", "in tasks: task.result() current_num += 7000 self.log.info(\"Expire 90% of remaining", "BlobGenerator('mike', 'mike-', self.value_size, start=new_num_items + num_items, end=new_num_items * 2 +", "num_items * 3) while wait_task.state != \"FINISHED\": gen_update = BlobGenerator('mike',", "str(number)) #docs.docs.DocsTests.test_load_memory,nodes_init=3,standard_buckets=3,memcached_buckets=1,replicas=2,quota_percent=75 \"\"\" 1) Configure a cluster with 4 Couchbase", "+ 6600) gen_delete = BlobGenerator('mike', 'mike-', self.value_size, start=current_num + 6600,", "1) Configure a cluster with 4 Couchbase Buckets and 1", "{0}, \"first_name\": \"{1}\" }}' gen_load = DocumentGenerator('test_docs', template, [number,], first,", "bucket_to_load.name, gen_update, bucket_to_load.kvs[1], 'update', compression=self.sdk_compression)) tasks.append(self.cluster.async_load_gen_docs(self.master, bucket_to_load.name, gen_expire, bucket_to_load.kvs[1], 'update',", "BlobGenerator('mike', 'mike-', self.value_size, start=0, end=num_items) self._load_all_buckets(self.master, gen_load, \"create\", 0) self.log.info(\"Insert", "until \"ep_total_del_items\" is ~= (3 X # of items being", "* 0.6 / self.value_size num_items = num_items / len(self.buckets) self.log.info(\"Load", "of remaining items\") remain_keys, _ = bucket_to_load.kvs[1].key_set() last_key_to_expire = remain_keys[0.9", "of remaining items 9) Insert new items or update existing", 
"['james', 'sharon'] template = '{{ \"number\": {0}, \"first_name\": \"{1}\" }}'", "start=0, end=last_key_to_expire) load = self.cluster.async_load_gen_docs(self.master, bucket_to_load.name, gen_expire, bucket_to_load.kvs[1], 'update', exp=1,", "self.log.info(\"*****************************\\ bucket %s: memUsed %s\\ ****************************\" % (bucket.name, bucket.stats.memUsed)) self.log.info(\"Expire/Delete/update", "BaseTestCase from couchbase_helper.documentgenerator import DocumentGenerator from membase.api.rest_client import RestConnection from", "= [] tasks.append(self.cluster.async_load_gen_docs(self.master, bucket_to_load.name, gen_update, bucket_to_load.kvs[1], 'update', compression=self.sdk_compression)) tasks.append(self.cluster.async_load_gen_docs(self.master, bucket_to_load.name,", "first = ['james', 'sharon'] template = '{{ \"number\": {0}, \"first_name\":", "import RestConnection from couchbase_helper.documentgenerator import BlobGenerator class DocsTests(BaseTestCase): def setUp(self):", "num_items, end=new_num_items * 2 + num_items) self._load_all_buckets(self.master, gen_load, \"create\", 0)", "expiration vs delete ~= 8:2) 7) Repeat (6) until \"ep_total_del_items\"", "\"\"\" def test_load_memory(self): num_items = self.quota * 1024 * 0.6", "= bucket_to_load.kvs[1].key_set() last_key_to_expire = remain_keys[0.9 * len(remain_keys)][4:] gen_expire = BlobGenerator('mike',", "memory quota) 6) Expire/Delete/update random items (ratio of expiration vs", "(6) until \"ep_total_del_items\" is ~= (3 X # of items", "remaining items 9) Insert new items or update existing items", "= self.cluster.async_load_gen_docs(self.master, bucket_to_load.name, gen_expire, bucket_to_load.kvs[1], 'update', exp=1, compression=self.sdk_compression) load.result() self.log.info(\"Insert", "+ 5000, end=current_num + 6600) gen_delete = BlobGenerator('mike', 'mike-', self.value_size,", "%s documents...\" % (self.num_items)) try: self._load_all_buckets(self.master, gen_load, 
\"create\", 0) self._verify_stats_all_buckets([self.master])", "(ratio of expiration vs delete ~= 8:2) 7) Repeat (6)", "current_num += 7000 self.log.info(\"Expire 90% of remaining items\") remain_keys, _", "BlobGenerator('mike', 'mike-', self.value_size, start=current_num, end=current_num + 5000) gen_expire = BlobGenerator('mike',", "53) error = self.input.param(\"error\", False) number = 2**degree first =", "memUsed\") rest = RestConnection(self.master) for bucket in rest.get_buckets(): self.log.info(\"*****************************\\ bucket", "expiration vs delete ~= 8:2)\") current_num = 0 wait_task =", "self.log.info(\"Insert new items or update existing items across buckets\") gen_load", "%s\\ ****************************\" % (bucket.name, bucket.stats.memUsed)) self.log.info(\"Expire/Delete/update random items (ratio \\", "load = self.cluster.async_load_gen_docs(self.master, bucket_to_load.name, gen_load, bucket_to_load.kvs[1], 'create', compression=self.sdk_compression) load.result() end_time", "bucket_to_load.name, gen_expire, bucket_to_load.kvs[1], 'update', exp=1, compression=self.sdk_compression)) tasks.append(self.cluster.async_load_gen_docs(self.master, bucket_to_load.name, gen_delete, bucket_to_load.kvs[1],", "DocumentGenerator from membase.api.rest_client import RestConnection from couchbase_helper.documentgenerator import BlobGenerator class", "8:2) 7) Repeat (6) until \"ep_total_del_items\" is ~= (3 X", "logger from basetestcase import BaseTestCase from couchbase_helper.documentgenerator import DocumentGenerator from", "self.value_size, start=current_num, end=current_num + 5000) gen_expire = BlobGenerator('mike', 'mike-', self.value_size,", "gen_expire, bucket_to_load.kvs[1], 'update', exp=1, compression=self.sdk_compression) load.result() self.log.info(\"Insert new items or", "bucket.type != 'memcached': bucket_to_load = bucket break new_num_items = self.quota", "'mike-', self.value_size, start=0, end=last_key_to_expire) load = 
self.cluster.async_load_gen_docs(self.master, bucket_to_load.name, gen_expire, bucket_to_load.kvs[1],", "Buckets and 1 Memcached Buckets. 2) Total memory quota allocated", "of expiration vs delete ~= 8:2)\") current_num = 0 wait_task", "gen_load = DocumentGenerator('test_docs', template, [number,], first, start=0, end=self.num_items) self.log.info(\"create %s", "expected: %s\" % str(e)) else: raise e else: if error:", "self.log.info(\"check memUsed\") rest = RestConnection(self.master) for bucket in rest.get_buckets(): self.log.info(\"*****************************\\", "% (bucket.name, bucket.stats.memUsed)) self.log.info(\"Expire/Delete/update random items (ratio \\ of expiration", "delete ~= 8:2) 7) Repeat (6) until \"ep_total_del_items\" is ~=", "def setUp(self): super(DocsTests, self).setUp() def tearDown(self): super(DocsTests, self).tearDown() def test_docs_int_big_values(self):", "memory quota)\") for bucket in self.buckets: if bucket.type != 'memcached':", "= remain_keys[0.9 * len(remain_keys)][4:] gen_expire = BlobGenerator('mike', 'mike-', self.value_size, start=0,", "across buckets\") gen_load = BlobGenerator('mike', 'mike-', self.value_size, start=new_num_items + num_items,", "to (8) 5) Insert new items upto high_wat_mark (75% of", "[] tasks.append(self.cluster.async_load_gen_docs(self.master, bucket_to_load.name, gen_update, bucket_to_load.kvs[1], 'update', compression=self.sdk_compression)) tasks.append(self.cluster.async_load_gen_docs(self.master, bucket_to_load.name, gen_expire," ]
[ "ca :('.format(sub, id_)) return else: print('get excel URL!!!') excel_url =", "BeautifulSoup(r.text, 'lxml') list_td_tag = soup.find_all('td', attrs={'style': 'padding-top:10px'}) all_td_tag = all_td_tag", "input('Nhap ten mon: ') id_ = input('Nhap id mon: ')", "r = requests.get(url) soup = BeautifulSoup(r.text,'lxml') list_span_tags = soup.find_all('span',class_='txt_l4') excel_url", "url == None: print('Khong tim thay mon nao nhu nay", "mon: ') url = get_url_sub(sub,id_,4) if url == None: print('Khong", "main(): sub = input('Nhap ten mon: ') id_ = input('Nhap", "or ((sub+'_'+id_) in str(td_tag.a.contents[0]))): print('\\nComplete!!!') print(' '.join(str(td_tag.a.string).split())) print(str(td_tag.a['href']).replace('..', ROOT)) return", "excel URL!!!') excel_url = get_excel_url(url) excel_url = excel_url.replace(' ','%20') print('Download", "excel_url.replace(' ','%20') print('Download excel file!!!') save_at = 'C:/Users/truon/Desktop/' filename =", "nhu nay ({} {}) ca :('.format(sub, id_)) return else: print('get", "id_)) return else: print('get excel URL!!!') excel_url = get_excel_url(url) excel_url", "= list_span_tags[1].a['href'].replace('..',ROOT) return excel_url # a = get_excel_url('http://pdaotao.duytan.edu.vn/EXAM_LIST_Detail/?ID=52289&lang=VN') def main():", "= requests.get(url) soup = BeautifulSoup(r.text,'lxml') list_span_tags = soup.find_all('span',class_='txt_l4') excel_url =", "str(td_tag.a.contents[0]))): print('\\nComplete!!!') print(' '.join(str(td_tag.a.string).split())) print(str(td_tag.a['href']).replace('..', ROOT)) return str(td_tag.a['href']).replace('..', ROOT) def", "= soup.find_all('td', attrs={'style': 'padding-top:10px'}) all_td_tag = all_td_tag + list_td_tag for", "page): all_td_tag = [] for i in range(1, page+1): print('http://pdaotao.duytan.edu.vn/EXAM_LIST/?page={}&lang=VN'.format(i))", "print(' '.join(str(td_tag.a.string).split())) print(str(td_tag.a['href']).replace('..', ROOT)) return str(td_tag.a['href']).replace('..', 
ROOT) def get_excel_url(url): r", "'lxml') list_td_tag = soup.find_all('td', attrs={'style': 'padding-top:10px'}) all_td_tag = all_td_tag +", "from bs4 import BeautifulSoup import requests from urllib.request import urlretrieve", "in str(td_tag.a.contents[0])) or ((sub+'_'+id_) in str(td_tag.a.contents[0]))): print('\\nComplete!!!') print(' '.join(str(td_tag.a.string).split())) print(str(td_tag.a['href']).replace('..',", "mon: ') id_ = input('Nhap id mon: ') url =", "= BeautifulSoup(r.text,'lxml') list_span_tags = soup.find_all('span',class_='txt_l4') excel_url = list_span_tags[1].a['href'].replace('..',ROOT) return excel_url", "save_at = 'C:/Users/truon/Desktop/' filename = save_at + excel_url.split('/')[-1].replace('%20',' ') urlretrieve(excel_url,filename)", "print('get excel URL!!!') excel_url = get_excel_url(url) excel_url = excel_url.replace(' ','%20')", "mon nao nhu nay ({} {}) ca :('.format(sub, id_)) return", "in str(td_tag.a.contents[0]))): print('\\nComplete!!!') print(' '.join(str(td_tag.a.string).split())) print(str(td_tag.a['href']).replace('..', ROOT)) return str(td_tag.a['href']).replace('..', ROOT)", "excel_url = get_excel_url(url) excel_url = excel_url.replace(' ','%20') print('Download excel file!!!')", "list_td_tag for td_tag in all_td_tag: if (((sub+id_) in str(td_tag.a.contents[0])) or", "urllib.request import urlretrieve ROOT = 'http://pdaotao.duytan.edu.vn' def get_url_sub(sub, id_, page):", "+ list_td_tag for td_tag in all_td_tag: if (((sub+id_) in str(td_tag.a.contents[0]))", "get_excel_url('http://pdaotao.duytan.edu.vn/EXAM_LIST_Detail/?ID=52289&lang=VN') def main(): sub = input('Nhap ten mon: ') id_", "((sub+' '+id_) in str(td_tag.a.contents[0])) or ((sub+'_'+id_) in str(td_tag.a.contents[0]))): print('\\nComplete!!!') print('", "'+id_) in str(td_tag.a.contents[0])) or ((sub+'_'+id_) in str(td_tag.a.contents[0]))): print('\\nComplete!!!') print(' '.join(str(td_tag.a.string).split()))", "= soup.find_all('span',class_='txt_l4') excel_url = 
list_span_tags[1].a['href'].replace('..',ROOT) return excel_url # a =", "def get_url_sub(sub, id_, page): all_td_tag = [] for i in", "all_td_tag + list_td_tag for td_tag in all_td_tag: if (((sub+id_) in", "else: print('get excel URL!!!') excel_url = get_excel_url(url) excel_url = excel_url.replace('", "bs4 import BeautifulSoup import requests from urllib.request import urlretrieve ROOT", "= [] for i in range(1, page+1): print('http://pdaotao.duytan.edu.vn/EXAM_LIST/?page={}&lang=VN'.format(i)) r =", "all_td_tag = all_td_tag + list_td_tag for td_tag in all_td_tag: if", "excel file!!!') save_at = 'C:/Users/truon/Desktop/' filename = save_at + excel_url.split('/')[-1].replace('%20','", "ROOT) def get_excel_url(url): r = requests.get(url) soup = BeautifulSoup(r.text,'lxml') list_span_tags", "soup = BeautifulSoup(r.text, 'lxml') list_td_tag = soup.find_all('td', attrs={'style': 'padding-top:10px'}) all_td_tag", "from urllib.request import urlretrieve ROOT = 'http://pdaotao.duytan.edu.vn' def get_url_sub(sub, id_,", "= excel_url.replace(' ','%20') print('Download excel file!!!') save_at = 'C:/Users/truon/Desktop/' filename", "') url = get_url_sub(sub,id_,4) if url == None: print('Khong tim", "get_url_sub(sub, id_, page): all_td_tag = [] for i in range(1,", "in all_td_tag: if (((sub+id_) in str(td_tag.a.contents[0])) or ((sub+' '+id_) in", "'.join(str(td_tag.a.string).split())) print(str(td_tag.a['href']).replace('..', ROOT)) return str(td_tag.a['href']).replace('..', ROOT) def get_excel_url(url): r =", "','%20') print('Download excel file!!!') save_at = 'C:/Users/truon/Desktop/' filename = save_at", "urlretrieve ROOT = 'http://pdaotao.duytan.edu.vn' def get_url_sub(sub, id_, page): all_td_tag =", "id_ = input('Nhap id mon: ') url = get_url_sub(sub,id_,4) if", "id mon: ') url = get_url_sub(sub,id_,4) if url == None:", "ROOT)) return str(td_tag.a['href']).replace('..', ROOT) def get_excel_url(url): r = requests.get(url) soup", "soup.find_all('td', attrs={'style': 
'padding-top:10px'}) all_td_tag = all_td_tag + list_td_tag for td_tag", "input('Nhap id mon: ') url = get_url_sub(sub,id_,4) if url ==", "in range(1, page+1): print('http://pdaotao.duytan.edu.vn/EXAM_LIST/?page={}&lang=VN'.format(i)) r = requests.get('http://pdaotao.duytan.edu.vn/EXAM_LIST/?page={}&lang=VN'.format(i)) soup = BeautifulSoup(r.text,", "'C:/Users/truon/Desktop/' filename = save_at + excel_url.split('/')[-1].replace('%20',' ') urlretrieve(excel_url,filename) print('Done!') main()", "((sub+'_'+id_) in str(td_tag.a.contents[0]))): print('\\nComplete!!!') print(' '.join(str(td_tag.a.string).split())) print(str(td_tag.a['href']).replace('..', ROOT)) return str(td_tag.a['href']).replace('..',", "td_tag in all_td_tag: if (((sub+id_) in str(td_tag.a.contents[0])) or ((sub+' '+id_)", "range(1, page+1): print('http://pdaotao.duytan.edu.vn/EXAM_LIST/?page={}&lang=VN'.format(i)) r = requests.get('http://pdaotao.duytan.edu.vn/EXAM_LIST/?page={}&lang=VN'.format(i)) soup = BeautifulSoup(r.text, 'lxml')", "# a = get_excel_url('http://pdaotao.duytan.edu.vn/EXAM_LIST_Detail/?ID=52289&lang=VN') def main(): sub = input('Nhap ten", "= input('Nhap ten mon: ') id_ = input('Nhap id mon:", "') id_ = input('Nhap id mon: ') url = get_url_sub(sub,id_,4)", "= input('Nhap id mon: ') url = get_url_sub(sub,id_,4) if url", "{}) ca :('.format(sub, id_)) return else: print('get excel URL!!!') excel_url", "soup.find_all('span',class_='txt_l4') excel_url = list_span_tags[1].a['href'].replace('..',ROOT) return excel_url # a = get_excel_url('http://pdaotao.duytan.edu.vn/EXAM_LIST_Detail/?ID=52289&lang=VN')", "a = get_excel_url('http://pdaotao.duytan.edu.vn/EXAM_LIST_Detail/?ID=52289&lang=VN') def main(): sub = input('Nhap ten mon:", "return else: print('get excel URL!!!') excel_url = get_excel_url(url) excel_url =", "requests from urllib.request import urlretrieve ROOT = 'http://pdaotao.duytan.edu.vn' def get_url_sub(sub,", "requests.get(url) soup = BeautifulSoup(r.text,'lxml') 
list_span_tags = soup.find_all('span',class_='txt_l4') excel_url = list_span_tags[1].a['href'].replace('..',ROOT)", "id_, page): all_td_tag = [] for i in range(1, page+1):", "all_td_tag: if (((sub+id_) in str(td_tag.a.contents[0])) or ((sub+' '+id_) in str(td_tag.a.contents[0]))", "def main(): sub = input('Nhap ten mon: ') id_ =", "str(td_tag.a.contents[0])) or ((sub+' '+id_) in str(td_tag.a.contents[0])) or ((sub+'_'+id_) in str(td_tag.a.contents[0]))):", "get_excel_url(url): r = requests.get(url) soup = BeautifulSoup(r.text,'lxml') list_span_tags = soup.find_all('span',class_='txt_l4')", "ten mon: ') id_ = input('Nhap id mon: ') url", "import requests from urllib.request import urlretrieve ROOT = 'http://pdaotao.duytan.edu.vn' def", "url = get_url_sub(sub,id_,4) if url == None: print('Khong tim thay", "print('Download excel file!!!') save_at = 'C:/Users/truon/Desktop/' filename = save_at +", "= 'C:/Users/truon/Desktop/' filename = save_at + excel_url.split('/')[-1].replace('%20',' ') urlretrieve(excel_url,filename) print('Done!')", "in str(td_tag.a.contents[0])) or ((sub+' '+id_) in str(td_tag.a.contents[0])) or ((sub+'_'+id_) in", "if (((sub+id_) in str(td_tag.a.contents[0])) or ((sub+' '+id_) in str(td_tag.a.contents[0])) or", "if url == None: print('Khong tim thay mon nao nhu", "== None: print('Khong tim thay mon nao nhu nay ({}", "BeautifulSoup(r.text,'lxml') list_span_tags = soup.find_all('span',class_='txt_l4') excel_url = list_span_tags[1].a['href'].replace('..',ROOT) return excel_url #", "print('\\nComplete!!!') print(' '.join(str(td_tag.a.string).split())) print(str(td_tag.a['href']).replace('..', ROOT)) return str(td_tag.a['href']).replace('..', ROOT) def get_excel_url(url):", "URL!!!') excel_url = get_excel_url(url) excel_url = excel_url.replace(' ','%20') print('Download excel", "r = requests.get('http://pdaotao.duytan.edu.vn/EXAM_LIST/?page={}&lang=VN'.format(i)) soup = BeautifulSoup(r.text, 'lxml') list_td_tag = soup.find_all('td',", 
"attrs={'style': 'padding-top:10px'}) all_td_tag = all_td_tag + list_td_tag for td_tag in", "= get_url_sub(sub,id_,4) if url == None: print('Khong tim thay mon", "nao nhu nay ({} {}) ca :('.format(sub, id_)) return else:", "get_url_sub(sub,id_,4) if url == None: print('Khong tim thay mon nao", "for td_tag in all_td_tag: if (((sub+id_) in str(td_tag.a.contents[0])) or ((sub+'", "print('Khong tim thay mon nao nhu nay ({} {}) ca", "= all_td_tag + list_td_tag for td_tag in all_td_tag: if (((sub+id_)", "print('http://pdaotao.duytan.edu.vn/EXAM_LIST/?page={}&lang=VN'.format(i)) r = requests.get('http://pdaotao.duytan.edu.vn/EXAM_LIST/?page={}&lang=VN'.format(i)) soup = BeautifulSoup(r.text, 'lxml') list_td_tag =", "= get_excel_url(url) excel_url = excel_url.replace(' ','%20') print('Download excel file!!!') save_at", "sub = input('Nhap ten mon: ') id_ = input('Nhap id", "({} {}) ca :('.format(sub, id_)) return else: print('get excel URL!!!')", "excel_url = excel_url.replace(' ','%20') print('Download excel file!!!') save_at = 'C:/Users/truon/Desktop/'", "excel_url # a = get_excel_url('http://pdaotao.duytan.edu.vn/EXAM_LIST_Detail/?ID=52289&lang=VN') def main(): sub = input('Nhap", "return str(td_tag.a['href']).replace('..', ROOT) def get_excel_url(url): r = requests.get(url) soup =", "= 'http://pdaotao.duytan.edu.vn' def get_url_sub(sub, id_, page): all_td_tag = [] for", "[] for i in range(1, page+1): print('http://pdaotao.duytan.edu.vn/EXAM_LIST/?page={}&lang=VN'.format(i)) r = requests.get('http://pdaotao.duytan.edu.vn/EXAM_LIST/?page={}&lang=VN'.format(i))", "'padding-top:10px'}) all_td_tag = all_td_tag + list_td_tag for td_tag in all_td_tag:", "for i in range(1, page+1): print('http://pdaotao.duytan.edu.vn/EXAM_LIST/?page={}&lang=VN'.format(i)) r = requests.get('http://pdaotao.duytan.edu.vn/EXAM_LIST/?page={}&lang=VN'.format(i)) soup", "= BeautifulSoup(r.text, 'lxml') list_td_tag = soup.find_all('td', attrs={'style': 'padding-top:10px'}) all_td_tag =", "thay mon nao 
nhu nay ({} {}) ca :('.format(sub, id_))", "str(td_tag.a['href']).replace('..', ROOT) def get_excel_url(url): r = requests.get(url) soup = BeautifulSoup(r.text,'lxml')", "or ((sub+' '+id_) in str(td_tag.a.contents[0])) or ((sub+'_'+id_) in str(td_tag.a.contents[0]))): print('\\nComplete!!!')", "(((sub+id_) in str(td_tag.a.contents[0])) or ((sub+' '+id_) in str(td_tag.a.contents[0])) or ((sub+'_'+id_)", "= requests.get('http://pdaotao.duytan.edu.vn/EXAM_LIST/?page={}&lang=VN'.format(i)) soup = BeautifulSoup(r.text, 'lxml') list_td_tag = soup.find_all('td', attrs={'style':", "get_excel_url(url) excel_url = excel_url.replace(' ','%20') print('Download excel file!!!') save_at =", "list_span_tags = soup.find_all('span',class_='txt_l4') excel_url = list_span_tags[1].a['href'].replace('..',ROOT) return excel_url # a", "excel_url = list_span_tags[1].a['href'].replace('..',ROOT) return excel_url # a = get_excel_url('http://pdaotao.duytan.edu.vn/EXAM_LIST_Detail/?ID=52289&lang=VN') def", "i in range(1, page+1): print('http://pdaotao.duytan.edu.vn/EXAM_LIST/?page={}&lang=VN'.format(i)) r = requests.get('http://pdaotao.duytan.edu.vn/EXAM_LIST/?page={}&lang=VN'.format(i)) soup =", "all_td_tag = [] for i in range(1, page+1): print('http://pdaotao.duytan.edu.vn/EXAM_LIST/?page={}&lang=VN'.format(i)) r", "'http://pdaotao.duytan.edu.vn' def get_url_sub(sub, id_, page): all_td_tag = [] for i", "return excel_url # a = get_excel_url('http://pdaotao.duytan.edu.vn/EXAM_LIST_Detail/?ID=52289&lang=VN') def main(): sub =", "import urlretrieve ROOT = 'http://pdaotao.duytan.edu.vn' def get_url_sub(sub, id_, page): all_td_tag", "requests.get('http://pdaotao.duytan.edu.vn/EXAM_LIST/?page={}&lang=VN'.format(i)) soup = BeautifulSoup(r.text, 'lxml') list_td_tag = soup.find_all('td', attrs={'style': 'padding-top:10px'})", "print(str(td_tag.a['href']).replace('..', ROOT)) return str(td_tag.a['href']).replace('..', ROOT) def get_excel_url(url): r = requests.get(url)", "None: print('Khong tim thay 
mon nao nhu nay ({} {})", "list_td_tag = soup.find_all('td', attrs={'style': 'padding-top:10px'}) all_td_tag = all_td_tag + list_td_tag", "str(td_tag.a.contents[0])) or ((sub+'_'+id_) in str(td_tag.a.contents[0]))): print('\\nComplete!!!') print(' '.join(str(td_tag.a.string).split())) print(str(td_tag.a['href']).replace('..', ROOT))", "soup = BeautifulSoup(r.text,'lxml') list_span_tags = soup.find_all('span',class_='txt_l4') excel_url = list_span_tags[1].a['href'].replace('..',ROOT) return", "nay ({} {}) ca :('.format(sub, id_)) return else: print('get excel", "file!!!') save_at = 'C:/Users/truon/Desktop/' filename = save_at + excel_url.split('/')[-1].replace('%20',' ')", ":('.format(sub, id_)) return else: print('get excel URL!!!') excel_url = get_excel_url(url)", "= get_excel_url('http://pdaotao.duytan.edu.vn/EXAM_LIST_Detail/?ID=52289&lang=VN') def main(): sub = input('Nhap ten mon: ')", "import BeautifulSoup import requests from urllib.request import urlretrieve ROOT =", "ROOT = 'http://pdaotao.duytan.edu.vn' def get_url_sub(sub, id_, page): all_td_tag = []", "page+1): print('http://pdaotao.duytan.edu.vn/EXAM_LIST/?page={}&lang=VN'.format(i)) r = requests.get('http://pdaotao.duytan.edu.vn/EXAM_LIST/?page={}&lang=VN'.format(i)) soup = BeautifulSoup(r.text, 'lxml') list_td_tag", "tim thay mon nao nhu nay ({} {}) ca :('.format(sub,", "list_span_tags[1].a['href'].replace('..',ROOT) return excel_url # a = get_excel_url('http://pdaotao.duytan.edu.vn/EXAM_LIST_Detail/?ID=52289&lang=VN') def main(): sub", "def get_excel_url(url): r = requests.get(url) soup = BeautifulSoup(r.text,'lxml') list_span_tags =", "BeautifulSoup import requests from urllib.request import urlretrieve ROOT = 'http://pdaotao.duytan.edu.vn'" ]
[ "for Shopping. - Sending items to Content API for Shopping", "'Error loading items from BigQuery', http.HTTPStatus.INTERNAL_SERVER_ERROR result = process_result.ProcessResult([], [],", "timestamp %s successfully processed %s items, failed to process %s", "__name__ == '__main__': # This is used when running locally.", "2.0 (the \"License\"); # you may not use this file", "via Shoptimizer for upsert/prevent_expiring operations if operation != constants.Operation.DELETE and", "The batch of product data to be optimized. batch_number: The", "receives batch jobs from TaskQueue. For each job, the module", "successfully processed %s items, failed to process %s items and", "methods=['POST']) def run_insert_process() -> Tuple[str, http.HTTPStatus]: \"\"\"Handles uploading tasks pushed", "else: return TASK_RETRY_LIMIT if __name__ == '__main__': # This is", "content_api_client.ContentApiClient() successful_item_ids, item_failures = api_client.process_items( batch_to_send_to_content_api, batch_number, batch_id_to_item_id, method) result", "If the batch API call received an HttpError, mark every", "number that identifies this batch. operation: The operation to be", "constants import content_api_client import result_recorder import shoptimizer_client from models import", "\"\"\"Handles uploading tasks pushed from Task Queue.\"\"\" return _run_process(constants.Operation.UPSERT) @app.route('/delete_items',", "constants.Method.INSERT, constants.Operation.DELETE: constants.Method.DELETE, constants.Operation.PREVENT_EXPIRING: constants.Method.INSERT } # Used to check", "batch that can be sent to Content API for Shopping.", "limit. Returns: int, the number of times this task has", "optimization_client = shoptimizer_client.ShoptimizerClient( batch_number, operation) except (OSError, ValueError): return batch", "error_reason, batch_number, http_error, items, operation, task) return error_reason, error_status_code except", "status: %s. 
Error: %s', batch_num, operation.value, task.timestamp, error_status_code, error_reason) #", "else: batch_to_send_to_content_api = original_batch # Sends batch of items to", "as http_error: error_status_code = http_error.resp.status error_reason = http_error.resp.reason result =", "google.cloud import bigquery from google.cloud import logging as cloud_logging from", "_load_items_from_bigquery(operation, task) except errors.HttpError: return 'Error loading items from BigQuery',", "= http.HTTPStatus.REQUEST_TIMEOUT error_reason = 'Socket timeout' result = _handle_content_api_error(error_status_code, error_reason,", "constants.SHOPTIMIZER_API_INTEGRATION_ON: batch_to_send_to_content_api = _create_optimized_batch( original_batch, batch_number, operation) else: batch_to_send_to_content_api =", "batch via Shoptimizer for upsert/prevent_expiring operations if operation != constants.Operation.DELETE", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "operation.value, task.timestamp, result.get_success_count(), result.get_failure_count(), result.get_skipped_count()) finally: recorder = result_recorder.ResultRecorder.from_service_account_json( constants.GCP_SERVICE_ACCOUNT_PATH,", "constants.DATASET_ID_FOR_MONITORING, constants.TABLE_ID_FOR_RESULT_COUNTS_MONITORING, constants.TABLE_ID_FOR_ITEM_RESULTS_MONITORING) recorder.insert_result(operation.value, result, task.timestamp, batch_number) return 'OK', http.HTTPStatus.OK", "_handle_content_api_error(error_status_code, error_reason, batch_number, timeout_error, items, operation, task) return error_reason, error_status_code", "\"\"\"Loads items from BigQuery. Args: operation: The operation to be", "an HttpError, mark every id as failed. 
item_failures = [", "\"\"\" request_body = json.loads(flask.request.data.decode('utf-8')) task = upload_task.UploadTask.from_json(request_body) if task.batch_size ==", "from models import failure from models import process_result from models", "except errors.HttpError as http_error: logging.exception( 'Error loading items from %s.%s.", "the retry limit. Returns: int, the number of times this", "return _run_process(constants.Operation.UPSERT) @app.route('/delete_items', methods=['POST']) def run_delete_process() -> Tuple[str, http.HTTPStatus]: \"\"\"Handles", "permissions and # limitations under the License. \"\"\"Uploader module that", "alerting purposes. # Should match task_retry_limit in appengine/initiator/queue.yaml. TASK_RETRY_LIMIT =", "from Content API and returns a list of item failures.", "executed. \"\"\" execution_attempt = flask.request.headers.get( 'X-AppEngine-TaskExecutionCount', '') if execution_attempt: return", "are executed in this function: - Loading items to process", "logging.error( 'Batch #%d with operation %s and initiation timestamp %s", "Merchant Center. \"\"\" import http import json import logging import", "this request. Returns: The list of items that failed due", "def _get_execution_attempt() -> int: \"\"\"Returns the number of times this", "times this task has previously been executed. \"\"\" execution_attempt =", "that initiated this request. Returns: The list of items loaded", "batch_creator import bigquery_client import constants import content_api_client import result_recorder import", "if this is the last retry for alerting purposes. #", "API. Start_index: %d, batch_size: %d,' 'initiation timestamp: %s', batch_number, operation.value,", "id as failed. item_failures = [ failure.Failure(str(item_row.get('item_id', 'Missing ID')), error_reason)", "use this file except in compliance with the License. 
#", "OPERATION_TO_METHOD = { constants.Operation.UPSERT: constants.Method.INSERT, constants.Operation.DELETE: constants.Method.DELETE, constants.Operation.PREVENT_EXPIRING: constants.Method.INSERT }", "Content API for Shopping. - Sending items to Content API", "Args: operation: Type of operation to perform on the items.", "batch_number: int, operation: constants.Operation) -> constants.Batch: \"\"\"Creates an optimized batch", "http.HTTPStatus]: \"\"\"Handles uploading tasks pushed from Task Queue.\"\"\" return _run_process(constants.Operation.UPSERT)", "error_reason) # If the batch API call received an HttpError,", "list of items that failed due to the error, wrapped", "Content API and returns a list of item failures. Args:", "come from Cloud Tasks. In this case, there will be", "be called. It extracts necessary information from a Task Queue", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "returned from the Shoptimizer API Client. \"\"\" try: optimization_client =", "batch: The batch of product data to be optimized. batch_number:", "def run_insert_process() -> Tuple[str, http.HTTPStatus]: \"\"\"Handles uploading tasks pushed from", "a process_result. \"\"\" logging.warning( 'Batch #%d with operation %s and", "License. 
# You may obtain a copy of the License", "Start_index: %d, batch_size: %d,' 'initiation timestamp: %s', batch_number, operation.value, task.start_index,", "content_api_client.suggest_retry( error_status_code) and _get_execution_attempt() < TASK_RETRY_LIMIT: logging.warning( 'Batch #%d with", "result.get_success_count(), result.get_failure_count(), result.get_skipped_count()) finally: recorder = result_recorder.ResultRecorder.from_service_account_json( constants.GCP_SERVICE_ACCOUNT_PATH, constants.DATASET_ID_FOR_MONITORING, constants.TABLE_ID_FOR_RESULT_COUNTS_MONITORING,", "constants.DATASET_ID_FOR_PROCESSING, table_id) try: items_iterator = bq_client.load_items(task.start_index, task.batch_size) except errors.HttpError as", "Content API. error_reason: The reason for the error. batch_num: The", "http.HTTPStatus]: \"\"\"Handles deleting tasks pushed from Task Queue.\"\"\" return _run_process(constants.Operation.DELETE)", "under the License is distributed on an \"AS IS\" BASIS,", "this method will be called. It extracts necessary information from", "License for the specific language governing permissions and # limitations", "0 items loaded from BigQuery so batch not sent to", "bigquery_client import constants import content_api_client import result_recorder import shoptimizer_client from", "error_status_code: HTTP status code from Content API. 
error_reason: The reason", "to Content API for Shopping api_client = content_api_client.ContentApiClient() successful_item_ids, item_failures", "= _handle_content_api_error(error_status_code, error_reason, batch_number, http_error, items, operation, task) return error_reason,", "models import failure from models import process_result from models import", "!= constants.Operation.DELETE and constants.SHOPTIMIZER_API_INTEGRATION_ON: batch_to_send_to_content_api = _create_optimized_batch( original_batch, batch_number, operation)", "process_result.ProcessResult: \"\"\"Logs network related errors returned from Content API and", "product data to be optimized. batch_number: The number that identifies", "item_row in item_rows ] api_result = process_result.ProcessResult([], item_failures, []) if", "timeout_error: error_status_code = http.HTTPStatus.REQUEST_TIMEOUT error_reason = 'Socket timeout' result =", "this case, there will be no retry, so set execution", "upload_task.UploadTask.from_json(request_body) if task.batch_size == 0: return 'OK', http.HTTPStatus.OK batch_number =", "'Batch #%d with operation %s and initiation timestamp %s successfully", "LLC. # # Licensed under the Apache License, Version 2.0", "The operation to be performed on this batch (upsert, delete,", "#%d with operation %s and initiation timestamp %s failed and", "upsert/prevent_expiring operations if operation != constants.Operation.DELETE and constants.SHOPTIMIZER_API_INTEGRATION_ON: batch_to_send_to_content_api =", "so batch not sent to Content API. Start_index: %d, batch_size:", "error_status_code except socket.timeout as timeout_error: error_status_code = http.HTTPStatus.REQUEST_TIMEOUT error_reason =", "API. error_reason: The reason for the error. 
batch_num: The batch", "import content_api_client import result_recorder import shoptimizer_client from models import failure", "batch_to_send_to_content_api, batch_number, batch_id_to_item_id, method) result = process_result.ProcessResult( successfully_processed_item_ids=successful_item_ids, content_api_failures=item_failures, skipped_item_ids=skipped_item_ids)", "that initiated this request. Returns: The list of items that", "does not exist, it means the request did not come", "governing permissions and # limitations under the License. \"\"\"Uploader module", "of HTTP request. \"\"\" request_body = json.loads(flask.request.data.decode('utf-8')) task = upload_task.UploadTask.from_json(request_body)", "= json.loads(flask.request.data.decode('utf-8')) task = upload_task.UploadTask.from_json(request_body) if task.batch_size == 0: return", "when running locally. Gunicorn is used to run the #", "of items. task: The Cloud Task object that initiated this", "'Batch #%d with operation %s and initiation timestamp %s failed.", "- Records the results of the Content API for Shopping", "constants.Batch: \"\"\"Creates an optimized batch by calling the Shoptimizer API.", "to the retry limit. Returns: int, the number of times", "in compliance with the License. # You may obtain a", "_run_process(constants.Operation.DELETE) @app.route('/prevent_expiring_items', methods=['POST']) def run_prevent_expiring_process() -> Tuple[str, http.HTTPStatus]: \"\"\"Handles prevent", "executed. 
If the execution count header does not exist, it", "import constants import content_api_client import result_recorder import shoptimizer_client from models", "If the execution count header does not exist, it means", "software # distributed under the License is distributed on an", "Converts items into a batch that can be sent to", "failure from models import process_result from models import upload_task app", "cloud_logging.Client() _logging_client.setup_logging(log_level=logging.DEBUG) _SHOPTIMIZER_CONFIG_FILE_PATH = 'config/shoptimizer_config.json' OPERATION_TO_METHOD = { constants.Operation.UPSERT: constants.Method.INSERT,", "\"\"\"Handles prevent expiring tasks pushed from Task Queue.\"\"\" return _run_process(constants.Operation.PREVENT_EXPIRING)", "task.timestamp, batch_number) return 'OK', http.HTTPStatus.OK def _load_items_from_bigquery( operation: constants.Operation, task:", "+ 1 logging.info( '%s started. Batch #%d info: start_index: %d,", "from googleapiclient import errors import batch_creator import bigquery_client import constants", "can be sent to Content API for Shopping. - Sending", "operations if operation != constants.Operation.DELETE and constants.SHOPTIMIZER_API_INTEGRATION_ON: batch_to_send_to_content_api = _create_optimized_batch(", "constants.GCP_SERVICE_ACCOUNT_PATH, constants.DATASET_ID_FOR_MONITORING, constants.TABLE_ID_FOR_RESULT_COUNTS_MONITORING, constants.TABLE_ID_FOR_ITEM_RESULTS_MONITORING) recorder.insert_result(operation.value, result, task.timestamp, batch_number) return 'OK',", "data from BigQuery and sends it to Merchant Center. 
\"\"\"", "%s', batch_num, operation.value, task.timestamp, error_status_code, error_reason) # If the batch", "-> Tuple[str, http.HTTPStatus]: \"\"\"Handles uploading tasks pushed from Task Queue.\"\"\"", "expiring tasks pushed from Task Queue.\"\"\" return _run_process(constants.Operation.PREVENT_EXPIRING) def _run_process(operation:", "upload_task.UploadTask) -> List[bigquery.Row]: \"\"\"Loads items from BigQuery. Args: operation: The", "except socket.timeout as timeout_error: error_status_code = http.HTTPStatus.REQUEST_TIMEOUT error_reason = 'Socket", "= result_recorder.ResultRecorder.from_service_account_json( constants.GCP_SERVICE_ACCOUNT_PATH, constants.DATASET_ID_FOR_MONITORING, constants.TABLE_ID_FOR_RESULT_COUNTS_MONITORING, constants.TABLE_ID_FOR_ITEM_RESULTS_MONITORING) recorder.insert_result(operation.value, result, task.timestamp, batch_number)", "= _create_optimized_batch( original_batch, batch_number, operation) else: batch_to_send_to_content_api = original_batch #", "for retry', batch_num, operation.value, task.timestamp) else: logging.error( 'Batch #%d with", "called. It extracts necessary information from a Task Queue message.", "= http_error.resp.status error_reason = http_error.resp.reason result = _handle_content_api_error(error_status_code, error_reason, batch_number,", "error: The error thrown by Content API. item_rows: The items", "Tasks. In this case, there will be no retry, so", "application on Google App Engine. See entrypoint in app.yaml. app.run(host='127.0.0.1',", "Creates batch from items loaded from BigQuery original_batch, skipped_item_ids, batch_id_to_item_id", "_get_execution_attempt() < TASK_RETRY_LIMIT: logging.warning( 'Batch #%d with operation %s and", "failed to process %s items and skipped %s items.', batch_number,", "from a Task Queue message. 
The following processes are executed", "= _handle_content_api_error(error_status_code, error_reason, batch_number, timeout_error, items, operation, task) return error_reason,", "@app.route('/delete_items', methods=['POST']) def run_delete_process() -> Tuple[str, http.HTTPStatus]: \"\"\"Handles deleting tasks", "import shoptimizer_client from models import failure from models import process_result", "on this batch of items. task: The Cloud Task object", "execution attempt to the retry limit. Returns: int, the number", "for Shopping call. Args: operation: Type of operation to perform", "#%d, operation %s: 0 items loaded from BigQuery so batch", "operation to perform on the items. Returns: The result of", "initiation timestamp %s failed and will not be retried. Error:", "handles batch jobs sent from Task Queue. This module receives", "import batch_creator import bigquery_client import constants import content_api_client import result_recorder", "batch of items to Content API for Shopping api_client =", "api_result = process_result.ProcessResult([], item_failures, []) if content_api_client.suggest_retry( error_status_code) and _get_execution_attempt()", "%d, batch_size: %d,' 'initiation timestamp: %s', operation.value, batch_number, task.start_index, task.batch_size,", "been executed. \"\"\" execution_attempt = flask.request.headers.get( 'X-AppEngine-TaskExecutionCount', '') if execution_attempt:", "the # application on Google App Engine. See entrypoint in", "2021 Google LLC. # # Licensed under the Apache License,", "int(task.start_index / task.batch_size) + 1 logging.info( '%s started. Batch #%d", "task) return error_reason, error_status_code else: logging.info( 'Batch #%d with operation", "batch_number, items, method) # Optimizes batch via Shoptimizer for upsert/prevent_expiring", "socket from typing import List, Tuple import flask from google.cloud", "Google LLC. 
# # Licensed under the Apache License, Version", "operation.value, task.start_index, task.batch_size, task.timestamp) return 'No items to process', http.HTTPStatus.OK", "timeout' result = _handle_content_api_error(error_status_code, error_reason, batch_number, timeout_error, items, operation, task)", "request did not come from Cloud Tasks. In this case,", "BigQuery. - Converts items into a batch that can be", "batch_number, operation) else: batch_to_send_to_content_api = original_batch # Sends batch of", "int, error: Exception, item_rows: List[bigquery.Row], operation: constants.Operation, task: upload_task.UploadTask) ->", "import http import json import logging import socket from typing", "%s', batch_number, operation.value, task.start_index, task.batch_size, task.timestamp) return 'No items to", "# Copyright 2021 Google LLC. # # Licensed under the", "The following processes are executed in this function: - Loading", "OF ANY KIND, either express or implied. # See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "%s items, failed to process %s items and skipped %s", "upload_task.UploadTask) -> process_result.ProcessResult: \"\"\"Logs network related errors returned from Content", "[]) try: if not items: logging.error( 'Batch #%d, operation %s:", "failed due to the error, wrapped in a process_result. \"\"\"", "ANY KIND, either express or implied. # See the License", "See the License for the specific language governing permissions and", "= flask.Flask(__name__) _logging_client = cloud_logging.Client() _logging_client.setup_logging(log_level=logging.DEBUG) _SHOPTIMIZER_CONFIG_FILE_PATH = 'config/shoptimizer_config.json' OPERATION_TO_METHOD", "ID')), error_reason) for item_row in item_rows ] api_result = process_result.ProcessResult([],", "for Shopping api_client = content_api_client.ContentApiClient() successful_item_ids, item_failures = api_client.process_items( batch_to_send_to_content_api,", "the items. Returns: The result of HTTP request. 
\"\"\" request_body", "http.HTTPStatus]: \"\"\"Handles tasks pushed from Task Queue. When tasks are", "the License. # You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "http.HTTPStatus.OK batch_number = int(task.start_index / task.batch_size) + 1 logging.info( '%s", "operation != constants.Operation.DELETE and constants.SHOPTIMIZER_API_INTEGRATION_ON: batch_to_send_to_content_api = _create_optimized_batch( original_batch, batch_number,", "< TASK_RETRY_LIMIT: logging.warning( 'Batch #%d with operation %s and initiation", "return 'OK', http.HTTPStatus.OK batch_number = int(task.start_index / task.batch_size) + 1", "to in writing, software # distributed under the License is", "from the Shoptimizer API Client. \"\"\" try: optimization_client = shoptimizer_client.ShoptimizerClient(", "returned from Content API and returns a list of item", "will be requeued for retry', batch_num, operation.value, task.timestamp) else: logging.error(", "# See the License for the specific language governing permissions", "as timeout_error: error_status_code = http.HTTPStatus.REQUEST_TIMEOUT error_reason = 'Socket timeout' result", "ValueError): return batch return optimization_client.shoptimize(batch) def _handle_content_api_error( error_status_code: int, error_reason:", "task: upload_task.UploadTask) -> process_result.ProcessResult: \"\"\"Logs network related errors returned from", "pushed from Task Queue. When tasks are enqueued to Task", "retry for alerting purposes. # Should match task_retry_limit in appengine/initiator/queue.yaml.", "error, wrapped in a process_result. \"\"\" logging.warning( 'Batch #%d with", "items. Returns: The result of HTTP request. 
\"\"\" request_body =", "or agreed to in writing, software # distributed under the", "task.timestamp, error) return api_result def _get_execution_attempt() -> int: \"\"\"Returns the", "batch_number, batch_id_to_item_id, method) result = process_result.ProcessResult( successfully_processed_item_ids=successful_item_ids, content_api_failures=item_failures, skipped_item_ids=skipped_item_ids) except", "and initiation timestamp %s failed and will not be retried.", "Center. \"\"\" import http import json import logging import socket", "required by applicable law or agreed to in writing, software", "following processes are executed in this function: - Loading items", "items, operation, task) return error_reason, error_status_code except socket.timeout as timeout_error:", "The list of items loaded from BigQuery. \"\"\" table_id =", "an optimized batch by calling the Shoptimizer API. Args: batch:", "error thrown by Content API. item_rows: The items being processed", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "that failed due to the error, wrapped in a process_result.", "case, there will be no retry, so set execution attempt", "batch_num, operation.value, task.timestamp, error_status_code, error_reason) # If the batch API", "with the License. # You may obtain a copy of", "else: logging.error( 'Batch #%d with operation %s and initiation timestamp", "batch_num, operation.value, task.timestamp, error) return api_result def _get_execution_attempt() -> int:", "This is used when running locally. 
Gunicorn is used to", "not exist, it means the request did not come from", "result = _handle_content_api_error(error_status_code, error_reason, batch_number, http_error, items, operation, task) return", "# Creates batch from items loaded from BigQuery original_batch, skipped_item_ids,", "task.batch_size, task.timestamp) return 'No items to process', http.HTTPStatus.OK method =", "task.timestamp, error_status_code, error_reason) # If the batch API call received", "Task object that initiated this request. Returns: The list of", "- Loading items to process from BigQuery. - Converts items", "failed. item_failures = [ failure.Failure(str(item_row.get('item_id', 'Missing ID')), error_reason) for item_row", "Task Queue. When tasks are enqueued to Task Queue by", "in a process_result. \"\"\" logging.warning( 'Batch #%d with operation %s", "Shopping. - Sending items to Content API for Shopping (Merchant", "TASK_RETRY_LIMIT if __name__ == '__main__': # This is used when", "import List, Tuple import flask from google.cloud import bigquery from", "The reason for the error. batch_num: The batch number. 
error:", "task.timestamp) return 'No items to process', http.HTTPStatus.OK method = OPERATION_TO_METHOD.get(operation)", "constants.TABLE_ID_FOR_RESULT_COUNTS_MONITORING, constants.TABLE_ID_FOR_ITEM_RESULTS_MONITORING) recorder.insert_result(operation.value, result, task.timestamp, batch_number) return 'OK', http.HTTPStatus.OK def", "Args: batch: The batch of product data to be optimized.", "- Converts items into a batch that can be sent", "Queue.\"\"\" return _run_process(constants.Operation.DELETE) @app.route('/prevent_expiring_items', methods=['POST']) def run_prevent_expiring_process() -> Tuple[str, http.HTTPStatus]:", "Task Queue.\"\"\" return _run_process(constants.Operation.UPSERT) @app.route('/delete_items', methods=['POST']) def run_delete_process() -> Tuple[str,", "retry', batch_num, operation.value, task.timestamp) else: logging.error( 'Batch #%d with operation", "compliance with the License. # You may obtain a copy", "due to the error, wrapped in a process_result. \"\"\" logging.warning(", "on the items. Returns: The result of HTTP request. \"\"\"", "agreed to in writing, software # distributed under the License", "TaskQueue. For each job, the module loads data from BigQuery", "a batch that can be sent to Content API for", "License. \"\"\"Uploader module that handles batch jobs sent from Task", "BigQuery', http.HTTPStatus.INTERNAL_SERVER_ERROR result = process_result.ProcessResult([], [], []) try: if not", "match task_retry_limit in appengine/initiator/queue.yaml. TASK_RETRY_LIMIT = 5 @app.route('/insert_items', methods=['POST']) def", "extracts necessary information from a Task Queue message. The following", "HTTP request. \"\"\" request_body = json.loads(flask.request.data.decode('utf-8')) task = upload_task.UploadTask.from_json(request_body) if", "'Error loading items from %s.%s. HTTP status: %s. 
Error: %s',", "operation: constants.Operation, task: upload_task.UploadTask) -> process_result.ProcessResult: \"\"\"Logs network related errors", "the last retry for alerting purposes. # Should match task_retry_limit", "of times this task has previously been executed. \"\"\" execution_attempt", "process %s items and skipped %s items.', batch_number, operation.value, task.timestamp,", "pushed from Task Queue.\"\"\" return _run_process(constants.Operation.UPSERT) @app.route('/delete_items', methods=['POST']) def run_delete_process()", "distributed under the License is distributed on an \"AS IS\"", "try: if not items: logging.error( 'Batch #%d, operation %s: 0", "items.', batch_number, operation.value, task.timestamp, result.get_success_count(), result.get_failure_count(), result.get_skipped_count()) finally: recorder =", "batch_number, timeout_error, items, operation, task) return error_reason, error_status_code else: logging.info(", "import socket from typing import List, Tuple import flask from", "task.batch_size == 0: return 'OK', http.HTTPStatus.OK batch_number = int(task.start_index /", "operation: The operation to be performed on this batch of", "operation, task) return error_reason, error_status_code else: logging.info( 'Batch #%d with", "the Content API for Shopping call. Args: operation: Type of", "timeout_error, items, operation, task) return error_reason, error_status_code else: logging.info( 'Batch", "express or implied. # See the License for the specific", "task has previously been executed. If the execution count header", "except in compliance with the License. # You may obtain", "batch_size: %d,' 'initiation timestamp: %s', batch_number, operation.value, task.start_index, task.batch_size, task.timestamp)", "task has previously been executed. 
\"\"\" execution_attempt = flask.request.headers.get( 'X-AppEngine-TaskExecutionCount',", "Error: %s', constants.DATASET_ID_FOR_PROCESSING, table_id, http_error.resp.status, http_error.resp.reason) raise return list(items_iterator) def", "%s and initiation timestamp %s failed. HTTP status: %s. Error:", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "batch jobs sent from Task Queue. This module receives batch", "googleapiclient import errors import batch_creator import bigquery_client import constants import", "# Sends batch of items to Content API for Shopping", "not use this file except in compliance with the License.", "constants.Method.INSERT } # Used to check if this is the", "loaded from BigQuery. \"\"\" table_id = f'process_items_to_{operation.value}_{task.timestamp}' bq_client = bigquery_client.BigQueryClient.from_service_account_json(", "times this task has previously been executed. If the execution", "models import upload_task app = flask.Flask(__name__) _logging_client = cloud_logging.Client() _logging_client.setup_logging(log_level=logging.DEBUG)", "items loaded from BigQuery original_batch, skipped_item_ids, batch_id_to_item_id = batch_creator.create_batch( batch_number,", "batch_number, task.start_index, task.batch_size, task.timestamp) try: items = _load_items_from_bigquery(operation, task) except", "data to be optimized. 
batch_number: The number that identifies this", "} # Used to check if this is the last", "writing, software # distributed under the License is distributed on", "constants.TABLE_ID_FOR_ITEM_RESULTS_MONITORING) recorder.insert_result(operation.value, result, task.timestamp, batch_number) return 'OK', http.HTTPStatus.OK def _load_items_from_bigquery(", "The operation to be performed on this batch of items.", "constants.GCP_SERVICE_ACCOUNT_PATH, constants.DATASET_ID_FOR_PROCESSING, table_id) try: items_iterator = bq_client.load_items(task.start_index, task.batch_size) except errors.HttpError", "is used when running locally. Gunicorn is used to run", "as http_error: logging.exception( 'Error loading items from %s.%s. HTTP status:", "constants.Operation, task: upload_task.UploadTask) -> process_result.ProcessResult: \"\"\"Logs network related errors returned", "you may not use this file except in compliance with", "operation to be performed on this batch (upsert, delete, prevent_expiring).", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "[ failure.Failure(str(item_row.get('item_id', 'Missing ID')), error_reason) for item_row in item_rows ]", "from models import upload_task app = flask.Flask(__name__) _logging_client = cloud_logging.Client()", "tasks pushed from Task Queue.\"\"\" return _run_process(constants.Operation.DELETE) @app.route('/prevent_expiring_items', methods=['POST']) def", "this batch of items. task: The Cloud Task object that", "errors returned from Content API and returns a list of", "information from a Task Queue message. The following processes are", "the Shoptimizer API. Args: batch: The batch of product data", "batch (upsert, delete, prevent_expiring). Returns: The batch returned from the", "status: %s. 
Error: %s', constants.DATASET_ID_FOR_PROCESSING, table_id, http_error.resp.status, http_error.resp.reason) raise return", "operation %s and initiation timestamp %s failed and will not", "successfully_processed_item_ids=successful_item_ids, content_api_failures=item_failures, skipped_item_ids=skipped_item_ids) except errors.HttpError as http_error: error_status_code = http_error.resp.status", "Shoptimizer API Client. \"\"\" try: optimization_client = shoptimizer_client.ShoptimizerClient( batch_number, operation)", "'__main__': # This is used when running locally. Gunicorn is", "to Content API. Start_index: %d, batch_size: %d,' 'initiation timestamp: %s',", "BigQuery so batch not sent to Content API. Start_index: %d,", "'Batch #%d with operation %s and initiation timestamp %s failed", "task) except errors.HttpError: return 'Error loading items from BigQuery', http.HTTPStatus.INTERNAL_SERVER_ERROR", "requeued for retry', batch_num, operation.value, task.timestamp) else: logging.error( 'Batch #%d", "jobs from TaskQueue. For each job, the module loads data", "be no retry, so set execution attempt to the retry", "object that initiated this request. Returns: The list of items", "Task Queue.\"\"\" return _run_process(constants.Operation.DELETE) @app.route('/prevent_expiring_items', methods=['POST']) def run_prevent_expiring_process() -> Tuple[str,", "\"\"\"Returns the number of times this task has previously been", "CONDITIONS OF ANY KIND, either express or implied. # See", "executed in this function: - Loading items to process from", "original_batch # Sends batch of items to Content API for", "loaded from BigQuery so batch not sent to Content API.", "items_iterator = bq_client.load_items(task.start_index, task.batch_size) except errors.HttpError as http_error: logging.exception( 'Error", "list of item failures. 
Args: error_status_code: HTTP status code from", "batch_number) return 'OK', http.HTTPStatus.OK def _load_items_from_bigquery( operation: constants.Operation, task: upload_task.UploadTask)", "reason for the error. batch_num: The batch number. error: The", "to perform on the items. Returns: The result of HTTP", "every id as failed. item_failures = [ failure.Failure(str(item_row.get('item_id', 'Missing ID')),", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "batch_number, operation.value, task.timestamp, result.get_success_count(), result.get_failure_count(), result.get_skipped_count()) finally: recorder = result_recorder.ResultRecorder.from_service_account_json(", "be requeued for retry', batch_num, operation.value, task.timestamp) else: logging.error( 'Batch", "loading items from BigQuery', http.HTTPStatus.INTERNAL_SERVER_ERROR result = process_result.ProcessResult([], [], [])", "results of the Content API for Shopping call. Args: operation:", "list of items loaded from BigQuery. \"\"\" table_id = f'process_items_to_{operation.value}_{task.timestamp}'", "Cloud Tasks. 
In this case, there will be no retry,", "upload_task app = flask.Flask(__name__) _logging_client = cloud_logging.Client() _logging_client.setup_logging(log_level=logging.DEBUG) _SHOPTIMIZER_CONFIG_FILE_PATH =", "return list(items_iterator) def _create_optimized_batch(batch: constants.Batch, batch_number: int, operation: constants.Operation) ->", "constants.Operation) -> constants.Batch: \"\"\"Creates an optimized batch by calling the", "except (OSError, ValueError): return batch return optimization_client.shoptimize(batch) def _handle_content_api_error( error_status_code:", "= [ failure.Failure(str(item_row.get('item_id', 'Missing ID')), error_reason) for item_row in item_rows", "'') if execution_attempt: return int(execution_attempt) else: return TASK_RETRY_LIMIT if __name__", "Used to check if this is the last retry for", "Content API for Shopping api_client = content_api_client.ContentApiClient() successful_item_ids, item_failures =", "import failure from models import process_result from models import upload_task", "batch_number, operation) except (OSError, ValueError): return batch return optimization_client.shoptimize(batch) def", "shoptimizer_client from models import failure from models import process_result from", "operation %s and initiation timestamp %s will be requeued for", "is the last retry for alerting purposes. # Should match", "Task Queue.\"\"\" return _run_process(constants.Operation.PREVENT_EXPIRING) def _run_process(operation: constants.Operation) -> Tuple[str, http.HTTPStatus]:", "= content_api_client.ContentApiClient() successful_item_ids, item_failures = api_client.process_items( batch_to_send_to_content_api, batch_number, batch_id_to_item_id, method)", "List[bigquery.Row], operation: constants.Operation, task: upload_task.UploadTask) -> process_result.ProcessResult: \"\"\"Logs network related", "Loading items to process from BigQuery. 
- Converts items into", "from Task Queue.\"\"\" return _run_process(constants.Operation.UPSERT) @app.route('/delete_items', methods=['POST']) def run_delete_process() ->", "Content API. Start_index: %d, batch_size: %d,' 'initiation timestamp: %s', batch_number,", "Content API for Shopping call. Args: operation: Type of operation", "http.HTTPStatus.REQUEST_TIMEOUT error_reason = 'Socket timeout' result = _handle_content_api_error(error_status_code, error_reason, batch_number,", "and initiation timestamp %s failed. HTTP status: %s. Error: %s',", "locally. Gunicorn is used to run the # application on", "import logging import socket from typing import List, Tuple import", "= api_client.process_items( batch_to_send_to_content_api, batch_number, batch_id_to_item_id, method) result = process_result.ProcessResult( successfully_processed_item_ids=successful_item_ids,", "task = upload_task.UploadTask.from_json(request_body) if task.batch_size == 0: return 'OK', http.HTTPStatus.OK", "return 'OK', http.HTTPStatus.OK def _load_items_from_bigquery( operation: constants.Operation, task: upload_task.UploadTask) ->", "\"\"\"Logs network related errors returned from Content API and returns", "not come from Cloud Tasks. In this case, there will", "info: start_index: %d, batch_size: %d,' 'initiation timestamp: %s', operation.value, batch_number,", "return _run_process(constants.Operation.DELETE) @app.route('/prevent_expiring_items', methods=['POST']) def run_prevent_expiring_process() -> Tuple[str, http.HTTPStatus]: \"\"\"Handles", "_handle_content_api_error( error_status_code: int, error_reason: str, batch_num: int, error: Exception, item_rows:", "items that failed due to the error, wrapped in a", "is used to run the # application on Google App", "For each job, the module loads data from BigQuery and", "to the error, wrapped in a process_result. 
\"\"\" logging.warning( 'Batch", "= flask.request.headers.get( 'X-AppEngine-TaskExecutionCount', '') if execution_attempt: return int(execution_attempt) else: return", "there will be no retry, so set execution attempt to", "OR CONDITIONS OF ANY KIND, either express or implied. #", "batch_to_send_to_content_api = _create_optimized_batch( original_batch, batch_number, operation) else: batch_to_send_to_content_api = original_batch", "== 0: return 'OK', http.HTTPStatus.OK batch_number = int(task.start_index / task.batch_size)", "the License is distributed on an \"AS IS\" BASIS, #", "= shoptimizer_client.ShoptimizerClient( batch_number, operation) except (OSError, ValueError): return batch return", "return int(execution_attempt) else: return TASK_RETRY_LIMIT if __name__ == '__main__': #", "else: logging.info( 'Batch #%d with operation %s and initiation timestamp", "sent from Task Queue. This module receives batch jobs from", "will be no retry, so set execution attempt to the", "pushed from Task Queue.\"\"\" return _run_process(constants.Operation.PREVENT_EXPIRING) def _run_process(operation: constants.Operation) ->", "http_error: error_status_code = http_error.resp.status error_reason = http_error.resp.reason result = _handle_content_api_error(error_status_code,", "The Cloud Task object that initiated this request. Returns: The", "In this case, there will be no retry, so set", "number of times this task has previously been executed. 
If", "batch_creator.create_batch( batch_number, items, method) # Optimizes batch via Shoptimizer for", "OPERATION_TO_METHOD.get(operation) # Creates batch from items loaded from BigQuery original_batch,", "and _get_execution_attempt() < TASK_RETRY_LIMIT: logging.warning( 'Batch #%d with operation %s", "bigquery_client.BigQueryClient.from_service_account_json( constants.GCP_SERVICE_ACCOUNT_PATH, constants.DATASET_ID_FOR_PROCESSING, table_id) try: items_iterator = bq_client.load_items(task.start_index, task.batch_size) except", "logging as cloud_logging from googleapiclient import errors import batch_creator import", "Queue. When tasks are enqueued to Task Queue by initiator,", "of the Content API for Shopping call. Args: operation: Type", "batch_id_to_item_id = batch_creator.create_batch( batch_number, items, method) # Optimizes batch via", "count header does not exist, it means the request did", "recorder = result_recorder.ResultRecorder.from_service_account_json( constants.GCP_SERVICE_ACCOUNT_PATH, constants.DATASET_ID_FOR_MONITORING, constants.TABLE_ID_FOR_RESULT_COUNTS_MONITORING, constants.TABLE_ID_FOR_ITEM_RESULTS_MONITORING) recorder.insert_result(operation.value, result, task.timestamp,", "%s.%s. HTTP status: %s. Error: %s', constants.DATASET_ID_FOR_PROCESSING, table_id, http_error.resp.status, http_error.resp.reason)", "%s. Error: %s', constants.DATASET_ID_FOR_PROCESSING, table_id, http_error.resp.status, http_error.resp.reason) raise return list(items_iterator)", "logging.warning( 'Batch #%d with operation %s and initiation timestamp %s", "the results of the Content API for Shopping call. Args:", "= _load_items_from_bigquery(operation, task) except errors.HttpError: return 'Error loading items from", "function: - Loading items to process from BigQuery. 
- Converts", "errors.HttpError as http_error: error_status_code = http_error.resp.status error_reason = http_error.resp.reason result", "list(items_iterator) def _create_optimized_batch(batch: constants.Batch, batch_number: int, operation: constants.Operation) -> constants.Batch:", "error_status_code = http.HTTPStatus.REQUEST_TIMEOUT error_reason = 'Socket timeout' result = _handle_content_api_error(error_status_code,", "Args: operation: The operation to be performed on this batch", "to be optimized. batch_number: The number that identifies this batch.", "the error. batch_num: The batch number. error: The error thrown", "prevent expiring tasks pushed from Task Queue.\"\"\" return _run_process(constants.Operation.PREVENT_EXPIRING) def", "processed in this batch. operation: The operation to be performed", "from models import process_result from models import upload_task app =", "batch_id_to_item_id, method) result = process_result.ProcessResult( successfully_processed_item_ids=successful_item_ids, content_api_failures=item_failures, skipped_item_ids=skipped_item_ids) except errors.HttpError", "operation.value, task.timestamp, error_status_code, error_reason) # If the batch API call", "sends it to Merchant Center. \"\"\" import http import json", "law or agreed to in writing, software # distributed under", "(Merchant Center). - Records the results of the Content API", "processed %s items, failed to process %s items and skipped", "failed. HTTP status: %s. Error: %s', batch_num, operation.value, task.timestamp, error_status_code,", "import process_result from models import upload_task app = flask.Flask(__name__) _logging_client", "%s and initiation timestamp %s will be requeued for retry',", "with operation %s and initiation timestamp %s will be requeued", "timestamp %s will be requeued for retry', batch_num, operation.value, task.timestamp)", "and returns a list of item failures. 
Args: error_status_code: HTTP", "operation %s: 0 items loaded from BigQuery so batch not", "from Task Queue.\"\"\" return _run_process(constants.Operation.PREVENT_EXPIRING) def _run_process(operation: constants.Operation) -> Tuple[str,", "%s successfully processed %s items, failed to process %s items", "%s items.', batch_number, operation.value, task.timestamp, result.get_success_count(), result.get_failure_count(), result.get_skipped_count()) finally: recorder", "= OPERATION_TO_METHOD.get(operation) # Creates batch from items loaded from BigQuery", "number. error: The error thrown by Content API. item_rows: The", "if execution_attempt: return int(execution_attempt) else: return TASK_RETRY_LIMIT if __name__ ==", "Shopping (Merchant Center). - Records the results of the Content", "be performed on this batch of items. task: The Cloud", "try: optimization_client = shoptimizer_client.ShoptimizerClient( batch_number, operation) except (OSError, ValueError): return", "API. Args: batch: The batch of product data to be", "for item_row in item_rows ] api_result = process_result.ProcessResult([], item_failures, [])", "int, operation: constants.Operation) -> constants.Batch: \"\"\"Creates an optimized batch by", "items from BigQuery. Args: operation: The operation to be performed", "items and skipped %s items.', batch_number, operation.value, task.timestamp, result.get_success_count(), result.get_failure_count(),", "header does not exist, it means the request did not", "the License. \"\"\"Uploader module that handles batch jobs sent from", "socket.timeout as timeout_error: error_status_code = http.HTTPStatus.REQUEST_TIMEOUT error_reason = 'Socket timeout'", "for the error. batch_num: The batch number. error: The error", "Returns: The result of HTTP request. 
\"\"\" request_body = json.loads(flask.request.data.decode('utf-8'))", "task) return error_reason, error_status_code except socket.timeout as timeout_error: error_status_code =", "error_reason = http_error.resp.reason result = _handle_content_api_error(error_status_code, error_reason, batch_number, http_error, items,", "The list of items that failed due to the error,", "\"\"\" logging.warning( 'Batch #%d with operation %s and initiation timestamp", "retry limit. Returns: int, the number of times this task", "def _handle_content_api_error( error_status_code: int, error_reason: str, batch_num: int, error: Exception,", "items, failed to process %s items and skipped %s items.',", "loaded from BigQuery original_batch, skipped_item_ids, batch_id_to_item_id = batch_creator.create_batch( batch_number, items,", "item_failures, []) if content_api_client.suggest_retry( error_status_code) and _get_execution_attempt() < TASK_RETRY_LIMIT: logging.warning(", "may obtain a copy of the License at # #", "error_reason = 'Socket timeout' result = _handle_content_api_error(error_status_code, error_reason, batch_number, timeout_error,", "HTTP status: %s. Error: %s', batch_num, operation.value, task.timestamp, error_status_code, error_reason)", "When tasks are enqueued to Task Queue by initiator, this", "The batch number. error: The error thrown by Content API.", "items being processed in this batch. operation: The operation to", "batch return optimization_client.shoptimize(batch) def _handle_content_api_error( error_status_code: int, error_reason: str, batch_num:", "item_failures = api_client.process_items( batch_to_send_to_content_api, batch_number, batch_id_to_item_id, method) result = process_result.ProcessResult(", "optimized batch by calling the Shoptimizer API. 
Args: batch: The", "import bigquery from google.cloud import logging as cloud_logging from googleapiclient", "= upload_task.UploadTask.from_json(request_body) if task.batch_size == 0: return 'OK', http.HTTPStatus.OK batch_number", "error_status_code else: logging.info( 'Batch #%d with operation %s and initiation", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "] api_result = process_result.ProcessResult([], item_failures, []) if content_api_client.suggest_retry( error_status_code) and", "Copyright 2021 Google LLC. # # Licensed under the Apache", "bigquery from google.cloud import logging as cloud_logging from googleapiclient import", "appengine/initiator/queue.yaml. TASK_RETRY_LIMIT = 5 @app.route('/insert_items', methods=['POST']) def run_insert_process() -> Tuple[str,", "= cloud_logging.Client() _logging_client.setup_logging(log_level=logging.DEBUG) _SHOPTIMIZER_CONFIG_FILE_PATH = 'config/shoptimizer_config.json' OPERATION_TO_METHOD = { constants.Operation.UPSERT:", "batch_number: The number that identifies this batch. operation: The operation", "(upsert, delete, prevent_expiring). Returns: The batch returned from the Shoptimizer", "initiated this request. Returns: The list of items loaded from", "http_error.resp.reason) raise return list(items_iterator) def _create_optimized_batch(batch: constants.Batch, batch_number: int, operation:", "may not use this file except in compliance with the", "json.loads(flask.request.data.decode('utf-8')) task = upload_task.UploadTask.from_json(request_body) if task.batch_size == 0: return 'OK',", "to Merchant Center. \"\"\" import http import json import logging", "this is the last retry for alerting purposes. 
# Should", "with operation %s and initiation timestamp %s successfully processed %s", "error_reason, error_status_code except socket.timeout as timeout_error: error_status_code = http.HTTPStatus.REQUEST_TIMEOUT error_reason", "Returns: The list of items that failed due to the", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "sent to Content API for Shopping. - Sending items to", "this file except in compliance with the License. # You", "This module receives batch jobs from TaskQueue. For each job,", "result = process_result.ProcessResult([], [], []) try: if not items: logging.error(", "Sends batch of items to Content API for Shopping api_client", "http_error.resp.reason result = _handle_content_api_error(error_status_code, error_reason, batch_number, http_error, items, operation, task)", "result_recorder.ResultRecorder.from_service_account_json( constants.GCP_SERVICE_ACCOUNT_PATH, constants.DATASET_ID_FOR_MONITORING, constants.TABLE_ID_FOR_RESULT_COUNTS_MONITORING, constants.TABLE_ID_FOR_ITEM_RESULTS_MONITORING) recorder.insert_result(operation.value, result, task.timestamp, batch_number) return", "Queue. This module receives batch jobs from TaskQueue. For each", "Type of operation to perform on the items. Returns: The", "batch of items. task: The Cloud Task object that initiated", "used when running locally. Gunicorn is used to run the", "constants.Operation.PREVENT_EXPIRING: constants.Method.INSERT } # Used to check if this is", "operation: Type of operation to perform on the items. Returns:", "of item failures. 
Args: error_status_code: HTTP status code from Content", "error_reason, batch_number, timeout_error, items, operation, task) return error_reason, error_status_code else:", "str, batch_num: int, error: Exception, item_rows: List[bigquery.Row], operation: constants.Operation, task:", "and constants.SHOPTIMIZER_API_INTEGRATION_ON: batch_to_send_to_content_api = _create_optimized_batch( original_batch, batch_number, operation) else: batch_to_send_to_content_api", "_load_items_from_bigquery( operation: constants.Operation, task: upload_task.UploadTask) -> List[bigquery.Row]: \"\"\"Loads items from", "flask from google.cloud import bigquery from google.cloud import logging as", "API. item_rows: The items being processed in this batch. operation:", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "result.get_skipped_count()) finally: recorder = result_recorder.ResultRecorder.from_service_account_json( constants.GCP_SERVICE_ACCOUNT_PATH, constants.DATASET_ID_FOR_MONITORING, constants.TABLE_ID_FOR_RESULT_COUNTS_MONITORING, constants.TABLE_ID_FOR_ITEM_RESULTS_MONITORING) recorder.insert_result(operation.value,", "Content API. item_rows: The items being processed in this batch.", "process', http.HTTPStatus.OK method = OPERATION_TO_METHOD.get(operation) # Creates batch from items", "to be performed on this batch (upsert, delete, prevent_expiring). Returns:", "'X-AppEngine-TaskExecutionCount', '') if execution_attempt: return int(execution_attempt) else: return TASK_RETRY_LIMIT if", "# # Licensed under the Apache License, Version 2.0 (the", "jobs sent from Task Queue. This module receives batch jobs", "operation %s and initiation timestamp %s successfully processed %s items,", "execution_attempt: return int(execution_attempt) else: return TASK_RETRY_LIMIT if __name__ == '__main__':", "file except in compliance with the License. 
# You may", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "task.timestamp) try: items = _load_items_from_bigquery(operation, task) except errors.HttpError: return 'Error", "-> List[bigquery.Row]: \"\"\"Loads items from BigQuery. Args: operation: The operation", "network related errors returned from Content API and returns a", "method) result = process_result.ProcessResult( successfully_processed_item_ids=successful_item_ids, content_api_failures=item_failures, skipped_item_ids=skipped_item_ids) except errors.HttpError as", "that identifies this batch. operation: The operation to be performed", "to Task Queue by initiator, this method will be called.", "did not come from Cloud Tasks. In this case, there", "logging.info( '%s started. Batch #%d info: start_index: %d, batch_size: %d,'", "items. task: The Cloud Task object that initiated this request.", "exist, it means the request did not come from Cloud", "API call received an HttpError, mark every id as failed.", "optimized. batch_number: The number that identifies this batch. operation: The", "%s', constants.DATASET_ID_FOR_PROCESSING, table_id, http_error.resp.status, http_error.resp.reason) raise return list(items_iterator) def _create_optimized_batch(batch:", "http.HTTPStatus.INTERNAL_SERVER_ERROR result = process_result.ProcessResult([], [], []) try: if not items:", "if __name__ == '__main__': # This is used when running", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "run_delete_process() -> Tuple[str, http.HTTPStatus]: \"\"\"Handles deleting tasks pushed from Task", "app = flask.Flask(__name__) _logging_client = cloud_logging.Client() _logging_client.setup_logging(log_level=logging.DEBUG) _SHOPTIMIZER_CONFIG_FILE_PATH = 'config/shoptimizer_config.json'", "%s items and skipped %s items.', batch_number, operation.value, task.timestamp, result.get_success_count(),", "Shopping call. 
Args: operation: Type of operation to perform on", "operation: constants.Operation, task: upload_task.UploadTask) -> List[bigquery.Row]: \"\"\"Loads items from BigQuery.", "= f'process_items_to_{operation.value}_{task.timestamp}' bq_client = bigquery_client.BigQueryClient.from_service_account_json( constants.GCP_SERVICE_ACCOUNT_PATH, constants.DATASET_ID_FOR_PROCESSING, table_id) try: items_iterator", "# coding=utf-8 # Copyright 2021 Google LLC. # # Licensed", "method will be called. It extracts necessary information from a", "items to Content API for Shopping api_client = content_api_client.ContentApiClient() successful_item_ids,", "http_error, items, operation, task) return error_reason, error_status_code except socket.timeout as", "task.batch_size) except errors.HttpError as http_error: logging.exception( 'Error loading items from", "constants.Batch, batch_number: int, operation: constants.Operation) -> constants.Batch: \"\"\"Creates an optimized", "-> Tuple[str, http.HTTPStatus]: \"\"\"Handles prevent expiring tasks pushed from Task", "the number of times this task has previously been executed.", "deleting tasks pushed from Task Queue.\"\"\" return _run_process(constants.Operation.DELETE) @app.route('/prevent_expiring_items', methods=['POST'])", "module that handles batch jobs sent from Task Queue. This", "of items that failed due to the error, wrapped in", "\"\"\" import http import json import logging import socket from", "error_status_code) and _get_execution_attempt() < TASK_RETRY_LIMIT: logging.warning( 'Batch #%d with operation", "= 'Socket timeout' result = _handle_content_api_error(error_status_code, error_reason, batch_number, timeout_error, items,", "number of times this task has previously been executed. \"\"\"", "def run_delete_process() -> Tuple[str, http.HTTPStatus]: \"\"\"Handles deleting tasks pushed from", "of product data to be optimized. 
batch_number: The number that", "%s and initiation timestamp %s failed and will not be", "run_insert_process() -> Tuple[str, http.HTTPStatus]: \"\"\"Handles uploading tasks pushed from Task", "return api_result def _get_execution_attempt() -> int: \"\"\"Returns the number of", "It extracts necessary information from a Task Queue message. The", "run_prevent_expiring_process() -> Tuple[str, http.HTTPStatus]: \"\"\"Handles prevent expiring tasks pushed from", "%s. Error: %s', batch_num, operation.value, task.timestamp, error_status_code, error_reason) # If", "int, error_reason: str, batch_num: int, error: Exception, item_rows: List[bigquery.Row], operation:", "that can be sent to Content API for Shopping. -", "loads data from BigQuery and sends it to Merchant Center.", "initiator, this method will be called. It extracts necessary information", "finally: recorder = result_recorder.ResultRecorder.from_service_account_json( constants.GCP_SERVICE_ACCOUNT_PATH, constants.DATASET_ID_FOR_MONITORING, constants.TABLE_ID_FOR_RESULT_COUNTS_MONITORING, constants.TABLE_ID_FOR_ITEM_RESULTS_MONITORING) recorder.insert_result(operation.value, result,", "logging.error( 'Batch #%d, operation %s: 0 items loaded from BigQuery", "the Shoptimizer API Client. \"\"\" try: optimization_client = shoptimizer_client.ShoptimizerClient( batch_number,", "TASK_RETRY_LIMIT: logging.warning( 'Batch #%d with operation %s and initiation timestamp", "not items: logging.error( 'Batch #%d, operation %s: 0 items loaded", "import result_recorder import shoptimizer_client from models import failure from models", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "skipped %s items.', batch_number, operation.value, task.timestamp, result.get_success_count(), result.get_failure_count(), result.get_skipped_count()) finally:", "with operation %s and initiation timestamp %s failed. 
HTTP status:", "# Optimizes batch via Shoptimizer for upsert/prevent_expiring operations if operation", "operation to be performed on this batch of items. task:", "tasks pushed from Task Queue. When tasks are enqueued to", "table_id = f'process_items_to_{operation.value}_{task.timestamp}' bq_client = bigquery_client.BigQueryClient.from_service_account_json( constants.GCP_SERVICE_ACCOUNT_PATH, constants.DATASET_ID_FOR_PROCESSING, table_id) try:", "task: The Cloud Task object that initiated this request. Returns:", "= bigquery_client.BigQueryClient.from_service_account_json( constants.GCP_SERVICE_ACCOUNT_PATH, constants.DATASET_ID_FOR_PROCESSING, table_id) try: items_iterator = bq_client.load_items(task.start_index, task.batch_size)", "operation: constants.Operation) -> constants.Batch: \"\"\"Creates an optimized batch by calling", "'No items to process', http.HTTPStatus.OK method = OPERATION_TO_METHOD.get(operation) # Creates", "logging import socket from typing import List, Tuple import flask", "Shoptimizer API. Args: batch: The batch of product data to", "batch_num, operation.value, task.timestamp) else: logging.error( 'Batch #%d with operation %s", "task.timestamp) else: logging.error( 'Batch #%d with operation %s and initiation", "'Batch #%d with operation %s and initiation timestamp %s will", "int, the number of times this task has previously been", "constants.DATASET_ID_FOR_PROCESSING, table_id, http_error.resp.status, http_error.resp.reason) raise return list(items_iterator) def _create_optimized_batch(batch: constants.Batch,", "batch returned from the Shoptimizer API Client. \"\"\" try: optimization_client", "or implied. 
# See the License for the specific language", "'config/shoptimizer_config.json' OPERATION_TO_METHOD = { constants.Operation.UPSERT: constants.Method.INSERT, constants.Operation.DELETE: constants.Method.DELETE, constants.Operation.PREVENT_EXPIRING: constants.Method.INSERT", "Shopping api_client = content_api_client.ContentApiClient() successful_item_ids, item_failures = api_client.process_items( batch_to_send_to_content_api, batch_number,", "= process_result.ProcessResult( successfully_processed_item_ids=successful_item_ids, content_api_failures=item_failures, skipped_item_ids=skipped_item_ids) except errors.HttpError as http_error: error_status_code", "this batch. operation: The operation to be performed on this", "items, method) # Optimizes batch via Shoptimizer for upsert/prevent_expiring operations", "set execution attempt to the retry limit. Returns: int, the", "loading items from %s.%s. HTTP status: %s. Error: %s', constants.DATASET_ID_FOR_PROCESSING,", "KIND, either express or implied. # See the License for", "specific language governing permissions and # limitations under the License.", "method = OPERATION_TO_METHOD.get(operation) # Creates batch from items loaded from", "the module loads data from BigQuery and sends it to", "necessary information from a Task Queue message. The following processes", "batch_number, http_error, items, operation, task) return error_reason, error_status_code except socket.timeout", "logging.info( 'Batch #%d with operation %s and initiation timestamp %s", "\"\"\" try: optimization_client = shoptimizer_client.ShoptimizerClient( batch_number, operation) except (OSError, ValueError):", "failures. Args: error_status_code: HTTP status code from Content API. error_reason:", "from BigQuery. 
- Converts items into a batch that can", "return 'Error loading items from BigQuery', http.HTTPStatus.INTERNAL_SERVER_ERROR result = process_result.ProcessResult([],", "int(execution_attempt) else: return TASK_RETRY_LIMIT if __name__ == '__main__': # This", "'OK', http.HTTPStatus.OK batch_number = int(task.start_index / task.batch_size) + 1 logging.info(", "method) # Optimizes batch via Shoptimizer for upsert/prevent_expiring operations if", "f'process_items_to_{operation.value}_{task.timestamp}' bq_client = bigquery_client.BigQueryClient.from_service_account_json( constants.GCP_SERVICE_ACCOUNT_PATH, constants.DATASET_ID_FOR_PROCESSING, table_id) try: items_iterator =", "error) return api_result def _get_execution_attempt() -> int: \"\"\"Returns the number", "# application on Google App Engine. See entrypoint in app.yaml.", "to run the # application on Google App Engine. See", "result = process_result.ProcessResult( successfully_processed_item_ids=successful_item_ids, content_api_failures=item_failures, skipped_item_ids=skipped_item_ids) except errors.HttpError as http_error:", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "batch. operation: The operation to be performed on this batch", "Task Queue by initiator, this method will be called. It", "content_api_failures=item_failures, skipped_item_ids=skipped_item_ids) except errors.HttpError as http_error: error_status_code = http_error.resp.status error_reason", "enqueued to Task Queue by initiator, this method will be", "'OK', http.HTTPStatus.OK def _load_items_from_bigquery( operation: constants.Operation, task: upload_task.UploadTask) -> List[bigquery.Row]:", "Records the results of the Content API for Shopping call.", "message. 
The following processes are executed in this function: -", "operation.value, task.timestamp) else: logging.error( 'Batch #%d with operation %s and", "error_status_code: int, error_reason: str, batch_num: int, error: Exception, item_rows: List[bigquery.Row],", "# limitations under the License. \"\"\"Uploader module that handles batch", "from Content API. error_reason: The reason for the error. batch_num:", "batch_size: %d,' 'initiation timestamp: %s', operation.value, batch_number, task.start_index, task.batch_size, task.timestamp)", "of operation to perform on the items. Returns: The result", "_handle_content_api_error(error_status_code, error_reason, batch_number, http_error, items, operation, task) return error_reason, error_status_code", "(the \"License\"); # you may not use this file except", "import json import logging import socket from typing import List,", "from Task Queue.\"\"\" return _run_process(constants.Operation.DELETE) @app.route('/prevent_expiring_items', methods=['POST']) def run_prevent_expiring_process() ->", "# you may not use this file except in compliance", "@app.route('/prevent_expiring_items', methods=['POST']) def run_prevent_expiring_process() -> Tuple[str, http.HTTPStatus]: \"\"\"Handles prevent expiring", "call. Args: operation: Type of operation to perform on the", "means the request did not come from Cloud Tasks. 
In", "operation) else: batch_to_send_to_content_api = original_batch # Sends batch of items", "def _create_optimized_batch(batch: constants.Batch, batch_number: int, operation: constants.Operation) -> constants.Batch: \"\"\"Creates", "Tuple[str, http.HTTPStatus]: \"\"\"Handles deleting tasks pushed from Task Queue.\"\"\" return", "_get_execution_attempt() -> int: \"\"\"Returns the number of times this task", "of items to Content API for Shopping api_client = content_api_client.ContentApiClient()", "Batch #%d info: start_index: %d, batch_size: %d,' 'initiation timestamp: %s',", "error_status_code = http_error.resp.status error_reason = http_error.resp.reason result = _handle_content_api_error(error_status_code, error_reason,", "# This is used when running locally. Gunicorn is used", "Returns: The batch returned from the Shoptimizer API Client. \"\"\"", "of items loaded from BigQuery. \"\"\" table_id = f'process_items_to_{operation.value}_{task.timestamp}' bq_client", "by initiator, this method will be called. It extracts necessary", "flask.request.headers.get( 'X-AppEngine-TaskExecutionCount', '') if execution_attempt: return int(execution_attempt) else: return TASK_RETRY_LIMIT", "item_rows: The items being processed in this batch. operation: The", "batch of product data to be optimized. batch_number: The number", "Gunicorn is used to run the # application on Google", "code from Content API. error_reason: The reason for the error.", "item failures. Args: error_status_code: HTTP status code from Content API.", "from %s.%s. HTTP status: %s. Error: %s', constants.DATASET_ID_FOR_PROCESSING, table_id, http_error.resp.status,", "delete, prevent_expiring). Returns: The batch returned from the Shoptimizer API", "# Used to check if this is the last retry", "are enqueued to Task Queue by initiator, this method will", "to process %s items and skipped %s items.', batch_number, operation.value,", "previously been executed. 
If the execution count header does not", "to process from BigQuery. - Converts items into a batch", "# # Unless required by applicable law or agreed to", "Returns: The list of items loaded from BigQuery. \"\"\" table_id", "http.HTTPStatus]: \"\"\"Handles prevent expiring tasks pushed from Task Queue.\"\"\" return", "%s: 0 items loaded from BigQuery so batch not sent", "import logging as cloud_logging from googleapiclient import errors import batch_creator", "batch_number, operation.value, task.start_index, task.batch_size, task.timestamp) return 'No items to process',", "http.HTTPStatus.OK method = OPERATION_TO_METHOD.get(operation) # Creates batch from items loaded", "\"\"\"Creates an optimized batch by calling the Shoptimizer API. Args:", "initiated this request. Returns: The list of items that failed", "execution_attempt = flask.request.headers.get( 'X-AppEngine-TaskExecutionCount', '') if execution_attempt: return int(execution_attempt) else:", "API and returns a list of item failures. Args: error_status_code:", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "request. \"\"\" request_body = json.loads(flask.request.data.decode('utf-8')) task = upload_task.UploadTask.from_json(request_body) if task.batch_size", "for Shopping (Merchant Center). - Records the results of the", "0: return 'OK', http.HTTPStatus.OK batch_number = int(task.start_index / task.batch_size) +", "'Missing ID')), error_reason) for item_row in item_rows ] api_result =", "prevent_expiring). 
Returns: The batch returned from the Shoptimizer API Client.", "the execution count header does not exist, it means the", "google.cloud import logging as cloud_logging from googleapiclient import errors import", "Version 2.0 (the \"License\"); # you may not use this", "TASK_RETRY_LIMIT = 5 @app.route('/insert_items', methods=['POST']) def run_insert_process() -> Tuple[str, http.HTTPStatus]:", "5 @app.route('/insert_items', methods=['POST']) def run_insert_process() -> Tuple[str, http.HTTPStatus]: \"\"\"Handles uploading", "Returns: int, the number of times this task has previously", "initiation timestamp %s failed. HTTP status: %s. Error: %s', batch_num,", "that handles batch jobs sent from Task Queue. This module", "the error, wrapped in a process_result. \"\"\" logging.warning( 'Batch #%d", "as cloud_logging from googleapiclient import errors import batch_creator import bigquery_client", "def _run_process(operation: constants.Operation) -> Tuple[str, http.HTTPStatus]: \"\"\"Handles tasks pushed from", "api_client.process_items( batch_to_send_to_content_api, batch_number, batch_id_to_item_id, method) result = process_result.ProcessResult( successfully_processed_item_ids=successful_item_ids, content_api_failures=item_failures,", "items from BigQuery', http.HTTPStatus.INTERNAL_SERVER_ERROR result = process_result.ProcessResult([], [], []) try:", "Error: %s', batch_num, operation.value, task.timestamp, error_status_code, error_reason) # If the", "if task.batch_size == 0: return 'OK', http.HTTPStatus.OK batch_number = int(task.start_index", "%s failed and will not be retried. Error: %s', batch_num,", "\"\"\"Handles tasks pushed from Task Queue. 
When tasks are enqueued", "with operation %s and initiation timestamp %s failed and will", "@app.route('/insert_items', methods=['POST']) def run_insert_process() -> Tuple[str, http.HTTPStatus]: \"\"\"Handles uploading tasks", "if content_api_client.suggest_retry( error_status_code) and _get_execution_attempt() < TASK_RETRY_LIMIT: logging.warning( 'Batch #%d", "operation %s and initiation timestamp %s failed. HTTP status: %s.", "implied. # See the License for the specific language governing", "identifies this batch. operation: The operation to be performed on", "Error: %s', batch_num, operation.value, task.timestamp, error) return api_result def _get_execution_attempt()", "so set execution attempt to the retry limit. Returns: int,", "performed on this batch of items. task: The Cloud Task", "under the Apache License, Version 2.0 (the \"License\"); # you", "#%d info: start_index: %d, batch_size: %d,' 'initiation timestamp: %s', operation.value,", "to be performed on this batch of items. task: The", "Args: error_status_code: HTTP status code from Content API. error_reason: The", "it means the request did not come from Cloud Tasks.", "this task has previously been executed. \"\"\" execution_attempt = flask.request.headers.get(", "in this function: - Loading items to process from BigQuery.", "Center). 
- Records the results of the Content API for", "= process_result.ProcessResult([], [], []) try: if not items: logging.error( 'Batch", "batch_num: int, error: Exception, item_rows: List[bigquery.Row], operation: constants.Operation, task: upload_task.UploadTask)", "API for Shopping api_client = content_api_client.ContentApiClient() successful_item_ids, item_failures = api_client.process_items(", "job, the module loads data from BigQuery and sends it", "from typing import List, Tuple import flask from google.cloud import", "by applicable law or agreed to in writing, software #", "no retry, so set execution attempt to the retry limit.", "operation.value, batch_number, task.start_index, task.batch_size, task.timestamp) try: items = _load_items_from_bigquery(operation, task)", "return _run_process(constants.Operation.PREVENT_EXPIRING) def _run_process(operation: constants.Operation) -> Tuple[str, http.HTTPStatus]: \"\"\"Handles tasks", "[]) if content_api_client.suggest_retry( error_status_code) and _get_execution_attempt() < TASK_RETRY_LIMIT: logging.warning( 'Batch", "API Client. \"\"\" try: optimization_client = shoptimizer_client.ShoptimizerClient( batch_number, operation) except", "from BigQuery and sends it to Merchant Center. \"\"\" import", "check if this is the last retry for alerting purposes.", "batch jobs from TaskQueue. For each job, the module loads", "The error thrown by Content API. item_rows: The items being", "result_recorder import shoptimizer_client from models import failure from models import", "original_batch, skipped_item_ids, batch_id_to_item_id = batch_creator.create_batch( batch_number, items, method) # Optimizes", "item_rows: List[bigquery.Row], operation: constants.Operation, task: upload_task.UploadTask) -> process_result.ProcessResult: \"\"\"Logs network", "HTTP status code from Content API. error_reason: The reason for", "will not be retried. Error: %s', batch_num, operation.value, task.timestamp, error)", "be optimized. 
batch_number: The number that identifies this batch. operation:", "errors.HttpError: return 'Error loading items from BigQuery', http.HTTPStatus.INTERNAL_SERVER_ERROR result =", "_create_optimized_batch( original_batch, batch_number, operation) else: batch_to_send_to_content_api = original_batch # Sends", "'Socket timeout' result = _handle_content_api_error(error_status_code, error_reason, batch_number, timeout_error, items, operation,", "# If the batch API call received an HttpError, mark", "and will not be retried. Error: %s', batch_num, operation.value, task.timestamp,", "%s', batch_num, operation.value, task.timestamp, error) return api_result def _get_execution_attempt() ->", "will be called. It extracts necessary information from a Task", "raise return list(items_iterator) def _create_optimized_batch(batch: constants.Batch, batch_number: int, operation: constants.Operation)", "BigQuery original_batch, skipped_item_ids, batch_id_to_item_id = batch_creator.create_batch( batch_number, items, method) #", "be retried. Error: %s', batch_num, operation.value, task.timestamp, error) return api_result", "call received an HttpError, mark every id as failed. item_failures", "Cloud Task object that initiated this request. Returns: The list", "\"\"\" table_id = f'process_items_to_{operation.value}_{task.timestamp}' bq_client = bigquery_client.BigQueryClient.from_service_account_json( constants.GCP_SERVICE_ACCOUNT_PATH, constants.DATASET_ID_FOR_PROCESSING, table_id)", "return batch return optimization_client.shoptimize(batch) def _handle_content_api_error( error_status_code: int, error_reason: str,", "failure.Failure(str(item_row.get('item_id', 'Missing ID')), error_reason) for item_row in item_rows ] api_result", "errors.HttpError as http_error: logging.exception( 'Error loading items from %s.%s. HTTP", "execution count header does not exist, it means the request", "Task Queue message. The following processes are executed in this", "from Task Queue. 
This module receives batch jobs from TaskQueue.", "the request did not come from Cloud Tasks. In this", "a Task Queue message. The following processes are executed in", "Google App Engine. See entrypoint in app.yaml. app.run(host='127.0.0.1', port=8080, debug=True)", "result.get_failure_count(), result.get_skipped_count()) finally: recorder = result_recorder.ResultRecorder.from_service_account_json( constants.GCP_SERVICE_ACCOUNT_PATH, constants.DATASET_ID_FOR_MONITORING, constants.TABLE_ID_FOR_RESULT_COUNTS_MONITORING, constants.TABLE_ID_FOR_ITEM_RESULTS_MONITORING)", "task.start_index, task.batch_size, task.timestamp) return 'No items to process', http.HTTPStatus.OK method", "flask.Flask(__name__) _logging_client = cloud_logging.Client() _logging_client.setup_logging(log_level=logging.DEBUG) _SHOPTIMIZER_CONFIG_FILE_PATH = 'config/shoptimizer_config.json' OPERATION_TO_METHOD =", "from TaskQueue. For each job, the module loads data from", "sent to Content API. Start_index: %d, batch_size: %d,' 'initiation timestamp:", "this request. Returns: The list of items loaded from BigQuery.", "and # limitations under the License. \"\"\"Uploader module that handles", "= bq_client.load_items(task.start_index, task.batch_size) except errors.HttpError as http_error: logging.exception( 'Error loading", "methods=['POST']) def run_prevent_expiring_process() -> Tuple[str, http.HTTPStatus]: \"\"\"Handles prevent expiring tasks", "#%d with operation %s and initiation timestamp %s failed. HTTP", "tasks pushed from Task Queue.\"\"\" return _run_process(constants.Operation.UPSERT) @app.route('/delete_items', methods=['POST']) def", "item_rows ] api_result = process_result.ProcessResult([], item_failures, []) if content_api_client.suggest_retry( error_status_code)", "'%s started. Batch #%d info: start_index: %d, batch_size: %d,' 'initiation", "List[bigquery.Row]: \"\"\"Loads items from BigQuery. 
Args: operation: The operation to", "batch API call received an HttpError, mark every id as", "has previously been executed. \"\"\" execution_attempt = flask.request.headers.get( 'X-AppEngine-TaskExecutionCount', '')", "Should match task_retry_limit in appengine/initiator/queue.yaml. TASK_RETRY_LIMIT = 5 @app.route('/insert_items', methods=['POST'])", "\"\"\" execution_attempt = flask.request.headers.get( 'X-AppEngine-TaskExecutionCount', '') if execution_attempt: return int(execution_attempt)", "run the # application on Google App Engine. See entrypoint", "status code from Content API. error_reason: The reason for the", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "timestamp %s failed and will not be retried. Error: %s',", "{ constants.Operation.UPSERT: constants.Method.INSERT, constants.Operation.DELETE: constants.Method.DELETE, constants.Operation.PREVENT_EXPIRING: constants.Method.INSERT } # Used", "batch by calling the Shoptimizer API. Args: batch: The batch", "Unless required by applicable law or agreed to in writing,", "%d,' 'initiation timestamp: %s', batch_number, operation.value, task.start_index, task.batch_size, task.timestamp) return", "models import process_result from models import upload_task app = flask.Flask(__name__)", "BigQuery. \"\"\" table_id = f'process_items_to_{operation.value}_{task.timestamp}' bq_client = bigquery_client.BigQueryClient.from_service_account_json( constants.GCP_SERVICE_ACCOUNT_PATH, constants.DATASET_ID_FOR_PROCESSING,", "items to process', http.HTTPStatus.OK method = OPERATION_TO_METHOD.get(operation) # Creates batch", "operation: The operation to be performed on this batch (upsert,", "on Google App Engine. See entrypoint in app.yaml. 
app.run(host='127.0.0.1', port=8080,", "_SHOPTIMIZER_CONFIG_FILE_PATH = 'config/shoptimizer_config.json' OPERATION_TO_METHOD = { constants.Operation.UPSERT: constants.Method.INSERT, constants.Operation.DELETE: constants.Method.DELETE,", "items loaded from BigQuery so batch not sent to Content", "operation, task) return error_reason, error_status_code except socket.timeout as timeout_error: error_status_code", "errors import batch_creator import bigquery_client import constants import content_api_client import", "API for Shopping call. Args: operation: Type of operation to", "if operation != constants.Operation.DELETE and constants.SHOPTIMIZER_API_INTEGRATION_ON: batch_to_send_to_content_api = _create_optimized_batch( original_batch,", "language governing permissions and # limitations under the License. \"\"\"Uploader", "result of HTTP request. \"\"\" request_body = json.loads(flask.request.data.decode('utf-8')) task =", "= original_batch # Sends batch of items to Content API", "http import json import logging import socket from typing import", "%d, batch_size: %d,' 'initiation timestamp: %s', batch_number, operation.value, task.start_index, task.batch_size,", "from Task Queue. When tasks are enqueued to Task Queue", "the specific language governing permissions and # limitations under the", "and skipped %s items.', batch_number, operation.value, task.timestamp, result.get_success_count(), result.get_failure_count(), result.get_skipped_count())", "failed and will not be retried. Error: %s', batch_num, operation.value,", "perform on the items. Returns: The result of HTTP request.", "timestamp: %s', batch_number, operation.value, task.start_index, task.batch_size, task.timestamp) return 'No items", "BigQuery. Args: operation: The operation to be performed on this", "applicable law or agreed to in writing, software # distributed", "The batch returned from the Shoptimizer API Client. \"\"\" try:", "= int(task.start_index / task.batch_size) + 1 logging.info( '%s started. 
Batch", "API for Shopping (Merchant Center). - Records the results of", "module loads data from BigQuery and sends it to Merchant", "json import logging import socket from typing import List, Tuple", "= 'config/shoptimizer_config.json' OPERATION_TO_METHOD = { constants.Operation.UPSERT: constants.Method.INSERT, constants.Operation.DELETE: constants.Method.DELETE, constants.Operation.PREVENT_EXPIRING:", "by Content API. item_rows: The items being processed in this", "and initiation timestamp %s successfully processed %s items, failed to", "it to Merchant Center. \"\"\" import http import json import", "_run_process(operation: constants.Operation) -> Tuple[str, http.HTTPStatus]: \"\"\"Handles tasks pushed from Task", "from BigQuery', http.HTTPStatus.INTERNAL_SERVER_ERROR result = process_result.ProcessResult([], [], []) try: if", "%s and initiation timestamp %s successfully processed %s items, failed", "task.start_index, task.batch_size, task.timestamp) try: items = _load_items_from_bigquery(operation, task) except errors.HttpError:", "item_failures = [ failure.Failure(str(item_row.get('item_id', 'Missing ID')), error_reason) for item_row in", "previously been executed. \"\"\" execution_attempt = flask.request.headers.get( 'X-AppEngine-TaskExecutionCount', '') if", "tasks pushed from Task Queue.\"\"\" return _run_process(constants.Operation.PREVENT_EXPIRING) def _run_process(operation: constants.Operation)", "limitations under the License. \"\"\"Uploader module that handles batch jobs", "import upload_task app = flask.Flask(__name__) _logging_client = cloud_logging.Client() _logging_client.setup_logging(log_level=logging.DEBUG) _SHOPTIMIZER_CONFIG_FILE_PATH", "for alerting purposes. # Should match task_retry_limit in appengine/initiator/queue.yaml. TASK_RETRY_LIMIT", "to Content API for Shopping (Merchant Center). 
- Records the", "in writing, software # distributed under the License is distributed", "operation.value, task.timestamp, error) return api_result def _get_execution_attempt() -> int: \"\"\"Returns", "be sent to Content API for Shopping. - Sending items", "result, task.timestamp, batch_number) return 'OK', http.HTTPStatus.OK def _load_items_from_bigquery( operation: constants.Operation,", "timestamp %s failed. HTTP status: %s. Error: %s', batch_num, operation.value,", "api_client = content_api_client.ContentApiClient() successful_item_ids, item_failures = api_client.process_items( batch_to_send_to_content_api, batch_number, batch_id_to_item_id,", "related errors returned from Content API and returns a list", "content_api_client import result_recorder import shoptimizer_client from models import failure from", "shoptimizer_client.ShoptimizerClient( batch_number, operation) except (OSError, ValueError): return batch return optimization_client.shoptimize(batch)", "items from %s.%s. HTTP status: %s. Error: %s', constants.DATASET_ID_FOR_PROCESSING, table_id,", "retried. Error: %s', batch_num, operation.value, task.timestamp, error) return api_result def", "Sending items to Content API for Shopping (Merchant Center). -", "1 logging.info( '%s started. Batch #%d info: start_index: %d, batch_size:", "cloud_logging from googleapiclient import errors import batch_creator import bigquery_client import", "this batch (upsert, delete, prevent_expiring). Returns: The batch returned from", "def _load_items_from_bigquery( operation: constants.Operation, task: upload_task.UploadTask) -> List[bigquery.Row]: \"\"\"Loads items", "batch number. error: The error thrown by Content API. item_rows:", "to Content API for Shopping. 
- Sending items to Content", "batch from items loaded from BigQuery original_batch, skipped_item_ids, batch_id_to_item_id =", "to process', http.HTTPStatus.OK method = OPERATION_TO_METHOD.get(operation) # Creates batch from", "= batch_creator.create_batch( batch_number, items, method) # Optimizes batch via Shoptimizer", "this function: - Loading items to process from BigQuery. -", "timestamp: %s', operation.value, batch_number, task.start_index, task.batch_size, task.timestamp) try: items =", "original_batch, batch_number, operation) else: batch_to_send_to_content_api = original_batch # Sends batch", "import flask from google.cloud import bigquery from google.cloud import logging", "typing import List, Tuple import flask from google.cloud import bigquery", "successful_item_ids, item_failures = api_client.process_items( batch_to_send_to_content_api, batch_number, batch_id_to_item_id, method) result =", "optimization_client.shoptimize(batch) def _handle_content_api_error( error_status_code: int, error_reason: str, batch_num: int, error:", "task: upload_task.UploadTask) -> List[bigquery.Row]: \"\"\"Loads items from BigQuery. 
Args: operation:", "processes are executed in this function: - Loading items to", "in item_rows ] api_result = process_result.ProcessResult([], item_failures, []) if content_api_client.suggest_retry(", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "from items loaded from BigQuery original_batch, skipped_item_ids, batch_id_to_item_id = batch_creator.create_batch(", "error_reason: str, batch_num: int, error: Exception, item_rows: List[bigquery.Row], operation: constants.Operation,", "Tuple[str, http.HTTPStatus]: \"\"\"Handles uploading tasks pushed from Task Queue.\"\"\" return", "Exception, item_rows: List[bigquery.Row], operation: constants.Operation, task: upload_task.UploadTask) -> process_result.ProcessResult: \"\"\"Logs", "License, Version 2.0 (the \"License\"); # you may not use", "- Sending items to Content API for Shopping (Merchant Center).", "/ task.batch_size) + 1 logging.info( '%s started. Batch #%d info:", "%s', operation.value, batch_number, task.start_index, task.batch_size, task.timestamp) try: items = _load_items_from_bigquery(operation,", "# You may obtain a copy of the License at", "'initiation timestamp: %s', batch_number, operation.value, task.start_index, task.batch_size, task.timestamp) return 'No", "constants.Operation.DELETE: constants.Method.DELETE, constants.Operation.PREVENT_EXPIRING: constants.Method.INSERT } # Used to check if", "Tuple[str, http.HTTPStatus]: \"\"\"Handles tasks pushed from Task Queue. When tasks", "except errors.HttpError: return 'Error loading items from BigQuery', http.HTTPStatus.INTERNAL_SERVER_ERROR result", "last retry for alerting purposes. # Should match task_retry_limit in", "and sends it to Merchant Center. \"\"\" import http import", "returns a list of item failures. 
Args: error_status_code: HTTP status", "from google.cloud import logging as cloud_logging from googleapiclient import errors", "from google.cloud import bigquery from google.cloud import logging as cloud_logging", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "calling the Shoptimizer API. Args: batch: The batch of product", "in this batch. operation: The operation to be performed on", "constants.Method.DELETE, constants.Operation.PREVENT_EXPIRING: constants.Method.INSERT } # Used to check if this", "return error_reason, error_status_code else: logging.info( 'Batch #%d with operation %s", "by calling the Shoptimizer API. Args: batch: The batch of", "this task has previously been executed. If the execution count", "%s failed. HTTP status: %s. Error: %s', batch_num, operation.value, task.timestamp,", "items into a batch that can be sent to Content", "items, operation, task) return error_reason, error_status_code else: logging.info( 'Batch #%d", "-> Tuple[str, http.HTTPStatus]: \"\"\"Handles tasks pushed from Task Queue. When", "a list of item failures. 
Args: error_status_code: HTTP status code", "and initiation timestamp %s will be requeued for retry', batch_num,", "constants.Operation.UPSERT: constants.Method.INSERT, constants.Operation.DELETE: constants.Method.DELETE, constants.Operation.PREVENT_EXPIRING: constants.Method.INSERT } # Used to", "task.timestamp, result.get_success_count(), result.get_failure_count(), result.get_skipped_count()) finally: recorder = result_recorder.ResultRecorder.from_service_account_json( constants.GCP_SERVICE_ACCOUNT_PATH, constants.DATASET_ID_FOR_MONITORING,", "return error_reason, error_status_code except socket.timeout as timeout_error: error_status_code = http.HTTPStatus.REQUEST_TIMEOUT", "except errors.HttpError as http_error: error_status_code = http_error.resp.status error_reason = http_error.resp.reason", "try: items_iterator = bq_client.load_items(task.start_index, task.batch_size) except errors.HttpError as http_error: logging.exception(", "The result of HTTP request. \"\"\" request_body = json.loads(flask.request.data.decode('utf-8')) task", "tasks are enqueued to Task Queue by initiator, this method", "process_result.ProcessResult( successfully_processed_item_ids=successful_item_ids, content_api_failures=item_failures, skipped_item_ids=skipped_item_ids) except errors.HttpError as http_error: error_status_code =", "-> constants.Batch: \"\"\"Creates an optimized batch by calling the Shoptimizer", "the License for the specific language governing permissions and #", "started. Batch #%d info: start_index: %d, batch_size: %d,' 'initiation timestamp:", "= { constants.Operation.UPSERT: constants.Method.INSERT, constants.Operation.DELETE: constants.Method.DELETE, constants.Operation.PREVENT_EXPIRING: constants.Method.INSERT } #", "Apache License, Version 2.0 (the \"License\"); # you may not", "uploading tasks pushed from Task Queue.\"\"\" return _run_process(constants.Operation.UPSERT) @app.route('/delete_items', methods=['POST'])", "as failed. 
item_failures = [ failure.Failure(str(item_row.get('item_id', 'Missing ID')), error_reason) for", "not be retried. Error: %s', batch_num, operation.value, task.timestamp, error) return", "either express or implied. # See the License for the", "request. Returns: The list of items loaded from BigQuery. \"\"\"", "being processed in this batch. operation: The operation to be", "received an HttpError, mark every id as failed. item_failures =", "start_index: %d, batch_size: %d,' 'initiation timestamp: %s', operation.value, batch_number, task.start_index,", "batch not sent to Content API. Start_index: %d, batch_size: %d,'", "table_id) try: items_iterator = bq_client.load_items(task.start_index, task.batch_size) except errors.HttpError as http_error:", "error_reason, error_status_code else: logging.info( 'Batch #%d with operation %s and", "error_status_code, error_reason) # If the batch API call received an", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "http_error.resp.status, http_error.resp.reason) raise return list(items_iterator) def _create_optimized_batch(batch: constants.Batch, batch_number: int,", "each job, the module loads data from BigQuery and sends", "return 'No items to process', http.HTTPStatus.OK method = OPERATION_TO_METHOD.get(operation) #", "recorder.insert_result(operation.value, result, task.timestamp, batch_number) return 'OK', http.HTTPStatus.OK def _load_items_from_bigquery( operation:", "the batch API call received an HttpError, mark every id", "items = _load_items_from_bigquery(operation, task) except errors.HttpError: return 'Error loading items", "result = _handle_content_api_error(error_status_code, error_reason, batch_number, timeout_error, items, operation, task) return", "from BigQuery. 
Args: operation: The operation to be performed on", "process_result.ProcessResult([], [], []) try: if not items: logging.error( 'Batch #%d,", "return optimization_client.shoptimize(batch) def _handle_content_api_error( error_status_code: int, error_reason: str, batch_num: int,", "= process_result.ProcessResult([], item_failures, []) if content_api_client.suggest_retry( error_status_code) and _get_execution_attempt() <", "retry, so set execution attempt to the retry limit. Returns:", "error: Exception, item_rows: List[bigquery.Row], operation: constants.Operation, task: upload_task.UploadTask) -> process_result.ProcessResult:", "Queue by initiator, this method will be called. It extracts", "process_result from models import upload_task app = flask.Flask(__name__) _logging_client =", "# Should match task_retry_limit in appengine/initiator/queue.yaml. TASK_RETRY_LIMIT = 5 @app.route('/insert_items',", "in appengine/initiator/queue.yaml. TASK_RETRY_LIMIT = 5 @app.route('/insert_items', methods=['POST']) def run_insert_process() ->", "task.batch_size, task.timestamp) try: items = _load_items_from_bigquery(operation, task) except errors.HttpError: return", "process from BigQuery. - Converts items into a batch that", "from BigQuery so batch not sent to Content API. 
Start_index:", "\"\"\"Handles deleting tasks pushed from Task Queue.\"\"\" return _run_process(constants.Operation.DELETE) @app.route('/prevent_expiring_items',", "%d,' 'initiation timestamp: %s', operation.value, batch_number, task.start_index, task.batch_size, task.timestamp) try:", "\"\"\"Uploader module that handles batch jobs sent from Task Queue.", "http_error.resp.status error_reason = http_error.resp.reason result = _handle_content_api_error(error_status_code, error_reason, batch_number, http_error,", "#%d with operation %s and initiation timestamp %s will be", "'initiation timestamp: %s', operation.value, batch_number, task.start_index, task.batch_size, task.timestamp) try: items", "from BigQuery original_batch, skipped_item_ids, batch_id_to_item_id = batch_creator.create_batch( batch_number, items, method)", "been executed. If the execution count header does not exist,", "from Cloud Tasks. In this case, there will be no", "process_result. \"\"\" logging.warning( 'Batch #%d with operation %s and initiation", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "skipped_item_ids=skipped_item_ids) except errors.HttpError as http_error: error_status_code = http_error.resp.status error_reason =", "_run_process(constants.Operation.UPSERT) @app.route('/delete_items', methods=['POST']) def run_delete_process() -> Tuple[str, http.HTTPStatus]: \"\"\"Handles deleting", "constants.Operation) -> Tuple[str, http.HTTPStatus]: \"\"\"Handles tasks pushed from Task Queue.", "HTTP status: %s. Error: %s', constants.DATASET_ID_FOR_PROCESSING, table_id, http_error.resp.status, http_error.resp.reason) raise", "for upsert/prevent_expiring operations if operation != constants.Operation.DELETE and constants.SHOPTIMIZER_API_INTEGRATION_ON: batch_to_send_to_content_api", "http.HTTPStatus.OK def _load_items_from_bigquery( operation: constants.Operation, task: upload_task.UploadTask) -> List[bigquery.Row]: \"\"\"Loads", "items to process from BigQuery. 
- Converts items into a", "used to run the # application on Google App Engine.", "Client. \"\"\" try: optimization_client = shoptimizer_client.ShoptimizerClient( batch_number, operation) except (OSError,", "under the License. \"\"\"Uploader module that handles batch jobs sent", "constants.Operation.DELETE and constants.SHOPTIMIZER_API_INTEGRATION_ON: batch_to_send_to_content_api = _create_optimized_batch( original_batch, batch_number, operation) else:", "The number that identifies this batch. operation: The operation to", "on this batch (upsert, delete, prevent_expiring). Returns: The batch returned", "has previously been executed. If the execution count header does", "= http_error.resp.reason result = _handle_content_api_error(error_status_code, error_reason, batch_number, http_error, items, operation,", "import errors import batch_creator import bigquery_client import constants import content_api_client", "bq_client.load_items(task.start_index, task.batch_size) except errors.HttpError as http_error: logging.exception( 'Error loading items", "to check if this is the last retry for alerting", "skipped_item_ids, batch_id_to_item_id = batch_creator.create_batch( batch_number, items, method) # Optimizes batch", "wrapped in a process_result. \"\"\" logging.warning( 'Batch #%d with operation", "Queue.\"\"\" return _run_process(constants.Operation.PREVENT_EXPIRING) def _run_process(operation: constants.Operation) -> Tuple[str, http.HTTPStatus]: \"\"\"Handles", "task_retry_limit in appengine/initiator/queue.yaml. 
TASK_RETRY_LIMIT = 5 @app.route('/insert_items', methods=['POST']) def run_insert_process()", "bq_client = bigquery_client.BigQueryClient.from_service_account_json( constants.GCP_SERVICE_ACCOUNT_PATH, constants.DATASET_ID_FOR_PROCESSING, table_id) try: items_iterator = bq_client.load_items(task.start_index,", "(OSError, ValueError): return batch return optimization_client.shoptimize(batch) def _handle_content_api_error( error_status_code: int,", "constants.Operation, task: upload_task.UploadTask) -> List[bigquery.Row]: \"\"\"Loads items from BigQuery. Args:", "api_result def _get_execution_attempt() -> int: \"\"\"Returns the number of times", "Shoptimizer for upsert/prevent_expiring operations if operation != constants.Operation.DELETE and constants.SHOPTIMIZER_API_INTEGRATION_ON:", "not sent to Content API. Start_index: %d, batch_size: %d,' 'initiation", "_logging_client.setup_logging(log_level=logging.DEBUG) _SHOPTIMIZER_CONFIG_FILE_PATH = 'config/shoptimizer_config.json' OPERATION_TO_METHOD = { constants.Operation.UPSERT: constants.Method.INSERT, constants.Operation.DELETE:", "task.batch_size) + 1 logging.info( '%s started. Batch #%d info: start_index:", "_create_optimized_batch(batch: constants.Batch, batch_number: int, operation: constants.Operation) -> constants.Batch: \"\"\"Creates an", "coding=utf-8 # Copyright 2021 Google LLC. # # Licensed under", "initiation timestamp %s successfully processed %s items, failed to process", "\"License\"); # you may not use this file except in", "process_result.ProcessResult([], item_failures, []) if content_api_client.suggest_retry( error_status_code) and _get_execution_attempt() < TASK_RETRY_LIMIT:", "request. 
Returns: The list of items that failed due to", "into a batch that can be sent to Content API", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "request_body = json.loads(flask.request.data.decode('utf-8')) task = upload_task.UploadTask.from_json(request_body) if task.batch_size == 0:", "attempt to the retry limit. Returns: int, the number of", "= 5 @app.route('/insert_items', methods=['POST']) def run_insert_process() -> Tuple[str, http.HTTPStatus]: \"\"\"Handles", "purposes. # Should match task_retry_limit in appengine/initiator/queue.yaml. TASK_RETRY_LIMIT = 5", "Tuple[str, http.HTTPStatus]: \"\"\"Handles prevent expiring tasks pushed from Task Queue.\"\"\"", "http_error: logging.exception( 'Error loading items from %s.%s. HTTP status: %s.", "-> process_result.ProcessResult: \"\"\"Logs network related errors returned from Content API", "be performed on this batch (upsert, delete, prevent_expiring). Returns: The", "# distributed under the License is distributed on an \"AS", "running locally. Gunicorn is used to run the # application", "_run_process(constants.Operation.PREVENT_EXPIRING) def _run_process(operation: constants.Operation) -> Tuple[str, http.HTTPStatus]: \"\"\"Handles tasks pushed", "# Unless required by applicable law or agreed to in", "import bigquery_client import constants import content_api_client import result_recorder import shoptimizer_client", "methods=['POST']) def run_delete_process() -> Tuple[str, http.HTTPStatus]: \"\"\"Handles deleting tasks pushed", "Content API for Shopping (Merchant Center). 
- Records the results", "-> int: \"\"\"Returns the number of times this task has", "_logging_client = cloud_logging.Client() _logging_client.setup_logging(log_level=logging.DEBUG) _SHOPTIMIZER_CONFIG_FILE_PATH = 'config/shoptimizer_config.json' OPERATION_TO_METHOD = {", "batch_to_send_to_content_api = original_batch # Sends batch of items to Content", "-> Tuple[str, http.HTTPStatus]: \"\"\"Handles deleting tasks pushed from Task Queue.\"\"\"", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "thrown by Content API. item_rows: The items being processed in", "[], []) try: if not items: logging.error( 'Batch #%d, operation", "'Batch #%d, operation %s: 0 items loaded from BigQuery so", "from BigQuery. \"\"\" table_id = f'process_items_to_{operation.value}_{task.timestamp}' bq_client = bigquery_client.BigQueryClient.from_service_account_json( constants.GCP_SERVICE_ACCOUNT_PATH,", "def run_prevent_expiring_process() -> Tuple[str, http.HTTPStatus]: \"\"\"Handles prevent expiring tasks pushed", "items: logging.error( 'Batch #%d, operation %s: 0 items loaded from", "int: \"\"\"Returns the number of times this task has previously", "error_reason) for item_row in item_rows ] api_result = process_result.ProcessResult([], item_failures,", "of times this task has previously been executed. If the", "operation) except (OSError, ValueError): return batch return optimization_client.shoptimize(batch) def _handle_content_api_error(", "error_reason: The reason for the error. batch_num: The batch number.", "You may obtain a copy of the License at #", "Optimizes batch via Shoptimizer for upsert/prevent_expiring operations if operation !=", "pushed from Task Queue.\"\"\" return _run_process(constants.Operation.DELETE) @app.route('/prevent_expiring_items', methods=['POST']) def run_prevent_expiring_process()", "Task Queue. This module receives batch jobs from TaskQueue. 
For", "Queue.\"\"\" return _run_process(constants.Operation.UPSERT) @app.route('/delete_items', methods=['POST']) def run_delete_process() -> Tuple[str, http.HTTPStatus]:", "API for Shopping. - Sending items to Content API for", "logging.exception( 'Error loading items from %s.%s. HTTP status: %s. Error:", "mark every id as failed. item_failures = [ failure.Failure(str(item_row.get('item_id', 'Missing", "#%d with operation %s and initiation timestamp %s successfully processed", "try: items = _load_items_from_bigquery(operation, task) except errors.HttpError: return 'Error loading", "The items being processed in this batch. operation: The operation", "Queue message. The following processes are executed in this function:", "batch_num: The batch number. error: The error thrown by Content", "return TASK_RETRY_LIMIT if __name__ == '__main__': # This is used", "error. batch_num: The batch number. error: The error thrown by", "Tuple import flask from google.cloud import bigquery from google.cloud import", "table_id, http_error.resp.status, http_error.resp.reason) raise return list(items_iterator) def _create_optimized_batch(batch: constants.Batch, batch_number:", "the Apache License, Version 2.0 (the \"License\"); # you may", "module receives batch jobs from TaskQueue. For each job, the", "batch_number = int(task.start_index / task.batch_size) + 1 logging.info( '%s started.", "initiation timestamp %s will be requeued for retry', batch_num, operation.value,", "BigQuery and sends it to Merchant Center. \"\"\" import http", "== '__main__': # This is used when running locally. Gunicorn", "performed on this batch (upsert, delete, prevent_expiring). Returns: The batch", "items to Content API for Shopping (Merchant Center). - Records", "items loaded from BigQuery. 
\"\"\" table_id = f'process_items_to_{operation.value}_{task.timestamp}' bq_client =", "List, Tuple import flask from google.cloud import bigquery from google.cloud", "if not items: logging.error( 'Batch #%d, operation %s: 0 items", "%s will be requeued for retry', batch_num, operation.value, task.timestamp) else:", "HttpError, mark every id as failed. item_failures = [ failure.Failure(str(item_row.get('item_id'," ]
[ "ranges for the with-regions. \"\"\" def find_ranges(blocks): for blk in", "variables are actually used in the blocks, else remove, #", "return True _logger.info('finding looplift candidates') # the check for cfg.entry_point", "support with-context that contain branches \" \"(i.e. break/return/raise) that can", "code. # Thus, this loop is not a candidate. _logger.debug(\"return-statement", "del blocks[k] # update main interpreter callsite into the liftedloop", "inputs, outputs, returnto): \"\"\" Transform calling block from top-level function", "ok def cannot_yield(loop): \"cannot have yield inside the loop\" insiders", "sure what condition can trigger this error. msg = \"Entry", "cfg.successors(k)) if not succs: # If the exit point has", ") if ctxobj is None: raise errors.CompilerError(_illegal_cm_msg, loc=dfn.loc) return ctxobj,", "backedge tailblk.append(ir.Jump(target=header, loc=tailblk.loc)) newblocks[tailkey] = tailblk for loop in yield_loops_with_multiple_backedges():", "vs in defs.usemap.values(): used_vars |= vs for vs in defs.defmap.values():", "= {} for k in body_block_ids: loopblocks[k] = blocks[k] used_vars", "stable ordering inputs = sorted(set(inputs) & used_or_defined) outputs = sorted(set(outputs)", "smallest offset firstblk = min(blocks) - 1 blocks[firstblk] = ir_utils.fill_callee_prologue(", "Pre-Py3.8 may have multiple exits [(returnto, _)] = cfg.successors(an_exit) #", "\"\"\"Find input and output variables to a block region. \"\"\"", "the top-level withs are extracted. 
Returns the (the_new_ir, the_lifted_with_ir) \"\"\"", "errors.CompilerError( \"malformed with-context usage\", loc=blocks[blk_start].loc, ) def _legalize_with_head(blk): \"\"\"Given *blk*,", "ir_utils from numba.analysis import compute_use_defs _logger = logging.getLogger(__name__) def _extract_loop_lifting_candidates(cfg,", "\"\"\" Add new tail block that gathers all the backedges", "it contains an return # statement, which is not handled", "one entry\" ok = len(loop.entries) == 1 _logger.debug(\"one_entry=%s\", ok) return", "into jumps to new tail block newblk.body[-1] = replace_target(blk.terminator, header,", "if not hasattr(ctxobj, 'mutate_with_body'): raise errors.CompilerError( \"Unsupported context manager in", "in loop.exits: succs = set(x for x, _ in cfg.successors(k))", "# If the exit point has no successor, it contains", "if isinstance(dfn, ir.Expr) and dfn.op == 'call': args = [get_var_dfn(x)", "counters.pop(ir.EnterWith) != 1: raise errors.CompilerError( \"with's head-block must have exactly", "postdoms = cfg.post_dominators() # Verify that the with-context has no", "Lifted with-block cannot looplift myflags.enable_looplift = False # Lifted with-block", "in sorted(find_ranges(blocks)): if not previously_occurred(s, known_ranges): if e not in", "blocks, 'starting offset is not a label' known_ranges.append((s, e)) return", "body_blocks = [] for node in _cfg_nodes_in_region(cfg, blk_start, blk_end): body_blocks.append(node)", "scope = block.scope loc = block.loc blk = ir.Block(scope=scope, loc=loc)", "loc=term.loc) else: assert not term.get_targets() return term def rewrite_single_backedge(loop): \"\"\"", "[region_begin] while stack: tos = stack.pop() succs, _ = zip(*cfg.successors(tos))", "the kind of contextmanager sub_irs = [] for (blk_start, blk_end)", "loopblocks = {} for k in body_block_ids: loopblocks[k] = blocks[k]", "something valid to run through postproc # to achieve similar", "def _loop_lift_get_candidate_infos(cfg, blocks, livemap): \"\"\" 
Returns information on looplifting candidates.", "import compute_use_defs _logger = logging.getLogger(__name__) def _extract_loop_lifting_candidates(cfg, blocks): \"\"\" Returns", "\"all exits must point to the same location\" outedges =", "blk def _loop_lift_prepare_loop_func(loopinfo, blocks): \"\"\" Inplace transform loop blocks for", "in blk.terminator.get_targets(): newblk = blk.copy() # rewrite backedge into jumps", "livemap): \"\"\" Returns information on looplifting candidates. \"\"\" loops =", "cannot_yield(loop) and cfg.entry_point() not in loop.entries): candidates.append(loop) _logger.debug(\"add candidate: %s\",", "blocks): \"\"\" Inplace transform loop blocks for use as lifted", "that can leave the with-context. \" \"Details: exit of with-context", "e in withs: loc = blocks[s].loc if s not in", "would get written into block -1 # if a loop", "actually used in the blocks, else remove, # saves having", "absolute_import, print_function from collections import namedtuple, defaultdict import logging from", "= next(iter(loop.exits)) # anyone of the exit block if len(loop.exits)", "!= 1: raise errors.CompilerError( \"with's head-block must have exactly 1", "mutate them according to # the kind of contextmanager sub_irs", "this's possible if there's an exit path in the with-block", "force_non_generator=True) liftedloop = LiftedLoop(lifted_ir, typingctx, targetctx, flags, locals) # modify", "return False outedges |= succs ok = len(outedges) == 1", "hasattr(ctxobj, 'mutate_with_body'): raise errors.CompilerError( \"Unsupported context manager in use\", loc=blocks[blk_start].loc,", "`(toplevel_interp, [loop0_interp, loop1_interp, ....])` \"\"\" blocks = func_ir.blocks.copy() cfg =", "= block.loc blk = ir.Block(scope=scope, loc=loc) ir_utils.fill_block_with_call( newblock=blk, callee=liftedloop, label_next=returnto,", "= loop.header tailkey = new_block_id() for blkkey in loop.body: blk", "them according to # the kind of contextmanager sub_irs =", "not 
post-dominating the entry. \" ) raise errors.CompilerError(msg, loc=loc) def", "logging.getLogger(__name__) def _extract_loop_lifting_candidates(cfg, blocks): \"\"\" Returns a list of loops", "outedges |= succs ok = len(outedges) == 1 _logger.debug(\"same_exit_point=%s (%s)\",", "block 0 candidates = [] for loop in find_top_level_loops(cfg): _logger.debug(\"top-level", "dominating the exit.\" raise errors.CompilerError(msg, loc=loc) if e not in", "= _loop_lift_get_candidate_infos(cfg, blocks, func_ir.variable_lifetime.livemap) loops = [] if loopinfos: _logger.debug('loop", "anyone of the exit block if len(loop.exits) > 1: #", "to prevent a bad # rewrite where a prelude for", "return candidates def find_region_inout_vars(blocks, livemap, callfrom, returnto, body_block_ids): \"\"\"Find input", "as context manager\", loc=blocks[blk_start].loc, ) if ctxobj is None: raise", "loop) if (same_exit_point(loop) and one_entry(loop) and cannot_yield(loop) and cfg.entry_point() not", "context manager in use\", loc=blocks[blk_start].loc, ) return ctxobj, extra #", "cfg, blocks): \"\"\"Verify the CFG of the with-context(s). \"\"\" doms", "arg_count=len(loopinfo.inputs), force_non_generator=True) liftedloop = LiftedLoop(lifted_ir, typingctx, targetctx, flags, locals) #", "func_ir.derive(blocks=loopblocks, arg_names=tuple(loopinfo.inputs), arg_count=len(loopinfo.inputs), force_non_generator=True) liftedloop = LiftedLoop(lifted_ir, typingctx, targetctx, flags,", "not support with-context that contain branches \" \"(i.e. 
break/return/raise) that", "....])` \"\"\" blocks = func_ir.blocks.copy() cfg = compute_cfg_from_blocks(blocks) loopinfos =", "== 'call': args = [get_var_dfn(x) for x in dfn.args] kws", "blk.body: counters[type(stmt)] += 1 if counters.pop(ir.EnterWith) != 1: raise errors.CompilerError(", "are candidate for loop lifting \"\"\" # check well-formed-ness of", "region_end]) stack.extend(nodes) region_nodes |= nodes return region_nodes def _legalize_withs_cfg(withs, cfg,", "= sorted(set(inputs) & used_or_defined) outputs = sorted(set(outputs) & used_or_defined &", "into the liftedloop blocks[loopinfo.callfrom] = callblock return liftedloop def loop_lifting(func_ir,", "= cmkind.mutate_with_body(func_ir, blocks, blk_start, blk_end, body_blocks, dispatcher_factory, extra) sub_irs.append(sub) if", "exactly 1 JUMP\", loc=blk.loc, ) # Can have any number", "loop\" insiders = set(loop.body) | set(loop.entries) | set(loop.exits) for blk", "create new tail block entryblk = newblocks[header] tailblk = ir.Block(scope=entryblk.scope,", "Numba IR \"\"\" from __future__ import absolute_import, print_function from collections", "blk = newblocks[blkkey] if header in blk.terminator.get_targets(): newblk = blk.copy()", "stack.pop() succs, _ = zip(*cfg.successors(tos)) nodes = set([node for node", "contextmanager cmkind, extra = _get_with_contextmanager(func_ir, blocks, blk_start) # Mutate the", "= _loop_lift_modify_blocks(func_ir, loopinfo, blocks, typingctx, targetctx, flags, locals) loops.append(lifted) #", "else remove, # saves having to create something valid to", "tailblk for loop in yield_loops_with_multiple_backedges(): rewrite_single_backedge(loop) return newblocks def canonicalize_cfg(blocks):", "_cfg_nodes_in_region(cfg, region_begin, region_end): \"\"\"Find the set of CFG nodes that", "ir.Branch): return ir.Branch(cond=term.cond, truebr=replace(term.truebr), falsebr=replace(term.falsebr), loc=term.loc) elif isinstance(term, ir.Jump): return", "node in _cfg_nodes_in_region(cfg, 
blk_start, blk_end): body_blocks.append(node) _legalize_with_head(blocks[blk_start]) # Find the", "succs, _ = zip(*cfg.successors(tos)) nodes = set([node for node in", "the lifted-loop. Returns a dictionary of blocks of the lifted-loop.", "callblock = _loop_lift_modify_call_block(liftedloop, blocks[loopinfo.callfrom], loopinfo.inputs, loopinfo.outputs, loopinfo.returnto) # remove blocks", "have multiple exits [(returnto, _)] = cfg.successors(an_exit) # requirement checked", "blkkey in loop.body: blk = newblocks[blkkey] if header in blk.terminator.get_targets():", "1 _logger.debug(\"one_entry=%s\", ok) return ok def cannot_yield(loop): \"cannot have yield", "blk_end, body_blocks, dispatcher_factory, extra) sub_irs.append(sub) if not sub_irs: # Unchanged", "valid to run through postproc # to achieve similar loopblocks", "of del counters.pop(ir.Del, None) # There MUST NOT be any", "not a candidate. _logger.debug(\"return-statement in loop.\") return False outedges |=", "& used_or_defined & def_vars) return inputs, outputs _loop_lift_info = namedtuple('loop_lift_info',", "import logging from numba.analysis import compute_cfg_from_blocks, find_top_level_loops from numba import", "if header in blk.terminator.get_targets(): newblk = blk.copy() # rewrite backedge", "a candidate. _logger.debug(\"return-statement in loop.\") return False outedges |= succs", "in succs if node not in region_nodes and node !=", "in defs.defmap.values(): def_vars |= vs used_or_defined = used_vars | def_vars", "Loop lifting transformation. 
Given a interpreter `func_ir` returns a 2", "in the blocks, else remove, # saves having to create", "loc=tailblk.loc)) newblocks[tailkey] = tailblk for loop in yield_loops_with_multiple_backedges(): rewrite_single_backedge(loop) return", "\"\"\" inputs = livemap[callfrom] outputs = livemap[returnto] # ensure live", "targetctx, flags, locals): \"\"\" Modify the block inplace to call", "tuple of `(toplevel_interp, [loop0_interp, loop1_interp, ....])` \"\"\" blocks = func_ir.blocks.copy()", "is a backedge? if loop.header in edges: count += 1", "lifted = _loop_lift_modify_blocks(func_ir, loopinfo, blocks, typingctx, targetctx, flags, locals) loops.append(lifted)", "postdoms[s]: msg = ( \"Does not support with-context that contain", "blocks of the lifted-loop. \"\"\" from numba.dispatcher import LiftedLoop #", "_loop_lift_prepare_loop_func(loopinfo, blocks): \"\"\" Inplace transform loop blocks for use as", "# Post-Py3.8 DO NOT have multiple exits returnto = an_exit", "new tail block that gathers all the backedges \"\"\" header", "in blocks[blk_start].body: if isinstance(stmt, ir.EnterWith): var_ref = stmt.contextmanager ctxobj, extra", "not in blocks: # this's possible if there's an exit", "with the smallest offset firstblk = min(blocks) - 1 blocks[firstblk]", "src, dst): def replace(target): return (dst if target == src", "loop lifting \"\"\" # check well-formed-ness of the loop def", "ensure live variables are actually used in the blocks, else", ") def _loop_lift_modify_blocks(func_ir, loopinfo, blocks, typingctx, targetctx, flags, locals): \"\"\"", "= ( \"Does not support with-context that contain branches \"", "of the exit block if len(loop.exits) > 1: # Pre-Py3.8", "loopblockkeys = set(loop.body) | set(loop.entries) if len(loop.exits) > 1: #", "get_var_dfn(v) for k, v in dfn.kws} extra = {'args': args,", "# Lifted with-block cannot looplift myflags.enable_looplift = False # Lifted", "global object used for the context manager \"\"\" _illegal_cm_msg =", "\"\"\" 
from __future__ import absolute_import, print_function from collections import namedtuple,", ") def _legalize_with_head(blk): \"\"\"Given *blk*, the head block of the", "| set(loop.exits) for blk in map(blocks.__getitem__, insiders): for inst in", "ir_utils.fill_callee_epilogue( block=ir.Block(scope=scope, loc=loc), outputs=loopinfo.outputs, ) def _loop_lift_modify_blocks(func_ir, loopinfo, blocks, typingctx,", "= zip(*cfg.successors(tos)) nodes = set([node for node in succs if", "tos = stack.pop() succs, _ = zip(*cfg.successors(tos)) nodes = set([node", "find_ranges(blocks): for blk in blocks.values(): for ew in blk.find_insts(ir.EnterWith): yield", "edges = blk.terminator.get_targets() # is a backedge? if loop.header in", "if a loop entry were in block 0 candidates =", "= stack.pop() succs, _ = zip(*cfg.successors(tos)) nodes = set([node for", "**kwargs) postproc.PostProcessor(func_ir).run() # ensure we have variable lifetime assert func_ir.variable_lifetime", "var_ref = dfn.func else: extra = None ctxobj = ir_utils.guard(ir_utils.find_global_value,", "from top-level function to call the lifted loop. \"\"\" scope", "errors.CompilerError( \"with's head-block must have exactly 1 ENTER_WITH\", loc=blk.loc, )", "start of the with-region for the contextmanager for stmt in", "new dictionary of blocks. \"\"\" return canonicalize_cfg_single_backedge(blocks) def with_lifting(func_ir, typingctx,", "object and extra info. The extra contains the arguments if", "bad # rewrite where a prelude for a lifted loop", "live variables are actually used in the blocks, else remove,", "use as lifted loop. \"\"\" entry_block = blocks[loopinfo.callfrom] scope =", "= ir_utils.guard(ir_utils.find_global_value, func_ir, var_ref) # check the contextmanager object if", "looplifting candidates. \"\"\" loops = _extract_loop_lifting_candidates(cfg, blocks) loopinfos = []", "entry_block = blocks[loopinfo.callfrom] scope = entry_block.scope loc = entry_block.loc #", "else. 
\"\"\" counters = defaultdict(int) for stmt in blk.body: counters[type(stmt)]", "blocks[k] edges = blk.terminator.get_targets() # is a backedge? if loop.header", "outputs = livemap[returnto] # ensure live variables are actually used", "ctxobj is ir.UNDEFINED: raise errors.CompilerError( \"Undefined variable used as context", "in cfg.loops().values(): if has_multiple_backedges(lp): yield lp def replace_target(term, src, dst):", "nodes that are in the given region \"\"\" region_nodes =", "block -1 # if a loop entry were in block", "the contextmanager for stmt in blocks[blk_start].body: if isinstance(stmt, ir.EnterWith): var_ref", "with %d candidates:\\n%s', len(loopinfos), func_ir.dump_to_string()) for loopinfo in loopinfos: lifted", "livemap=livemap, callfrom=callfrom, returnto=returnto, body_block_ids=local_block_ids, ) lli = _loop_lift_info(loop=loop, inputs=inputs, outputs=outputs,", "typingctx, targetctx, flags, locals) loops.append(lifted) # Make main IR main", "the exit block if len(loop.exits) > 1: # Pre-Py3.8 may", "loops that have multiple backedges. 
\"\"\" cfg = compute_cfg_from_blocks(blocks) newblocks", "= entry_block.loc # Lowering assumes the first block to be", "flags, locals) loops.append(lifted) # Make main IR main = func_ir.derive(blocks=blocks)", "of with-context not dominating the exit.\" raise errors.CompilerError(msg, loc=loc) if", "inside with block' ) assert s in blocks, 'starting offset", "extra = None ctxobj = ir_utils.guard(ir_utils.find_global_value, func_ir, var_ref) # check", "+= 1 if count > 1: # early exit return", "def same_exit_point(loop): \"all exits must point to the same location\"", "= ir.Block(scope=scope, loc=loc) ir_utils.fill_block_with_call( newblock=blk, callee=liftedloop, label_next=returnto, inputs=inputs, outputs=outputs, )", "(dst if target == src else target) if isinstance(term, ir.Branch):", "from numba.analysis import compute_use_defs _logger = logging.getLogger(__name__) def _extract_loop_lifting_candidates(cfg, blocks):", "= newblocks[header] tailblk = ir.Block(scope=entryblk.scope, loc=entryblk.loc) # add backedge tailblk.append(ir.Jump(target=header,", "return False def yield_loops_with_multiple_backedges(): for lp in cfg.loops().values(): if has_multiple_backedges(lp):", "head-block must have exactly 1 JUMP\", loc=blk.loc, ) # Can", "newblk = blk.copy() # rewrite backedge into jumps to new", "callsite into the liftedloop blocks[loopinfo.callfrom] = callblock return liftedloop def", "func_ir.dump_to_string()) for loopinfo in loopinfos: lifted = _loop_lift_modify_blocks(func_ir, loopinfo, blocks,", "call. \"\"\" # If the contextmanager used as a Call", "of the loop def same_exit_point(loop): \"all exits must point to", "Make main IR main = func_ir.derive(blocks=blocks) return main, loops def", "a block region. 
\"\"\" inputs = livemap[callfrom] outputs = livemap[returnto]", "loopinfos def _loop_lift_modify_call_block(liftedloop, block, inputs, outputs, returnto): \"\"\" Transform calling", "- 1 blocks[firstblk] = ir_utils.fill_callee_prologue( block=ir.Block(scope=scope, loc=loc), inputs=loopinfo.inputs, label_next=loopinfo.callfrom, )", "blocks[loopinfo.returnto] = ir_utils.fill_callee_epilogue( block=ir.Block(scope=scope, loc=loc), outputs=loopinfo.outputs, ) def _loop_lift_modify_blocks(func_ir, loopinfo,", "= livemap[returnto] # ensure live variables are actually used in", "if s >= a and s < b: return True", "# Lifted with-block uses object mode myflags.enable_pyobject = True myflags.force_pyobject", "# the check for cfg.entry_point in the loop.entries is to", "def replace_target(term, src, dst): def replace(target): return (dst if target", "it doesn't do anything else. \"\"\" counters = defaultdict(int) for", "_logger.debug(\"same_exit_point=%s (%s)\", ok, outedges) return ok def one_entry(loop): \"there is", "\"\"\"With-lifting transformation Rewrite the IR to extract all withs. Only", "func_ir else: new_ir = func_ir.derive(blocks) return new_ir, sub_irs def _get_with_contextmanager(func_ir,", "outputs=outputs, ) return blk def _loop_lift_prepare_loop_func(loopinfo, blocks): \"\"\" Inplace transform", "# requirement checked earlier else: # Post-Py3.8 DO NOT have", "body and get new IR sub = cmkind.mutate_with_body(func_ir, blocks, blk_start,", "in yield_loops_with_multiple_backedges(): rewrite_single_backedge(loop) return newblocks def canonicalize_cfg(blocks): \"\"\" Rewrite the", "= len(outedges) == 1 _logger.debug(\"same_exit_point=%s (%s)\", ok, outedges) return ok", "loop. 
\"\"\" scope = block.scope loc = block.loc blk =", "for stable ordering inputs = sorted(set(inputs) & used_or_defined) outputs =", "ir.Assign): if isinstance(inst.value, ir.Yield): _logger.debug(\"has yield\") return False _logger.debug(\"no yield\")", "through postproc # to achieve similar loopblocks = {} for", "loopinfo.returnto) # remove blocks for k in loopblockkeys: del blocks[k]", "= tailblk for loop in yield_loops_with_multiple_backedges(): rewrite_single_backedge(loop) return newblocks def", "contains the arguments if the context-manager is used as a", "head-block must have exactly 1 ENTER_WITH\", loc=blk.loc, ) if counters.pop(ir.Jump)", "while stack: tos = stack.pop() succs, _ = zip(*cfg.successors(tos)) nodes", "1 JUMP\", loc=blk.loc, ) # Can have any number of", "that gathers all the backedges \"\"\" header = loop.header tailkey", "have any number of del counters.pop(ir.Del, None) # There MUST", "e in sorted(find_ranges(blocks)): if not previously_occurred(s, known_ranges): if e not", "doesn't do anything else. \"\"\" counters = defaultdict(int) for stmt", "region_nodes and node != region_end]) stack.extend(nodes) region_nodes |= nodes return", "logging from numba.analysis import compute_cfg_from_blocks, find_top_level_loops from numba import ir,", "the contextmanager object if ctxobj is ir.UNDEFINED: raise errors.CompilerError( \"Undefined", "if counters.pop(ir.EnterWith) != 1: raise errors.CompilerError( \"with's head-block must have", "loop is not a candidate. _logger.debug(\"return-statement in loop.\") return False", "k in body_block_ids: loopblocks[k] = blocks[k] used_vars = set() def_vars", "withs = find_setupwiths(blocks) cfg = vlt.cfg _legalize_withs_cfg(withs, cfg, blocks) #", "IR \"\"\" from __future__ import absolute_import, print_function from collections import", "for x in dfn.args] kws = {k: get_var_dfn(v) for k,", "loc=blocks[blk_start].loc, ) return ctxobj, extra # No contextmanager found? 
raise", "new_ir, sub_irs def _get_with_contextmanager(func_ir, blocks, blk_start): \"\"\"Get the global object", "If the exit point has no successor, it contains an", "transformation. Given a interpreter `func_ir` returns a 2 tuple of", "set() for k in loop.exits: succs = set(x for x,", "new IR for the lifted loop lifted_ir = func_ir.derive(blocks=loopblocks, arg_names=tuple(loopinfo.inputs),", "blocks[loopinfo.callfrom], loopinfo.inputs, loopinfo.outputs, loopinfo.returnto) # remove blocks for k in", "-1 # if a loop entry were in block 0", "False def yield_loops_with_multiple_backedges(): for lp in cfg.loops().values(): if has_multiple_backedges(lp): yield", "exits loopblockkeys |= loop.exits loopblocks = dict((k, blocks[k].copy()) for k", "def dispatcher_factory(func_ir, objectmode=False, **kwargs): from numba.dispatcher import LiftedWith, ObjModeLiftedWith myflags", "= find_region_inout_vars( blocks=blocks, livemap=livemap, callfrom=callfrom, returnto=returnto, body_block_ids=local_block_ids, ) lli =", "the check for cfg.entry_point in the loop.entries is to prevent", "withs are extracted. Returns the (the_new_ir, the_lifted_with_ir) \"\"\" from numba", "set(loop.entries) if len(loop.exits) > 1: # Pre-Py3.8 may have multiple", "to a block region. 
\"\"\" inputs = livemap[callfrom] outputs =", "True _logger.info('finding looplift candidates') # the check for cfg.entry_point in", "is one entry\" ok = len(loop.entries) == 1 _logger.debug(\"one_entry=%s\", ok)", "in blocks, 'starting offset is not a label' known_ranges.append((s, e))", "Implement transformation on Numba IR \"\"\" from __future__ import absolute_import,", "an return # statement, which is not handled by the", "from numba.analysis import compute_cfg_from_blocks, find_top_level_loops from numba import ir, errors,", "nodes return region_nodes def _legalize_withs_cfg(withs, cfg, blocks): \"\"\"Verify the CFG", "raise errors.CompilerError( \"Unsupported context manager in use\", loc=blocks[blk_start].loc, ) return", "the with-context(s). \"\"\" doms = cfg.dominators() postdoms = cfg.post_dominators() #", "return True return False def yield_loops_with_multiple_backedges(): for lp in cfg.loops().values():", "find_top_level_loops from numba import ir, errors, ir_utils from numba.analysis import", "are withs = find_setupwiths(blocks) cfg = vlt.cfg _legalize_withs_cfg(withs, cfg, blocks)", "def _legalize_withs_cfg(withs, cfg, blocks): \"\"\"Verify the CFG of the with-context(s).", "doms = cfg.dominators() postdoms = cfg.post_dominators() # Verify that the", "[(returnto, _)] = cfg.successors(an_exit) # requirement checked earlier else: #", "\"\"\"Given *blk*, the head block of the with-context, check that", "information on looplifting candidates. 
\"\"\" loops = _extract_loop_lifting_candidates(cfg, blocks) loopinfos", "0 for k in loop.body: blk = blocks[k] edges =", "Returns the (the_new_ir, the_lifted_with_ir) \"\"\" from numba import postproc def", "note: sorted for stable ordering inputs = sorted(set(inputs) & used_or_defined)", "for the contextmanager for stmt in blocks[blk_start].body: if isinstance(stmt, ir.EnterWith):", "# this's possible if there's an exit path in the", "the loop.entries is to prevent a bad # rewrite where", "_loop_lift_modify_blocks(func_ir, loopinfo, blocks, typingctx, targetctx, flags, locals) loops.append(lifted) # Make", "region. \"\"\" inputs = livemap[callfrom] outputs = livemap[returnto] # ensure", "backedges \"\"\" header = loop.header tailkey = new_block_id() for blkkey", "vs for vs in defs.defmap.values(): def_vars |= vs used_or_defined =", "found? raise errors.CompilerError( \"malformed with-context usage\", loc=blocks[blk_start].loc, ) def _legalize_with_head(blk):", "label_next=returnto, inputs=inputs, outputs=outputs, ) return blk def _loop_lift_prepare_loop_func(loopinfo, blocks): \"\"\"", "len(loop.exits) > 1: # Pre-Py3.8 may have multiple exits loopblockkeys", "the start of the with-region for the contextmanager for stmt", "prelude for a lifted loop would get written into block", "def yield_loops_with_multiple_backedges(): for lp in cfg.loops().values(): if has_multiple_backedges(lp): yield lp", "liftedloop callblock = _loop_lift_modify_call_block(liftedloop, blocks[loopinfo.callfrom], loopinfo.inputs, loopinfo.outputs, loopinfo.returnto) # remove", "zip(*cfg.successors(tos)) nodes = set([node for node in succs if node", "to be the one with the smallest offset firstblk =", "mode myflags.enable_pyobject = True myflags.force_pyobject = True myflags.no_cpython_wrapper = False", "[get_var_dfn(x) for x in dfn.args] kws = {k: get_var_dfn(v) for", "blk in map(blocks.__getitem__, insiders): for inst in blk.body: if isinstance(inst,", "= callblock return liftedloop def 
loop_lifting(func_ir, typingctx, targetctx, flags, locals):", "one_entry(loop) and cannot_yield(loop) and cfg.entry_point() not in loop.entries): candidates.append(loop) _logger.debug(\"add", "return # statement, which is not handled by the looplifting", "loopinfo.inputs, loopinfo.outputs, loopinfo.returnto) # remove blocks for k in loopblockkeys:", "a variable\"\"\" return func_ir.get_definition(var) def get_ctxmgr_obj(var_ref): \"\"\"Return the context-manager object", "with-context has no side-exits for s, e in withs: loc", "blk in blocks.values(): for ew in blk.find_insts(ir.EnterWith): yield ew.begin, ew.end", "numba.dispatcher import LiftedLoop # Copy loop blocks loop = loopinfo.loop", "loop blocks loop = loopinfo.loop loopblockkeys = set(loop.body) | set(loop.entries)", "blocks[blk_start].body: if isinstance(stmt, ir.EnterWith): var_ref = stmt.contextmanager ctxobj, extra =", "not sub_irs: # Unchanged new_ir = func_ir else: new_ir =", "= set() def_vars = set() defs = compute_use_defs(loopblocks) for vs", "this IR with %d candidates:\\n%s', len(loopinfos), func_ir.dump_to_string()) for loopinfo in", "post-dominating the entry. \" ) raise errors.CompilerError(msg, loc=loc) def find_setupwiths(blocks):", "IR for the lifted loop lifted_ir = func_ir.derive(blocks=loopblocks, arg_names=tuple(loopinfo.inputs), arg_count=len(loopinfo.inputs),", "canonicalize the CFG. Returns a new dictionary of blocks. 
\"\"\"", "the lifted loop lifted_ir = func_ir.derive(blocks=loopblocks, arg_names=tuple(loopinfo.inputs), arg_count=len(loopinfo.inputs), force_non_generator=True) liftedloop", "return ctxobj, extra # Scan the start of the with-region", "myflags.no_cpython_wrapper = False cls = ObjModeLiftedWith else: cls = LiftedWith", "errors.CompilerError( \"Unsupported context manager in use\", loc=blocks[blk_start].loc, ) return ctxobj,", "sub_irs = [] for (blk_start, blk_end) in withs: body_blocks =", "if e not in postdoms[s]: msg = ( \"Does not", "def_vars) return inputs, outputs _loop_lift_info = namedtuple('loop_lift_info', 'loop,inputs,outputs,callfrom,returnto') def _loop_lift_get_candidate_infos(cfg,", "not previously_occurred(s, known_ranges): if e not in blocks: # this's", "= func_ir else: new_ir = func_ir.derive(blocks) return new_ir, sub_irs def", "requirement checked earlier an_exit = next(iter(loop.exits)) # anyone of the", "blk = ir.Block(scope=scope, loc=loc) ir_utils.fill_block_with_call( newblock=blk, callee=liftedloop, label_next=returnto, inputs=inputs, outputs=outputs,", "if ctxobj is None: raise errors.CompilerError(_illegal_cm_msg, loc=dfn.loc) return ctxobj, extra", "for cfg.entry_point in the loop.entries is to prevent a bad", "_loop_lift_info(loop=loop, inputs=inputs, outputs=outputs, callfrom=callfrom, returnto=returnto) loopinfos.append(lli) return loopinfos def _loop_lift_modify_call_block(liftedloop,", "the definition given a variable\"\"\" return func_ir.get_definition(var) def get_ctxmgr_obj(var_ref): \"\"\"Return", "of the lifted-loop. 
\"\"\" from numba.dispatcher import LiftedLoop # Copy", "set() def_vars = set() defs = compute_use_defs(loopblocks) for vs in", "cfg, blocks) # For each with-regions, mutate them according to", "tailkey = new_block_id() for blkkey in loop.body: blk = newblocks[blkkey]", "blocks[k] # update main interpreter callsite into the liftedloop blocks[loopinfo.callfrom]", "return ok def cannot_yield(loop): \"cannot have yield inside the loop\"", "targetctx, flags, locals): \"\"\"With-lifting transformation Rewrite the IR to extract", "similar loopblocks = {} for k in body_block_ids: loopblocks[k] =", "# Thus, this loop is not a candidate. _logger.debug(\"return-statement in", "targetctx, flags, locals) loops.append(lifted) # Make main IR main =", "cannot looplift myflags.enable_looplift = False # Lifted with-block uses object", "used as context manager\", loc=blocks[blk_start].loc, ) if ctxobj is None:", "loopinfos.append(lli) return loopinfos def _loop_lift_modify_call_block(liftedloop, block, inputs, outputs, returnto): \"\"\"", "from numba.dispatcher import LiftedWith, ObjModeLiftedWith myflags = flags.copy() if objectmode:", "the global object used for the context manager \"\"\" _illegal_cm_msg", "import ir, errors, ir_utils from numba.analysis import compute_use_defs _logger =", "used in the blocks, else remove, # saves having to", "dfn = func_ir.get_definition(var_ref) if isinstance(dfn, ir.Expr) and dfn.op == 'call':", "early exit return True return False def yield_loops_with_multiple_backedges(): for lp", "== src else target) if isinstance(term, ir.Branch): return ir.Branch(cond=term.cond, truebr=replace(term.truebr),", "counters.pop(ir.Del, None) # There MUST NOT be any other statements", "of with-context not post-dominating the entry. 
\" ) raise errors.CompilerError(msg,", "term.get_targets() return term def rewrite_single_backedge(loop): \"\"\" Add new tail block", "loop would get written into block -1 # if a", "the with-context has no side-exits for s, e in withs:", "newblock=blk, callee=liftedloop, label_next=returnto, inputs=inputs, outputs=outputs, ) return blk def _loop_lift_prepare_loop_func(loopinfo,", "Rewrite the IR to extract all withs. Only the top-level", "args, 'kwargs': kws} var_ref = dfn.func else: extra = None", "_loop_lift_modify_call_block(liftedloop, blocks[loopinfo.callfrom], loopinfo.inputs, loopinfo.outputs, loopinfo.returnto) # remove blocks for k", "trigger this error. msg = \"Entry of with-context not dominating", "def find_ranges(blocks): for blk in blocks.values(): for ew in blk.find_insts(ir.EnterWith):", "with. Returns a list of ranges for the with-regions. \"\"\"", "call to the lifted-loop. Returns a dictionary of blocks of", "callfrom=callfrom, returnto=returnto) loopinfos.append(lli) return loopinfos def _loop_lift_modify_call_block(liftedloop, block, inputs, outputs,", "kind of contextmanager sub_irs = [] for (blk_start, blk_end) in", "backedge? if loop.header in edges: count += 1 if count", "blocks): \"\"\" Returns a list of loops that are candidate", "Can have any number of del counters.pop(ir.Del, None) # There", "yield inside the loop\" insiders = set(loop.body) | set(loop.entries) |", "check the contextmanager object if ctxobj is ir.UNDEFINED: raise errors.CompilerError(", "backedges. \"\"\" cfg = compute_cfg_from_blocks(blocks) newblocks = blocks.copy() def new_block_id():", "with-context that contain branches \" \"(i.e. break/return/raise) that can leave", "exits must point to the same location\" outedges = set()", "if not succs: # If the exit point has no", "loop_lifting(func_ir, typingctx, targetctx, flags, locals): \"\"\" Loop lifting transformation. 
Given", "sub = cmkind.mutate_with_body(func_ir, blocks, blk_start, blk_end, body_blocks, dispatcher_factory, extra) sub_irs.append(sub)", "typingctx, targetctx, flags, locals): \"\"\" Modify the block inplace to", "CFG. Returns a new dictionary of blocks. \"\"\" return canonicalize_cfg_single_backedge(blocks)", "is ir.UNDEFINED: raise errors.CompilerError( \"Undefined variable used as context manager\",", "raise errors.CompilerError( \"illegal statements in with's head-block\", loc=blk.loc, ) def", "blocks, blk_start) # Mutate the body and get new IR", "newblocks[tailkey] = tailblk for loop in yield_loops_with_multiple_backedges(): rewrite_single_backedge(loop) return newblocks", "branches \" \"(i.e. break/return/raise) that can leave the with-context. \"", "(same_exit_point(loop) and one_entry(loop) and cannot_yield(loop) and cfg.entry_point() not in loop.entries):", "1: # early exit return True return False def yield_loops_with_multiple_backedges():", "= [] for s, e in sorted(find_ranges(blocks)): if not previously_occurred(s,", "not in region_nodes and node != region_end]) stack.extend(nodes) region_nodes |=", "# Verify that the with-context has no side-exits for s,", "block to be the one with the smallest offset firstblk", "for blk in map(blocks.__getitem__, insiders): for inst in blk.body: if", "\"\"\"Verify the CFG of the with-context(s). \"\"\" doms = cfg.dominators()", "errors.CompilerError( \"with's head-block must have exactly 1 JUMP\", loc=blk.loc, )", "flags, locals): \"\"\"With-lifting transformation Rewrite the IR to extract all", "for (blk_start, blk_end) in withs: body_blocks = [] for node", "(%s)\", ok, outedges) return ok def one_entry(loop): \"there is one", "{k: get_var_dfn(v) for k, v in dfn.kws} extra = {'args':", "For each with-regions, mutate them according to # the kind", "k in loop.body: blk = blocks[k] edges = blk.terminator.get_targets() #", "blocks to canonicalize the CFG. Returns a new dictionary of", "extra # No contextmanager found? 
raise errors.CompilerError( \"malformed with-context usage\",", "new tail block entryblk = newblocks[header] tailblk = ir.Block(scope=entryblk.scope, loc=entryblk.loc)", "LiftedLoop # Copy loop blocks loop = loopinfo.loop loopblockkeys =", "len(outedges) == 1 _logger.debug(\"same_exit_point=%s (%s)\", ok, outedges) return ok def", "= defaultdict(int) for stmt in blk.body: counters[type(stmt)] += 1 if", "due to return/raise ' 'statements inside with block' ) assert", "[] for loop in loops: [callfrom] = loop.entries # requirement", "loops.append(lifted) # Make main IR main = func_ir.derive(blocks=blocks) return main,", "s in blocks, 'starting offset is not a label' known_ranges.append((s,", "x, _ in cfg.successors(k)) if not succs: # If the", "outedges) return ok def one_entry(loop): \"there is one entry\" ok", "multiple exits returnto = an_exit local_block_ids = set(loop.body) | set(loop.entries)", "arguments if the context-manager is used as a call. \"\"\"", "output variables to a block region. \"\"\" inputs = livemap[callfrom]", "= blocks[k] edges = blk.terminator.get_targets() # is a backedge? 
if", "loops: [callfrom] = loop.entries # requirement checked earlier an_exit =", "return canonicalize_cfg_single_backedge(blocks) def with_lifting(func_ir, typingctx, targetctx, flags, locals): \"\"\"With-lifting transformation", "with_lifting(func_ir, typingctx, targetctx, flags, locals): \"\"\"With-lifting transformation Rewrite the IR", "Post-Py3.8 DO NOT have multiple exits returnto = an_exit local_block_ids", "loc=blk.loc, ) def _cfg_nodes_in_region(cfg, region_begin, region_end): \"\"\"Find the set of", "run through postproc # to achieve similar loopblocks = {}", "for k in body_block_ids: loopblocks[k] = blocks[k] used_vars = set()", "has_multiple_backedges(lp): yield lp def replace_target(term, src, dst): def replace(target): return", "= _extract_loop_lifting_candidates(cfg, blocks) loopinfos = [] for loop in loops:", "def canonicalize_cfg_single_backedge(blocks): \"\"\" Rewrite loops that have multiple backedges. \"\"\"", "= min(blocks) - 1 blocks[firstblk] = ir_utils.fill_callee_prologue( block=ir.Block(scope=scope, loc=loc), inputs=loopinfo.inputs,", "= set(x for x, _ in cfg.successors(k)) if not succs:", "set(loop.exits) for blk in map(blocks.__getitem__, insiders): for inst in blk.body:", "# note: sorted for stable ordering inputs = sorted(set(inputs) &", "_legalize_with_head(blk): \"\"\"Given *blk*, the head block of the with-context, check", "# check well-formed-ness of the loop def same_exit_point(loop): \"all exits", "there's an exit path in the with-block raise errors.CompilerError( 'unsupported", "known_ranges): for a, b in known_ranges: if s >= a", "of the with-context, check that it doesn't do anything else.", "\"\"\" entry_block = blocks[loopinfo.callfrom] scope = entry_block.scope loc = entry_block.loc", "stack.extend(nodes) region_nodes |= nodes return region_nodes def _legalize_withs_cfg(withs, cfg, blocks):", "\"\"\"Find the set of CFG nodes that are in the", "True myflags.no_cpython_wrapper = False cls = ObjModeLiftedWith else: cls =", 
"\"\"\" counters = defaultdict(int) for stmt in blk.body: counters[type(stmt)] +=", "Returns a new dictionary of blocks. \"\"\" return canonicalize_cfg_single_backedge(blocks) def", "import LiftedWith, ObjModeLiftedWith myflags = flags.copy() if objectmode: # Lifted", "loop.entries): candidates.append(loop) _logger.debug(\"add candidate: %s\", loop) return candidates def find_region_inout_vars(blocks,", "a call. \"\"\" # If the contextmanager used as a", "modify for calling into liftedloop callblock = _loop_lift_modify_call_block(liftedloop, blocks[loopinfo.callfrom], loopinfo.inputs,", "candidates') # the check for cfg.entry_point in the loop.entries is", "ir.EnterWith): var_ref = stmt.contextmanager ctxobj, extra = get_ctxmgr_obj(var_ref) if not", "len(loopinfos), func_ir.dump_to_string()) for loopinfo in loopinfos: lifted = _loop_lift_modify_blocks(func_ir, loopinfo,", "loc=loc), outputs=loopinfo.outputs, ) def _loop_lift_modify_blocks(func_ir, loopinfo, blocks, typingctx, targetctx, flags,", "set([node for node in succs if node not in region_nodes", "= vlt.cfg _legalize_withs_cfg(withs, cfg, blocks) # For each with-regions, mutate", "= blk.terminator.get_targets() # is a backedge? if loop.header in edges:", "liftedloop = LiftedLoop(lifted_ir, typingctx, targetctx, flags, locals) # modify for", "get_var_dfn(var): \"\"\"Get the definition given a variable\"\"\" return func_ir.get_definition(var) def", "s < b: return True return False known_ranges = []", "candidates. 
\"\"\" loops = _extract_loop_lifting_candidates(cfg, blocks) loopinfos = [] for", "replace_target(term, src, dst): def replace(target): return (dst if target ==", "path in the with-block raise errors.CompilerError( 'unsupported controlflow due to", "ENTER_WITH\", loc=blk.loc, ) if counters.pop(ir.Jump) != 1: raise errors.CompilerError( \"with's", "# ensure live variables are actually used in the blocks,", "for k in loopblockkeys: del blocks[k] # update main interpreter", "\"\"\" def find_ranges(blocks): for blk in blocks.values(): for ew in", "| set(loop.entries) inputs, outputs = find_region_inout_vars( blocks=blocks, livemap=livemap, callfrom=callfrom, returnto=returnto,", "requirement checked earlier else: # Post-Py3.8 DO NOT have multiple", "if counters.pop(ir.Jump) != 1: raise errors.CompilerError( \"with's head-block must have", "loc = block.loc blk = ir.Block(scope=scope, loc=loc) ir_utils.fill_block_with_call( newblock=blk, callee=liftedloop,", "callee=liftedloop, label_next=returnto, inputs=inputs, outputs=outputs, ) return blk def _loop_lift_prepare_loop_func(loopinfo, blocks):", "exit.\" raise errors.CompilerError(msg, loc=loc) if e not in postdoms[s]: msg", "1 if counters.pop(ir.EnterWith) != 1: raise errors.CompilerError( \"with's head-block must", "1 blocks[firstblk] = ir_utils.fill_callee_prologue( block=ir.Block(scope=scope, loc=loc), inputs=loopinfo.inputs, label_next=loopinfo.callfrom, ) blocks[loopinfo.returnto]", "location\" outedges = set() for k in loop.exits: succs =", "blk.body: if isinstance(inst, ir.Assign): if isinstance(inst.value, ir.Yield): _logger.debug(\"has yield\") return", "get_ctxmgr_obj(var_ref) if not hasattr(ctxobj, 'mutate_with_body'): raise errors.CompilerError( \"Unsupported context manager", "= compute_cfg_from_blocks(blocks) newblocks = blocks.copy() def new_block_id(): return max(newblocks.keys()) +", "newblocks[header] tailblk = ir.Block(scope=entryblk.scope, loc=entryblk.loc) # add backedge 
tailblk.append(ir.Jump(target=header, loc=tailblk.loc))", "new_block_id(): return max(newblocks.keys()) + 1 def has_multiple_backedges(loop): count = 0", "for use as lifted loop. \"\"\" entry_block = blocks[loopinfo.callfrom] scope", "_legalize_withs_cfg(withs, cfg, blocks): \"\"\"Verify the CFG of the with-context(s). \"\"\"", "# add backedge tailblk.append(ir.Jump(target=header, loc=tailblk.loc)) newblocks[tailkey] = tailblk for loop", "if the context-manager is used as a call. \"\"\" #", "insiders): for inst in blk.body: if isinstance(inst, ir.Assign): if isinstance(inst.value,", "exit return True return False def yield_loops_with_multiple_backedges(): for lp in", "def _loop_lift_prepare_loop_func(loopinfo, blocks): \"\"\" Inplace transform loop blocks for use", "\" \"(i.e. break/return/raise) that can leave the with-context. \" \"Details:", "def _extract_loop_lifting_candidates(cfg, blocks): \"\"\" Returns a list of loops that", "is not a candidate. _logger.debug(\"return-statement in loop.\") return False outedges", "assert s in blocks, 'starting offset is not a label'", "having to create something valid to run through postproc #", "if node not in region_nodes and node != region_end]) stack.extend(nodes)", "s, e in sorted(find_ranges(blocks)): if not previously_occurred(s, known_ranges): if e", "blocks[k] used_vars = set() def_vars = set() defs = compute_use_defs(loopblocks)", "= None ctxobj = ir_utils.guard(ir_utils.find_global_value, func_ir, var_ref) # check the", "can trigger this error. msg = \"Entry of with-context not", "|= vs for vs in defs.defmap.values(): def_vars |= vs used_or_defined", "= ir_utils.fill_callee_epilogue( block=ir.Block(scope=scope, loc=loc), outputs=loopinfo.outputs, ) def _loop_lift_modify_blocks(func_ir, loopinfo, blocks,", "list of ranges for the with-regions. 
\"\"\" def find_ranges(blocks): for", "region_nodes |= nodes return region_nodes def _legalize_withs_cfg(withs, cfg, blocks): \"\"\"Verify", "= LiftedLoop(lifted_ir, typingctx, targetctx, flags, locals) # modify for calling", "\"\"\" scope = block.scope loc = block.loc blk = ir.Block(scope=scope,", "number of del counters.pop(ir.Del, None) # There MUST NOT be", "def new_block_id(): return max(newblocks.keys()) + 1 def has_multiple_backedges(loop): count =", "region_begin, region_end): \"\"\"Find the set of CFG nodes that are", "has no successor, it contains an return # statement, which", "func_ir.variable_lifetime vlt = func_ir.variable_lifetime blocks = func_ir.blocks.copy() # find where", "the entry. \" ) raise errors.CompilerError(msg, loc=loc) def find_setupwiths(blocks): \"\"\"Find", "used_or_defined = used_vars | def_vars # note: sorted for stable", "# If the contextmanager used as a Call dfn =", "Call dfn = func_ir.get_definition(var_ref) if isinstance(dfn, ir.Expr) and dfn.op ==", "= cfg.successors(an_exit) # requirement checked earlier else: # Post-Py3.8 DO", ">= a and s < b: return True return False", "multiple exits [(returnto, _)] = cfg.successors(an_exit) # requirement checked earlier", "object if ctxobj is ir.UNDEFINED: raise errors.CompilerError( \"Undefined variable used", "k in loopblockkeys: del blocks[k] # update main interpreter callsite", "with-context not dominating the exit.\" raise errors.CompilerError(msg, loc=loc) if e", "first block to be the one with the smallest offset", "anything else. 
\"\"\" counters = defaultdict(int) for stmt in blk.body:", "= used_vars | def_vars # note: sorted for stable ordering", "the (the_new_ir, the_lifted_with_ir) \"\"\" from numba import postproc def dispatcher_factory(func_ir,", ") # Can have any number of del counters.pop(ir.Del, None)", "if not previously_occurred(s, known_ranges): if e not in blocks: #", "return inputs, outputs _loop_lift_info = namedtuple('loop_lift_info', 'loop,inputs,outputs,callfrom,returnto') def _loop_lift_get_candidate_infos(cfg, blocks,", "min(blocks) - 1 blocks[firstblk] = ir_utils.fill_callee_prologue( block=ir.Block(scope=scope, loc=loc), inputs=loopinfo.inputs, label_next=loopinfo.callfrom,", "# Mutate the body and get new IR sub =", "with-block uses object mode myflags.enable_pyobject = True myflags.force_pyobject = True", "= [] for loop in loops: [callfrom] = loop.entries #", "# is a backedge? if loop.header in edges: count +=", "return func_ir.get_definition(var) def get_ctxmgr_obj(var_ref): \"\"\"Return the context-manager object and extra", "func_ir.variable_lifetime.livemap) loops = [] if loopinfos: _logger.debug('loop lifting this IR", "with-context, check that it doesn't do anything else. \"\"\" counters", "insiders = set(loop.body) | set(loop.entries) | set(loop.exits) for blk in", "check that it doesn't do anything else. \"\"\" counters =", "locals): \"\"\" Modify the block inplace to call to the", "src else target) if isinstance(term, ir.Branch): return ir.Branch(cond=term.cond, truebr=replace(term.truebr), falsebr=replace(term.falsebr),", "cls = ObjModeLiftedWith else: cls = LiftedWith return cls(func_ir, typingctx,", "targetctx, flags, locals): \"\"\" Loop lifting transformation. 
Given a interpreter", "check well-formed-ness of the loop def same_exit_point(loop): \"all exits must", "`func_ir` returns a 2 tuple of `(toplevel_interp, [loop0_interp, loop1_interp, ....])`", "del counters.pop(ir.Del, None) # There MUST NOT be any other", "loc = blocks[s].loc if s not in doms[e]: # Not", "# Find the contextmanager cmkind, extra = _get_with_contextmanager(func_ir, blocks, blk_start)", "(the_new_ir, the_lifted_with_ir) \"\"\" from numba import postproc def dispatcher_factory(func_ir, objectmode=False,", "errors.CompilerError( \"Undefined variable used as context manager\", loc=blocks[blk_start].loc, ) if", "as a call. \"\"\" # If the contextmanager used as", "ensure we have variable lifetime assert func_ir.variable_lifetime vlt = func_ir.variable_lifetime", "errors.CompilerError( \"illegal statements in with's head-block\", loc=blk.loc, ) def _cfg_nodes_in_region(cfg,", "myflags.enable_pyobject = True myflags.force_pyobject = True myflags.no_cpython_wrapper = False cls", "extra contains the arguments if the context-manager is used as", "same_exit_point(loop): \"all exits must point to the same location\" outedges", "other statements if counters: raise errors.CompilerError( \"illegal statements in with's", "loc = entry_block.loc # Lowering assumes the first block to", "+ 1 def has_multiple_backedges(loop): count = 0 for k in", "in loops: [callfrom] = loop.entries # requirement checked earlier an_exit", "Find the contextmanager cmkind, extra = _get_with_contextmanager(func_ir, blocks, blk_start) #", "statements if counters: raise errors.CompilerError( \"illegal statements in with's head-block\",", "raise errors.CompilerError(_illegal_cm_msg, loc=dfn.loc) return ctxobj, extra # Scan the start", "a 2 tuple of `(toplevel_interp, [loop0_interp, loop1_interp, ....])` \"\"\" blocks", "body_blocks, dispatcher_factory, extra) sub_irs.append(sub) if not sub_irs: # Unchanged new_ir", "to run through postproc # to achieve similar loopblocks =", "where a prelude 
for a lifted loop would get written", "if isinstance(inst, ir.Assign): if isinstance(inst.value, ir.Yield): _logger.debug(\"has yield\") return False", "Create a new IR for the lifted loop lifted_ir =", "Returns a dictionary of blocks of the lifted-loop. \"\"\" from", "a interpreter `func_ir` returns a 2 tuple of `(toplevel_interp, [loop0_interp,", "are in the given region \"\"\" region_nodes = set() stack", "def _loop_lift_modify_blocks(func_ir, loopinfo, blocks, typingctx, targetctx, flags, locals): \"\"\" Modify", "stack: tos = stack.pop() succs, _ = zip(*cfg.successors(tos)) nodes =", "all top-level with. Returns a list of ranges for the", "return ok def one_entry(loop): \"there is one entry\" ok =", "in withs: body_blocks = [] for node in _cfg_nodes_in_region(cfg, blk_start,", "postproc def dispatcher_factory(func_ir, objectmode=False, **kwargs): from numba.dispatcher import LiftedWith, ObjModeLiftedWith", "with-contexts regions are withs = find_setupwiths(blocks) cfg = vlt.cfg _legalize_withs_cfg(withs,", "\"\"\" from numba import postproc def dispatcher_factory(func_ir, objectmode=False, **kwargs): from", "= [get_var_dfn(x) for x in dfn.args] kws = {k: get_var_dfn(v)", "candidates:\\n%s', len(loopinfos), func_ir.dump_to_string()) for loopinfo in loopinfos: lifted = _loop_lift_modify_blocks(func_ir,", "blk_end) in withs: body_blocks = [] for node in _cfg_nodes_in_region(cfg,", "variables to a block region. \"\"\" inputs = livemap[callfrom] outputs", "postproc # to achieve similar loopblocks = {} for k", "the given blocks to canonicalize the CFG. Returns a new", "\"with's head-block must have exactly 1 ENTER_WITH\", loc=blk.loc, ) if", "CFG of the with-context(s). \"\"\" doms = cfg.dominators() postdoms =", "known_ranges): if e not in blocks: # this's possible if", "\"\"\"Find all top-level with. Returns a list of ranges for", "entry. 
\" ) raise errors.CompilerError(msg, loc=loc) def find_setupwiths(blocks): \"\"\"Find all", "postproc.PostProcessor(func_ir).run() # ensure we have variable lifetime assert func_ir.variable_lifetime vlt", "\"\"\" _illegal_cm_msg = \"Illegal use of context-manager.\" def get_var_dfn(var): \"\"\"Get", "loop.\") return False outedges |= succs ok = len(outedges) ==", "contextmanager used as a Call dfn = func_ir.get_definition(var_ref) if isinstance(dfn,", "# early exit return True return False def yield_loops_with_multiple_backedges(): for", "objectmode: # Lifted with-block cannot looplift myflags.enable_looplift = False #", "return liftedloop def loop_lifting(func_ir, typingctx, targetctx, flags, locals): \"\"\" Loop", "main = func_ir.derive(blocks=blocks) return main, loops def canonicalize_cfg_single_backedge(blocks): \"\"\" Rewrite", "where with-contexts regions are withs = find_setupwiths(blocks) cfg = vlt.cfg", "in the loop.entries is to prevent a bad # rewrite", "if isinstance(inst.value, ir.Yield): _logger.debug(\"has yield\") return False _logger.debug(\"no yield\") return", "an_exit local_block_ids = set(loop.body) | set(loop.entries) inputs, outputs = find_region_inout_vars(", "in the with-block raise errors.CompilerError( 'unsupported controlflow due to return/raise", "lifting this IR with %d candidates:\\n%s', len(loopinfos), func_ir.dump_to_string()) for loopinfo", "blk_start) # Mutate the body and get new IR sub", "def has_multiple_backedges(loop): count = 0 for k in loop.body: blk", "to call the lifted loop. \"\"\" scope = block.scope loc", "extra = {'args': args, 'kwargs': kws} var_ref = dfn.func else:", "a list of ranges for the with-regions. \"\"\" def find_ranges(blocks):", "leave the with-context. 
\" \"Details: exit of with-context not post-dominating", "0 candidates = [] for loop in find_top_level_loops(cfg): _logger.debug(\"top-level loop:", "= dfn.func else: extra = None ctxobj = ir_utils.guard(ir_utils.find_global_value, func_ir,", "the IR to extract all withs. Only the top-level withs", "the looplifting code. # Thus, this loop is not a", "into liftedloop callblock = _loop_lift_modify_call_block(liftedloop, blocks[loopinfo.callfrom], loopinfo.inputs, loopinfo.outputs, loopinfo.returnto) #", "== 1 _logger.debug(\"same_exit_point=%s (%s)\", ok, outedges) return ok def one_entry(loop):", "rewrite backedge into jumps to new tail block newblk.body[-1] =", "into block -1 # if a loop entry were in", "Returns a list of loops that are candidate for loop", "Inplace transform loop blocks for use as lifted loop. \"\"\"", "1 def has_multiple_backedges(loop): count = 0 for k in loop.body:", "loc=loc) ir_utils.fill_block_with_call( newblock=blk, callee=liftedloop, label_next=returnto, inputs=inputs, outputs=outputs, ) return blk", "def_vars |= vs used_or_defined = used_vars | def_vars # note:", "import LiftedLoop # Copy loop blocks loop = loopinfo.loop loopblockkeys", "assert not term.get_targets() return term def rewrite_single_backedge(loop): \"\"\" Add new", "# Unchanged new_ir = func_ir else: new_ir = func_ir.derive(blocks) return", "remove blocks for k in loopblockkeys: del blocks[k] # update", "as a Call dfn = func_ir.get_definition(var_ref) if isinstance(dfn, ir.Expr) and", "[] for s, e in sorted(find_ranges(blocks)): if not previously_occurred(s, known_ranges):", "that are candidate for loop lifting \"\"\" # check well-formed-ness", "= _get_with_contextmanager(func_ir, blocks, blk_start) # Mutate the body and get", "to call to the lifted-loop. 
Returns a dictionary of blocks", "isinstance(stmt, ir.EnterWith): var_ref = stmt.contextmanager ctxobj, extra = get_ctxmgr_obj(var_ref) if", "newblocks = blocks.copy() def new_block_id(): return max(newblocks.keys()) + 1 def", "transformation on Numba IR \"\"\" from __future__ import absolute_import, print_function", "function to call the lifted loop. \"\"\" scope = block.scope", "in find_top_level_loops(cfg): _logger.debug(\"top-level loop: %s\", loop) if (same_exit_point(loop) and one_entry(loop)", "{'args': args, 'kwargs': kws} var_ref = dfn.func else: extra =", "= set([node for node in succs if node not in", "one_entry(loop): \"there is one entry\" ok = len(loop.entries) == 1", "main IR main = func_ir.derive(blocks=blocks) return main, loops def canonicalize_cfg_single_backedge(blocks):", "func_ir.get_definition(var_ref) if isinstance(dfn, ir.Expr) and dfn.op == 'call': args =", "func_ir, var_ref) # check the contextmanager object if ctxobj is", "new_block_id() for blkkey in loop.body: blk = newblocks[blkkey] if header", "region \"\"\" region_nodes = set() stack = [region_begin] while stack:", "\"\"\" doms = cfg.dominators() postdoms = cfg.post_dominators() # Verify that", "= True myflags.no_cpython_wrapper = False cls = ObjModeLiftedWith else: cls", "dispatcher_factory(func_ir, objectmode=False, **kwargs): from numba.dispatcher import LiftedWith, ObjModeLiftedWith myflags =", "and s < b: return True return False known_ranges =", "in loopinfos: lifted = _loop_lift_modify_blocks(func_ir, loopinfo, blocks, typingctx, targetctx, flags,", "__future__ import absolute_import, print_function from collections import namedtuple, defaultdict import", "return newblocks def canonicalize_cfg(blocks): \"\"\" Rewrite the given blocks to", "ObjModeLiftedWith else: cls = LiftedWith return cls(func_ir, typingctx, targetctx, myflags,", "return True return False known_ranges = [] for s, e", "locals) loops.append(lifted) # Make main IR main = func_ir.derive(blocks=blocks) return", 
"_cfg_nodes_in_region(cfg, blk_start, blk_end): body_blocks.append(node) _legalize_with_head(blocks[blk_start]) # Find the contextmanager cmkind,", "numba import postproc def dispatcher_factory(func_ir, objectmode=False, **kwargs): from numba.dispatcher import", "= flags.copy() if objectmode: # Lifted with-block cannot looplift myflags.enable_looplift", "returnto=returnto, body_block_ids=local_block_ids, ) lli = _loop_lift_info(loop=loop, inputs=inputs, outputs=outputs, callfrom=callfrom, returnto=returnto)", "blocks[k].copy()) for k in loopblockkeys) # Modify the loop blocks", "ctxobj, extra # No contextmanager found? raise errors.CompilerError( \"malformed with-context", "do anything else. \"\"\" counters = defaultdict(int) for stmt in", "has_multiple_backedges(loop): count = 0 for k in loop.body: blk =", "\"\"\" # If the contextmanager used as a Call dfn", "blocks[firstblk] = ir_utils.fill_callee_prologue( block=ir.Block(scope=scope, loc=loc), inputs=loopinfo.inputs, label_next=loopinfo.callfrom, ) blocks[loopinfo.returnto] =", "used_vars |= vs for vs in defs.defmap.values(): def_vars |= vs", "with-region for the contextmanager for stmt in blocks[blk_start].body: if isinstance(stmt,", "_loop_lift_modify_call_block(liftedloop, block, inputs, outputs, returnto): \"\"\" Transform calling block from", "kws} var_ref = dfn.func else: extra = None ctxobj =", "find_top_level_loops(cfg): _logger.debug(\"top-level loop: %s\", loop) if (same_exit_point(loop) and one_entry(loop) and", "used_or_defined) outputs = sorted(set(outputs) & used_or_defined & def_vars) return inputs,", "body_block_ids): \"\"\"Find input and output variables to a block region.", "func_ir.derive(blocks) return new_ir, sub_irs def _get_with_contextmanager(func_ir, blocks, blk_start): \"\"\"Get the", "of contextmanager sub_irs = [] for (blk_start, blk_end) in withs:", "used as a Call dfn = func_ir.get_definition(var_ref) if isinstance(dfn, ir.Expr)", "if there's an exit path in the with-block raise 
errors.CompilerError(", "a backedge? if loop.header in edges: count += 1 if", "written into block -1 # if a loop entry were", "(blk_start, blk_end) in withs: body_blocks = [] for node in", "loopinfo.outputs, loopinfo.returnto) # remove blocks for k in loopblockkeys: del", "cfg.entry_point() not in loop.entries): candidates.append(loop) _logger.debug(\"add candidate: %s\", loop) return", "sub_irs def _get_with_contextmanager(func_ir, blocks, blk_start): \"\"\"Get the global object used", "with-context not post-dominating the entry. \" ) raise errors.CompilerError(msg, loc=loc)", "# if a loop entry were in block 0 candidates", "with's head-block\", loc=blk.loc, ) def _cfg_nodes_in_region(cfg, region_begin, region_end): \"\"\"Find the", "region_nodes def _legalize_withs_cfg(withs, cfg, blocks): \"\"\"Verify the CFG of the", "# There MUST NOT be any other statements if counters:", "inputs, outputs = find_region_inout_vars( blocks=blocks, livemap=livemap, callfrom=callfrom, returnto=returnto, body_block_ids=local_block_ids, )", "context manager \"\"\" _illegal_cm_msg = \"Illegal use of context-manager.\" def", "a list of loops that are candidate for loop lifting", "for stmt in blk.body: counters[type(stmt)] += 1 if counters.pop(ir.EnterWith) !=", "must have exactly 1 JUMP\", loc=blk.loc, ) # Can have", "block of the with-context, check that it doesn't do anything", "transformation Rewrite the IR to extract all withs. 
Only the", "region_end): \"\"\"Find the set of CFG nodes that are in", "\"Entry of with-context not dominating the exit.\" raise errors.CompilerError(msg, loc=loc)", "print_function from collections import namedtuple, defaultdict import logging from numba.analysis", "& def_vars) return inputs, outputs _loop_lift_info = namedtuple('loop_lift_info', 'loop,inputs,outputs,callfrom,returnto') def", "in blk.body: if isinstance(inst, ir.Assign): if isinstance(inst.value, ir.Yield): _logger.debug(\"has yield\")", "withs: body_blocks = [] for node in _cfg_nodes_in_region(cfg, blk_start, blk_end):", "exactly 1 ENTER_WITH\", loc=blk.loc, ) if counters.pop(ir.Jump) != 1: raise", "raise errors.CompilerError(msg, loc=loc) def find_setupwiths(blocks): \"\"\"Find all top-level with. Returns", "looplift myflags.enable_looplift = False # Lifted with-block uses object mode", "outputs=loopinfo.outputs, ) def _loop_lift_modify_blocks(func_ir, loopinfo, blocks, typingctx, targetctx, flags, locals):", "body_block_ids=local_block_ids, ) lli = _loop_lift_info(loop=loop, inputs=inputs, outputs=outputs, callfrom=callfrom, returnto=returnto) loopinfos.append(lli)", "block entryblk = newblocks[header] tailblk = ir.Block(scope=entryblk.scope, loc=entryblk.loc) # add", "elif isinstance(term, ir.Jump): return ir.Jump(target=replace(term.target), loc=term.loc) else: assert not term.get_targets()", "_ in cfg.successors(k)) if not succs: # If the exit", "to achieve similar loopblocks = {} for k in body_block_ids:", "exits returnto = an_exit local_block_ids = set(loop.body) | set(loop.entries) inputs,", "If the contextmanager used as a Call dfn = func_ir.get_definition(var_ref)", "for vs in defs.usemap.values(): used_vars |= vs for vs in", "stmt in blocks[blk_start].body: if isinstance(stmt, ir.EnterWith): var_ref = stmt.contextmanager ctxobj,", "inputs=loopinfo.inputs, label_next=loopinfo.callfrom, ) blocks[loopinfo.returnto] = ir_utils.fill_callee_epilogue( block=ir.Block(scope=scope, loc=loc), 
outputs=loopinfo.outputs, )", "extra) sub_irs.append(sub) if not sub_irs: # Unchanged new_ir = func_ir", "if len(loop.exits) > 1: # Pre-Py3.8 may have multiple exits", "else: new_ir = func_ir.derive(blocks) return new_ir, sub_irs def _get_with_contextmanager(func_ir, blocks,", "in body_block_ids: loopblocks[k] = blocks[k] used_vars = set() def_vars =", "= blocks[k] used_vars = set() def_vars = set() defs =", "# modify for calling into liftedloop callblock = _loop_lift_modify_call_block(liftedloop, blocks[loopinfo.callfrom],", "for lp in cfg.loops().values(): if has_multiple_backedges(lp): yield lp def replace_target(term,", "( \"Does not support with-context that contain branches \" \"(i.e.", "myflags, locals, **kwargs) postproc.PostProcessor(func_ir).run() # ensure we have variable lifetime", "loops def canonicalize_cfg_single_backedge(blocks): \"\"\" Rewrite loops that have multiple backedges.", "= LiftedWith return cls(func_ir, typingctx, targetctx, myflags, locals, **kwargs) postproc.PostProcessor(func_ir).run()", "\"Details: exit of with-context not post-dominating the entry. \" )", "= [] for loop in find_top_level_loops(cfg): _logger.debug(\"top-level loop: %s\", loop)", "extra # Scan the start of the with-region for the", "context-manager.\" def get_var_dfn(var): \"\"\"Get the definition given a variable\"\"\" return", "the with-context. 
\" \"Details: exit of with-context not post-dominating the", "typingctx, targetctx, flags, locals): \"\"\"With-lifting transformation Rewrite the IR to", "= sorted(set(outputs) & used_or_defined & def_vars) return inputs, outputs _loop_lift_info", "\"there is one entry\" ok = len(loop.entries) == 1 _logger.debug(\"one_entry=%s\",", "for loopinfo in loopinfos: lifted = _loop_lift_modify_blocks(func_ir, loopinfo, blocks, typingctx,", "def replace(target): return (dst if target == src else target)", "callblock return liftedloop def loop_lifting(func_ir, typingctx, targetctx, flags, locals): \"\"\"", "\"\"\" Returns information on looplifting candidates. \"\"\" loops = _extract_loop_lifting_candidates(cfg,", "count = 0 for k in loop.body: blk = blocks[k]", "errors.CompilerError( 'unsupported controlflow due to return/raise ' 'statements inside with", "ir.UNDEFINED: raise errors.CompilerError( \"Undefined variable used as context manager\", loc=blocks[blk_start].loc,", "x in dfn.args] kws = {k: get_var_dfn(v) for k, v", "in withs: loc = blocks[s].loc if s not in doms[e]:", "var_ref) # check the contextmanager object if ctxobj is ir.UNDEFINED:", "the context-manager is used as a call. \"\"\" # If", "doms[e]: # Not sure what condition can trigger this error.", "blocks _loop_lift_prepare_loop_func(loopinfo, loopblocks) # Create a new IR for the", "the context manager \"\"\" _illegal_cm_msg = \"Illegal use of context-manager.\"", "a new IR for the lifted loop lifted_ir = func_ir.derive(blocks=loopblocks,", "1 if count > 1: # early exit return True", "'call': args = [get_var_dfn(x) for x in dfn.args] kws =", "set(loop.body) | set(loop.entries) | set(loop.exits) for blk in map(blocks.__getitem__, insiders):", "the head block of the with-context, check that it doesn't", "Add new tail block that gathers all the backedges \"\"\"", "to canonicalize the CFG. 
Returns a new dictionary of blocks.", "loop in yield_loops_with_multiple_backedges(): rewrite_single_backedge(loop) return newblocks def canonicalize_cfg(blocks): \"\"\" Rewrite", "an_exit = next(iter(loop.exits)) # anyone of the exit block if", "> 1: # Pre-Py3.8 may have multiple exits [(returnto, _)]", "= \"Illegal use of context-manager.\" def get_var_dfn(var): \"\"\"Get the definition", "'unsupported controlflow due to return/raise ' 'statements inside with block'", "blocks) # For each with-regions, mutate them according to #", "succs ok = len(outedges) == 1 _logger.debug(\"same_exit_point=%s (%s)\", ok, outedges)", "= func_ir.get_definition(var_ref) if isinstance(dfn, ir.Expr) and dfn.op == 'call': args", "blk = blocks[k] edges = blk.terminator.get_targets() # is a backedge?", "the CFG. Returns a new dictionary of blocks. \"\"\" return", "# requirement checked earlier an_exit = next(iter(loop.exits)) # anyone of", "is used as a call. \"\"\" # If the contextmanager", "used_vars | def_vars # note: sorted for stable ordering inputs", "loop.exits: succs = set(x for x, _ in cfg.successors(k)) if", "def rewrite_single_backedge(loop): \"\"\" Add new tail block that gathers all", "stmt.contextmanager ctxobj, extra = get_ctxmgr_obj(var_ref) if not hasattr(ctxobj, 'mutate_with_body'): raise", "True return False def yield_loops_with_multiple_backedges(): for lp in cfg.loops().values(): if", "= cfg.post_dominators() # Verify that the with-context has no side-exits", "count > 1: # early exit return True return False", "= namedtuple('loop_lift_info', 'loop,inputs,outputs,callfrom,returnto') def _loop_lift_get_candidate_infos(cfg, blocks, livemap): \"\"\" Returns information", "func_ir.blocks.copy() # find where with-contexts regions are withs = find_setupwiths(blocks)", "= blk.copy() # rewrite backedge into jumps to new tail", "usage\", loc=blocks[blk_start].loc, ) def _legalize_with_head(blk): \"\"\"Given *blk*, the head block", "manager\", loc=blocks[blk_start].loc, ) if 
ctxobj is None: raise errors.CompilerError(_illegal_cm_msg, loc=dfn.loc)", "None) # There MUST NOT be any other statements if", "yield_loops_with_multiple_backedges(): rewrite_single_backedge(loop) return newblocks def canonicalize_cfg(blocks): \"\"\" Rewrite the given", "withs. Only the top-level withs are extracted. Returns the (the_new_ir,", "for node in succs if node not in region_nodes and", "definition given a variable\"\"\" return func_ir.get_definition(var) def get_ctxmgr_obj(var_ref): \"\"\"Return the", "contextmanager for stmt in blocks[blk_start].body: if isinstance(stmt, ir.EnterWith): var_ref =", "rewrite_single_backedge(loop): \"\"\" Add new tail block that gathers all the", "cfg.post_dominators() # Verify that the with-context has no side-exits for", "locals): \"\"\"With-lifting transformation Rewrite the IR to extract all withs.", "\"\"\" Implement transformation on Numba IR \"\"\" from __future__ import", "to extract all withs. Only the top-level withs are extracted.", "well-formed-ness of the loop def same_exit_point(loop): \"all exits must point", "of the with-region for the contextmanager for stmt in blocks[blk_start].body:", "succs = set(x for x, _ in cfg.successors(k)) if not", "\"\"\" from numba.dispatcher import LiftedLoop # Copy loop blocks loop", "= dict((k, blocks[k].copy()) for k in loopblockkeys) # Modify the", "return ir.Jump(target=replace(term.target), loc=term.loc) else: assert not term.get_targets() return term def", "loops = _extract_loop_lifting_candidates(cfg, blocks) loopinfos = [] for loop in", "by the looplifting code. # Thus, this loop is not", "return/raise ' 'statements inside with block' ) assert s in", "lp def replace_target(term, src, dst): def replace(target): return (dst if", "to the lifted-loop. 
Returns a dictionary of blocks of the", "the liftedloop blocks[loopinfo.callfrom] = callblock return liftedloop def loop_lifting(func_ir, typingctx,", "loop blocks _loop_lift_prepare_loop_func(loopinfo, loopblocks) # Create a new IR for", "in blk.find_insts(ir.EnterWith): yield ew.begin, ew.end def previously_occurred(start, known_ranges): for a,", "get new IR sub = cmkind.mutate_with_body(func_ir, blocks, blk_start, blk_end, body_blocks,", "blocks.values(): for ew in blk.find_insts(ir.EnterWith): yield ew.begin, ew.end def previously_occurred(start,", "NOT be any other statements if counters: raise errors.CompilerError( \"illegal", "are actually used in the blocks, else remove, # saves", ") lli = _loop_lift_info(loop=loop, inputs=inputs, outputs=outputs, callfrom=callfrom, returnto=returnto) loopinfos.append(lli) return", "block' ) assert s in blocks, 'starting offset is not", "on looplifting candidates. \"\"\" loops = _extract_loop_lifting_candidates(cfg, blocks) loopinfos =", "inside the loop\" insiders = set(loop.body) | set(loop.entries) | set(loop.exits)", "statement, which is not handled by the looplifting code. #", "LiftedLoop(lifted_ir, typingctx, targetctx, flags, locals) # modify for calling into", "in use\", loc=blocks[blk_start].loc, ) return ctxobj, extra # No contextmanager", "tail block that gathers all the backedges \"\"\" header =", "blocks): \"\"\"Verify the CFG of the with-context(s). \"\"\" doms =", "which is not handled by the looplifting code. 
# Thus,", "def one_entry(loop): \"there is one entry\" ok = len(loop.entries) ==", "multiple exits loopblockkeys |= loop.exits loopblocks = dict((k, blocks[k].copy()) for", "set() defs = compute_use_defs(loopblocks) for vs in defs.usemap.values(): used_vars |=", "collections import namedtuple, defaultdict import logging from numba.analysis import compute_cfg_from_blocks,", "return (dst if target == src else target) if isinstance(term,", "_logger = logging.getLogger(__name__) def _extract_loop_lifting_candidates(cfg, blocks): \"\"\" Returns a list", "context manager\", loc=blocks[blk_start].loc, ) if ctxobj is None: raise errors.CompilerError(_illegal_cm_msg,", "lifting \"\"\" # check well-formed-ness of the loop def same_exit_point(loop):", "top-level withs are extracted. Returns the (the_new_ir, the_lifted_with_ir) \"\"\" from", "[] if loopinfos: _logger.debug('loop lifting this IR with %d candidates:\\n%s',", "return main, loops def canonicalize_cfg_single_backedge(blocks): \"\"\" Rewrite loops that have", "1: raise errors.CompilerError( \"with's head-block must have exactly 1 JUMP\",", "header in blk.terminator.get_targets(): newblk = blk.copy() # rewrite backedge into", "= entry_block.scope loc = entry_block.loc # Lowering assumes the first", "tailblk.append(ir.Jump(target=header, loc=tailblk.loc)) newblocks[tailkey] = tailblk for loop in yield_loops_with_multiple_backedges(): rewrite_single_backedge(loop)", "exit block if len(loop.exits) > 1: # Pre-Py3.8 may have", "' 'statements inside with block' ) assert s in blocks,", "if ctxobj is ir.UNDEFINED: raise errors.CompilerError( \"Undefined variable used as", "statements in with's head-block\", loc=blk.loc, ) def _cfg_nodes_in_region(cfg, region_begin, region_end):", "next(iter(loop.exits)) # anyone of the exit block if len(loop.exits) >", "returnto): \"\"\" Transform calling block from top-level function to call", "myflags.enable_looplift = False # Lifted with-block uses object mode myflags.enable_pyobject", 
"blocks=blocks, livemap=livemap, callfrom=callfrom, returnto=returnto, body_block_ids=local_block_ids, ) lli = _loop_lift_info(loop=loop, inputs=inputs,", "loopinfos = _loop_lift_get_candidate_infos(cfg, blocks, func_ir.variable_lifetime.livemap) loops = [] if loopinfos:", "canonicalize_cfg_single_backedge(blocks) def with_lifting(func_ir, typingctx, targetctx, flags, locals): \"\"\"With-lifting transformation Rewrite", "\"Unsupported context manager in use\", loc=blocks[blk_start].loc, ) return ctxobj, extra", "the with-region for the contextmanager for stmt in blocks[blk_start].body: if", "candidate. _logger.debug(\"return-statement in loop.\") return False outedges |= succs ok", "_ = zip(*cfg.successors(tos)) nodes = set([node for node in succs", "ir.Block(scope=scope, loc=loc) ir_utils.fill_block_with_call( newblock=blk, callee=liftedloop, label_next=returnto, inputs=inputs, outputs=outputs, ) return", "block.loc blk = ir.Block(scope=scope, loc=loc) ir_utils.fill_block_with_call( newblock=blk, callee=liftedloop, label_next=returnto, inputs=inputs,", "contextmanager object if ctxobj is ir.UNDEFINED: raise errors.CompilerError( \"Undefined variable", "# rewrite where a prelude for a lifted loop would", "the first block to be the one with the smallest", "to new tail block newblk.body[-1] = replace_target(blk.terminator, header, tailkey) newblocks[blkkey]", "loop.entries is to prevent a bad # rewrite where a", "body_blocks.append(node) _legalize_with_head(blocks[blk_start]) # Find the contextmanager cmkind, extra = _get_with_contextmanager(func_ir,", "for k in loop.exits: succs = set(x for x, _", "with block' ) assert s in blocks, 'starting offset is", "yield ew.begin, ew.end def previously_occurred(start, known_ranges): for a, b in", "that have multiple backedges. 
\"\"\" cfg = compute_cfg_from_blocks(blocks) newblocks =", "entry_block.loc # Lowering assumes the first block to be the", "_loop_lift_get_candidate_infos(cfg, blocks, func_ir.variable_lifetime.livemap) loops = [] if loopinfos: _logger.debug('loop lifting", "for blk in blocks.values(): for ew in blk.find_insts(ir.EnterWith): yield ew.begin,", "for x, _ in cfg.successors(k)) if not succs: # If", "block, inputs, outputs, returnto): \"\"\" Transform calling block from top-level", "errors.CompilerError(msg, loc=loc) def find_setupwiths(blocks): \"\"\"Find all top-level with. Returns a", "edges: count += 1 if count > 1: # early", "= {'args': args, 'kwargs': kws} var_ref = dfn.func else: extra", "blocks. \"\"\" return canonicalize_cfg_single_backedge(blocks) def with_lifting(func_ir, typingctx, targetctx, flags, locals):", "contextmanager sub_irs = [] for (blk_start, blk_end) in withs: body_blocks", "# to achieve similar loopblocks = {} for k in", "candidates def find_region_inout_vars(blocks, livemap, callfrom, returnto, body_block_ids): \"\"\"Find input and", "previously_occurred(s, known_ranges): if e not in blocks: # this's possible", "candidate: %s\", loop) return candidates def find_region_inout_vars(blocks, livemap, callfrom, returnto,", "loc=term.loc) elif isinstance(term, ir.Jump): return ir.Jump(target=replace(term.target), loc=term.loc) else: assert not", "assumes the first block to be the one with the", "all withs. Only the top-level withs are extracted. 
Returns the", "# Modify the loop blocks _loop_lift_prepare_loop_func(loopinfo, loopblocks) # Create a", "_logger.debug('loop lifting this IR with %d candidates:\\n%s', len(loopinfos), func_ir.dump_to_string()) for", "target) if isinstance(term, ir.Branch): return ir.Branch(cond=term.cond, truebr=replace(term.truebr), falsebr=replace(term.falsebr), loc=term.loc) elif", "myflags.force_pyobject = True myflags.no_cpython_wrapper = False cls = ObjModeLiftedWith else:", "manager \"\"\" _illegal_cm_msg = \"Illegal use of context-manager.\" def get_var_dfn(var):", "_logger.debug(\"no yield\") return True _logger.info('finding looplift candidates') # the check", "= newblocks[blkkey] if header in blk.terminator.get_targets(): newblk = blk.copy() #", "kws = {k: get_var_dfn(v) for k, v in dfn.kws} extra", "'loop,inputs,outputs,callfrom,returnto') def _loop_lift_get_candidate_infos(cfg, blocks, livemap): \"\"\" Returns information on looplifting", "cfg = compute_cfg_from_blocks(blocks) newblocks = blocks.copy() def new_block_id(): return max(newblocks.keys())", "node != region_end]) stack.extend(nodes) region_nodes |= nodes return region_nodes def", "# ensure we have variable lifetime assert func_ir.variable_lifetime vlt =", "lli = _loop_lift_info(loop=loop, inputs=inputs, outputs=outputs, callfrom=callfrom, returnto=returnto) loopinfos.append(lli) return loopinfos", "for stmt in blocks[blk_start].body: if isinstance(stmt, ir.EnterWith): var_ref = stmt.contextmanager", "defs = compute_use_defs(loopblocks) for vs in defs.usemap.values(): used_vars |= vs", "gathers all the backedges \"\"\" header = loop.header tailkey =", "in _cfg_nodes_in_region(cfg, blk_start, blk_end): body_blocks.append(node) _legalize_with_head(blocks[blk_start]) # Find the contextmanager", "block inplace to call to the lifted-loop. 
Returns a dictionary", "earlier an_exit = next(iter(loop.exits)) # anyone of the exit block", "possible if there's an exit path in the with-block raise", "|= vs used_or_defined = used_vars | def_vars # note: sorted", "k in loop.exits: succs = set(x for x, _ in", "Returns information on looplifting candidates. \"\"\" loops = _extract_loop_lifting_candidates(cfg, blocks)", "achieve similar loopblocks = {} for k in body_block_ids: loopblocks[k]", "replace_target(blk.terminator, header, tailkey) newblocks[blkkey] = newblk # create new tail", "of ranges for the with-regions. \"\"\" def find_ranges(blocks): for blk", "for the lifted loop lifted_ir = func_ir.derive(blocks=loopblocks, arg_names=tuple(loopinfo.inputs), arg_count=len(loopinfo.inputs), force_non_generator=True)", "import absolute_import, print_function from collections import namedtuple, defaultdict import logging", "raise errors.CompilerError( 'unsupported controlflow due to return/raise ' 'statements inside", "blk.terminator.get_targets() # is a backedge? 
if loop.header in edges: count", "outputs = sorted(set(outputs) & used_or_defined & def_vars) return inputs, outputs", "all the backedges \"\"\" header = loop.header tailkey = new_block_id()", "outputs _loop_lift_info = namedtuple('loop_lift_info', 'loop,inputs,outputs,callfrom,returnto') def _loop_lift_get_candidate_infos(cfg, blocks, livemap): \"\"\"", "var_ref = stmt.contextmanager ctxobj, extra = get_ctxmgr_obj(var_ref) if not hasattr(ctxobj,", "= ir_utils.fill_callee_prologue( block=ir.Block(scope=scope, loc=loc), inputs=loopinfo.inputs, label_next=loopinfo.callfrom, ) blocks[loopinfo.returnto] = ir_utils.fill_callee_epilogue(", "target == src else target) if isinstance(term, ir.Branch): return ir.Branch(cond=term.cond,", "_get_with_contextmanager(func_ir, blocks, blk_start): \"\"\"Get the global object used for the", "else: assert not term.get_targets() return term def rewrite_single_backedge(loop): \"\"\" Add", "candidates.append(loop) _logger.debug(\"add candidate: %s\", loop) return candidates def find_region_inout_vars(blocks, livemap,", "use of context-manager.\" def get_var_dfn(var): \"\"\"Get the definition given a", "extra = get_ctxmgr_obj(var_ref) if not hasattr(ctxobj, 'mutate_with_body'): raise errors.CompilerError( \"Unsupported", "\"cannot have yield inside the loop\" insiders = set(loop.body) |", "_logger.debug(\"one_entry=%s\", ok) return ok def cannot_yield(loop): \"cannot have yield inside", "liftedloop blocks[loopinfo.callfrom] = callblock return liftedloop def loop_lifting(func_ir, typingctx, targetctx,", "in loop.body: blk = newblocks[blkkey] if header in blk.terminator.get_targets(): newblk", "have exactly 1 JUMP\", loc=blk.loc, ) # Can have any", "k in loopblockkeys) # Modify the loop blocks _loop_lift_prepare_loop_func(loopinfo, loopblocks)", "for node in _cfg_nodes_in_region(cfg, blk_start, blk_end): body_blocks.append(node) _legalize_with_head(blocks[blk_start]) # Find", "_loop_lift_info = namedtuple('loop_lift_info', 
'loop,inputs,outputs,callfrom,returnto') def _loop_lift_get_candidate_infos(cfg, blocks, livemap): \"\"\" Returns", "yield\") return True _logger.info('finding looplift candidates') # the check for", "ew.end def previously_occurred(start, known_ranges): for a, b in known_ranges: if", "for blkkey in loop.body: blk = newblocks[blkkey] if header in", "extract all withs. Only the top-level withs are extracted. Returns", "top-level function to call the lifted loop. \"\"\" scope =", "IR main = func_ir.derive(blocks=blocks) return main, loops def canonicalize_cfg_single_backedge(blocks): \"\"\"", "defaultdict import logging from numba.analysis import compute_cfg_from_blocks, find_top_level_loops from numba", "map(blocks.__getitem__, insiders): for inst in blk.body: if isinstance(inst, ir.Assign): if", "Modify the block inplace to call to the lifted-loop. Returns", "set(loop.body) | set(loop.entries) if len(loop.exits) > 1: # Pre-Py3.8 may", "the lifted loop. \"\"\" scope = block.scope loc = block.loc", "checked earlier else: # Post-Py3.8 DO NOT have multiple exits", "sub_irs: # Unchanged new_ir = func_ir else: new_ir = func_ir.derive(blocks)", "entry\" ok = len(loop.entries) == 1 _logger.debug(\"one_entry=%s\", ok) return ok", "on Numba IR \"\"\" from __future__ import absolute_import, print_function from", "flags.copy() if objectmode: # Lifted with-block cannot looplift myflags.enable_looplift =", "|= succs ok = len(outedges) == 1 _logger.debug(\"same_exit_point=%s (%s)\", ok,", "interpreter callsite into the liftedloop blocks[loopinfo.callfrom] = callblock return liftedloop", "func_ir.derive(blocks=blocks) return main, loops def canonicalize_cfg_single_backedge(blocks): \"\"\" Rewrite loops that", "count += 1 if count > 1: # early exit", "in defs.usemap.values(): used_vars |= vs for vs in defs.defmap.values(): def_vars", "ir_utils.fill_block_with_call( newblock=blk, callee=liftedloop, label_next=returnto, inputs=inputs, outputs=outputs, ) return blk def", "for loop in 
yield_loops_with_multiple_backedges(): rewrite_single_backedge(loop) return newblocks def canonicalize_cfg(blocks): \"\"\"", "remove, # saves having to create something valid to run", "= logging.getLogger(__name__) def _extract_loop_lifting_candidates(cfg, blocks): \"\"\" Returns a list of", "blocks for use as lifted loop. \"\"\" entry_block = blocks[loopinfo.callfrom]", "dst): def replace(target): return (dst if target == src else", "+= 1 if counters.pop(ir.EnterWith) != 1: raise errors.CompilerError( \"with's head-block", "cmkind, extra = _get_with_contextmanager(func_ir, blocks, blk_start) # Mutate the body", "loc=blocks[blk_start].loc, ) if ctxobj is None: raise errors.CompilerError(_illegal_cm_msg, loc=dfn.loc) return", "ir, errors, ir_utils from numba.analysis import compute_use_defs _logger = logging.getLogger(__name__)", "inputs, outputs _loop_lift_info = namedtuple('loop_lift_info', 'loop,inputs,outputs,callfrom,returnto') def _loop_lift_get_candidate_infos(cfg, blocks, livemap):", "is None: raise errors.CompilerError(_illegal_cm_msg, loc=dfn.loc) return ctxobj, extra # Scan", "not dominating the exit.\" raise errors.CompilerError(msg, loc=loc) if e not", "in loopblockkeys) # Modify the loop blocks _loop_lift_prepare_loop_func(loopinfo, loopblocks) #", "numba.analysis import compute_cfg_from_blocks, find_top_level_loops from numba import ir, errors, ir_utils", "= ir.Block(scope=entryblk.scope, loc=entryblk.loc) # add backedge tailblk.append(ir.Jump(target=header, loc=tailblk.loc)) newblocks[tailkey] =", "any other statements if counters: raise errors.CompilerError( \"illegal statements in", "the loop def same_exit_point(loop): \"all exits must point to the", "\" ) raise errors.CompilerError(msg, loc=loc) def find_setupwiths(blocks): \"\"\"Find all top-level", "loopblockkeys |= loop.exits loopblocks = dict((k, blocks[k].copy()) for k in", "that are in the given region \"\"\" region_nodes = set()", "checked earlier an_exit = next(iter(loop.exits)) # anyone 
of the exit", "used as a call. \"\"\" # If the contextmanager used", "sorted(set(outputs) & used_or_defined & def_vars) return inputs, outputs _loop_lift_info =", "\"\"\" Rewrite the given blocks to canonicalize the CFG. Returns", "= ObjModeLiftedWith else: cls = LiftedWith return cls(func_ir, typingctx, targetctx,", "blocks, typingctx, targetctx, flags, locals): \"\"\" Modify the block inplace", "targetctx, flags, locals) # modify for calling into liftedloop callblock", "with-context usage\", loc=blocks[blk_start].loc, ) def _legalize_with_head(blk): \"\"\"Given *blk*, the head", "new_ir = func_ir else: new_ir = func_ir.derive(blocks) return new_ir, sub_irs", "dfn.args] kws = {k: get_var_dfn(v) for k, v in dfn.kws}", "used for the context manager \"\"\" _illegal_cm_msg = \"Illegal use", "flags, locals) # modify for calling into liftedloop callblock =", "= blocks[s].loc if s not in doms[e]: # Not sure", "= find_setupwiths(blocks) cfg = vlt.cfg _legalize_withs_cfg(withs, cfg, blocks) # For", "counters.pop(ir.Jump) != 1: raise errors.CompilerError( \"with's head-block must have exactly", "# Scan the start of the with-region for the contextmanager", "# Make main IR main = func_ir.derive(blocks=blocks) return main, loops", "IR with %d candidates:\\n%s', len(loopinfos), func_ir.dump_to_string()) for loopinfo in loopinfos:", "in dfn.kws} extra = {'args': args, 'kwargs': kws} var_ref =", "%s\", loop) if (same_exit_point(loop) and one_entry(loop) and cannot_yield(loop) and cfg.entry_point()", "livemap[returnto] # ensure live variables are actually used in the", "update main interpreter callsite into the liftedloop blocks[loopinfo.callfrom] = callblock", "blocks[loopinfo.callfrom] scope = entry_block.scope loc = entry_block.loc # Lowering assumes", "errors.CompilerError(_illegal_cm_msg, loc=dfn.loc) return ctxobj, extra # Scan the start of", "if (same_exit_point(loop) and one_entry(loop) and cannot_yield(loop) and cfg.entry_point() not in", "2 tuple of `(toplevel_interp, 
[loop0_interp, loop1_interp, ....])` \"\"\" blocks =", "= len(loop.entries) == 1 _logger.debug(\"one_entry=%s\", ok) return ok def cannot_yield(loop):", "exit of with-context not post-dominating the entry. \" ) raise", "= compute_cfg_from_blocks(blocks) loopinfos = _loop_lift_get_candidate_infos(cfg, blocks, func_ir.variable_lifetime.livemap) loops = []", "blocks, livemap): \"\"\" Returns information on looplifting candidates. \"\"\" loops", "loc=blk.loc, ) # Can have any number of del counters.pop(ir.Del,", "context-manager is used as a call. \"\"\" # If the", "_loop_lift_modify_blocks(func_ir, loopinfo, blocks, typingctx, targetctx, flags, locals): \"\"\" Modify the", "sorted for stable ordering inputs = sorted(set(inputs) & used_or_defined) outputs", "= _loop_lift_info(loop=loop, inputs=inputs, outputs=outputs, callfrom=callfrom, returnto=returnto) loopinfos.append(lli) return loopinfos def", "blocks, blk_start, blk_end, body_blocks, dispatcher_factory, extra) sub_irs.append(sub) if not sub_irs:", "of the with-context(s). 
\"\"\" doms = cfg.dominators() postdoms = cfg.post_dominators()", "cfg.successors(an_exit) # requirement checked earlier else: # Post-Py3.8 DO NOT", "# Lowering assumes the first block to be the one", "loop1_interp, ....])` \"\"\" blocks = func_ir.blocks.copy() cfg = compute_cfg_from_blocks(blocks) loopinfos", "[] for loop in find_top_level_loops(cfg): _logger.debug(\"top-level loop: %s\", loop) if", "manager in use\", loc=blocks[blk_start].loc, ) return ctxobj, extra # No", "outputs = find_region_inout_vars( blocks=blocks, livemap=livemap, callfrom=callfrom, returnto=returnto, body_block_ids=local_block_ids, ) lli", "ctxobj, extra # Scan the start of the with-region for", "have exactly 1 ENTER_WITH\", loc=blk.loc, ) if counters.pop(ir.Jump) != 1:", "the with-block raise errors.CompilerError( 'unsupported controlflow due to return/raise '", "# Create a new IR for the lifted loop lifted_ir", "vlt = func_ir.variable_lifetime blocks = func_ir.blocks.copy() # find where with-contexts", "= True myflags.force_pyobject = True myflags.no_cpython_wrapper = False cls =", "CFG nodes that are in the given region \"\"\" region_nodes", "for k in loopblockkeys) # Modify the loop blocks _loop_lift_prepare_loop_func(loopinfo,", "ctxobj, extra = get_ctxmgr_obj(var_ref) if not hasattr(ctxobj, 'mutate_with_body'): raise errors.CompilerError(", "_logger.debug(\"top-level loop: %s\", loop) if (same_exit_point(loop) and one_entry(loop) and cannot_yield(loop)", "livemap, callfrom, returnto, body_block_ids): \"\"\"Find input and output variables to", "succs if node not in region_nodes and node != region_end])", "main interpreter callsite into the liftedloop blocks[loopinfo.callfrom] = callblock return", "flags, locals): \"\"\" Modify the block inplace to call to", "loopblockkeys: del blocks[k] # update main interpreter callsite into the", "= False cls = ObjModeLiftedWith else: cls = LiftedWith return", "inputs=inputs, outputs=outputs, ) return blk def _loop_lift_prepare_loop_func(loopinfo, 
blocks): \"\"\" Inplace", "find where with-contexts regions are withs = find_setupwiths(blocks) cfg =", "loop def same_exit_point(loop): \"all exits must point to the same", "must point to the same location\" outedges = set() for", "returns a 2 tuple of `(toplevel_interp, [loop0_interp, loop1_interp, ....])` \"\"\"", "newblk # create new tail block entryblk = newblocks[header] tailblk", "# create new tail block entryblk = newblocks[header] tailblk =", "in blk.body: counters[type(stmt)] += 1 if counters.pop(ir.EnterWith) != 1: raise", "for ew in blk.find_insts(ir.EnterWith): yield ew.begin, ew.end def previously_occurred(start, known_ranges):", "given region \"\"\" region_nodes = set() stack = [region_begin] while", "\"\"\" Transform calling block from top-level function to call the", "_logger.info('finding looplift candidates') # the check for cfg.entry_point in the", "loop entry were in block 0 candidates = [] for", "outputs=outputs, callfrom=callfrom, returnto=returnto) loopinfos.append(lli) return loopinfos def _loop_lift_modify_call_block(liftedloop, block, inputs,", "arg_names=tuple(loopinfo.inputs), arg_count=len(loopinfo.inputs), force_non_generator=True) liftedloop = LiftedLoop(lifted_ir, typingctx, targetctx, flags, locals)", "= _loop_lift_modify_call_block(liftedloop, blocks[loopinfo.callfrom], loopinfo.inputs, loopinfo.outputs, loopinfo.returnto) # remove blocks for", "= func_ir.derive(blocks=blocks) return main, loops def canonicalize_cfg_single_backedge(blocks): \"\"\" Rewrite loops", "func_ir.get_definition(var) def get_ctxmgr_obj(var_ref): \"\"\"Return the context-manager object and extra info.", "an exit path in the with-block raise errors.CompilerError( 'unsupported controlflow", "Scan the start of the with-region for the contextmanager for", "Rewrite the given blocks to canonicalize the CFG. 
Returns a", "= func_ir.variable_lifetime blocks = func_ir.blocks.copy() # find where with-contexts regions", "ok, outedges) return ok def one_entry(loop): \"there is one entry\"", "Unchanged new_ir = func_ir else: new_ir = func_ir.derive(blocks) return new_ir,", "<filename>dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/transforms.py \"\"\" Implement transformation on Numba IR \"\"\" from __future__", "numba import ir, errors, ir_utils from numba.analysis import compute_use_defs _logger", "|= loop.exits loopblocks = dict((k, blocks[k].copy()) for k in loopblockkeys)", "'statements inside with block' ) assert s in blocks, 'starting", "typingctx, targetctx, flags, locals) # modify for calling into liftedloop", "create something valid to run through postproc # to achieve", "in block 0 candidates = [] for loop in find_top_level_loops(cfg):", "import namedtuple, defaultdict import logging from numba.analysis import compute_cfg_from_blocks, find_top_level_loops", "contains an return # statement, which is not handled by", "# update main interpreter callsite into the liftedloop blocks[loopinfo.callfrom] =", "region_nodes = set() stack = [region_begin] while stack: tos =", "and dfn.op == 'call': args = [get_var_dfn(x) for x in", "is not handled by the looplifting code. # Thus, this", "e not in postdoms[s]: msg = ( \"Does not support", "for k, v in dfn.kws} extra = {'args': args, 'kwargs':", "error. msg = \"Entry of with-context not dominating the exit.\"", "loop.body: blk = newblocks[blkkey] if header in blk.terminator.get_targets(): newblk =", "that the with-context has no side-exits for s, e in", "controlflow due to return/raise ' 'statements inside with block' )", ") blocks[loopinfo.returnto] = ir_utils.fill_callee_epilogue( block=ir.Block(scope=scope, loc=loc), outputs=loopinfo.outputs, ) def _loop_lift_modify_blocks(func_ir,", "locals): \"\"\" Loop lifting transformation. 
Given a interpreter `func_ir` returns", "regions are withs = find_setupwiths(blocks) cfg = vlt.cfg _legalize_withs_cfg(withs, cfg,", "info. The extra contains the arguments if the context-manager is", "_loop_lift_get_candidate_infos(cfg, blocks, livemap): \"\"\" Returns information on looplifting candidates. \"\"\"", "|= nodes return region_nodes def _legalize_withs_cfg(withs, cfg, blocks): \"\"\"Verify the", "if e not in blocks: # this's possible if there's", "for loop in find_top_level_loops(cfg): _logger.debug(\"top-level loop: %s\", loop) if (same_exit_point(loop)", "block region. \"\"\" inputs = livemap[callfrom] outputs = livemap[returnto] #", "return cls(func_ir, typingctx, targetctx, myflags, locals, **kwargs) postproc.PostProcessor(func_ir).run() # ensure", "= func_ir.derive(blocks=loopblocks, arg_names=tuple(loopinfo.inputs), arg_count=len(loopinfo.inputs), force_non_generator=True) liftedloop = LiftedLoop(lifted_ir, typingctx, targetctx,", "the_lifted_with_ir) \"\"\" from numba import postproc def dispatcher_factory(func_ir, objectmode=False, **kwargs):", "# the kind of contextmanager sub_irs = [] for (blk_start,", "\"\"\" return canonicalize_cfg_single_backedge(blocks) def with_lifting(func_ir, typingctx, targetctx, flags, locals): \"\"\"With-lifting", "_illegal_cm_msg = \"Illegal use of context-manager.\" def get_var_dfn(var): \"\"\"Get the", "errors, ir_utils from numba.analysis import compute_use_defs _logger = logging.getLogger(__name__) def", "\"\"\" Loop lifting transformation. 
Given a interpreter `func_ir` returns a", "ir.Jump(target=replace(term.target), loc=term.loc) else: assert not term.get_targets() return term def rewrite_single_backedge(loop):", "known_ranges: if s >= a and s < b: return", "use\", loc=blocks[blk_start].loc, ) return ctxobj, extra # No contextmanager found?", "local_block_ids = set(loop.body) | set(loop.entries) inputs, outputs = find_region_inout_vars( blocks=blocks,", "= [] for (blk_start, blk_end) in withs: body_blocks = []", "and output variables to a block region. \"\"\" inputs =", "< b: return True return False known_ranges = [] for", "True myflags.force_pyobject = True myflags.no_cpython_wrapper = False cls = ObjModeLiftedWith", "blocks = func_ir.blocks.copy() cfg = compute_cfg_from_blocks(blocks) loopinfos = _loop_lift_get_candidate_infos(cfg, blocks,", "newblocks[blkkey] = newblk # create new tail block entryblk =", "k, v in dfn.kws} extra = {'args': args, 'kwargs': kws}", "s >= a and s < b: return True return", "else: # Post-Py3.8 DO NOT have multiple exits returnto =", "{} for k in body_block_ids: loopblocks[k] = blocks[k] used_vars =", "raise errors.CompilerError( \"malformed with-context usage\", loc=blocks[blk_start].loc, ) def _legalize_with_head(blk): \"\"\"Given", "\"\"\"Get the global object used for the context manager \"\"\"", "| set(loop.entries) if len(loop.exits) > 1: # Pre-Py3.8 may have", "the exit point has no successor, it contains an return", "and cfg.entry_point() not in loop.entries): candidates.append(loop) _logger.debug(\"add candidate: %s\", loop)", "= {k: get_var_dfn(v) for k, v in dfn.kws} extra =", "rewrite_single_backedge(loop) return newblocks def canonicalize_cfg(blocks): \"\"\" Rewrite the given blocks", "\"\"\" Returns a list of loops that are candidate for", "# statement, which is not handled by the looplifting code.", "def _loop_lift_modify_call_block(liftedloop, block, inputs, outputs, returnto): \"\"\" Transform calling block", "inputs=inputs, outputs=outputs, 
callfrom=callfrom, returnto=returnto) loopinfos.append(lli) return loopinfos def _loop_lift_modify_call_block(liftedloop, block,", "def find_setupwiths(blocks): \"\"\"Find all top-level with. Returns a list of", "outputs, returnto): \"\"\" Transform calling block from top-level function to", "ir.Block(scope=entryblk.scope, loc=entryblk.loc) # add backedge tailblk.append(ir.Jump(target=header, loc=tailblk.loc)) newblocks[tailkey] = tailblk", "counters: raise errors.CompilerError( \"illegal statements in with's head-block\", loc=blk.loc, )", "lifted_ir = func_ir.derive(blocks=loopblocks, arg_names=tuple(loopinfo.inputs), arg_count=len(loopinfo.inputs), force_non_generator=True) liftedloop = LiftedLoop(lifted_ir, typingctx,", "# Pre-Py3.8 may have multiple exits [(returnto, _)] = cfg.successors(an_exit)", "%s\", loop) return candidates def find_region_inout_vars(blocks, livemap, callfrom, returnto, body_block_ids):", "must have exactly 1 ENTER_WITH\", loc=blk.loc, ) if counters.pop(ir.Jump) !=", "in region_nodes and node != region_end]) stack.extend(nodes) region_nodes |= nodes", "[loop0_interp, loop1_interp, ....])` \"\"\" blocks = func_ir.blocks.copy() cfg = compute_cfg_from_blocks(blocks)", "NOT have multiple exits returnto = an_exit local_block_ids = set(loop.body)", "isinstance(dfn, ir.Expr) and dfn.op == 'call': args = [get_var_dfn(x) for", "def _legalize_with_head(blk): \"\"\"Given *blk*, the head block of the with-context,", "# Not sure what condition can trigger this error. msg", "> 1: # early exit return True return False def", "No contextmanager found? 
raise errors.CompilerError( \"malformed with-context usage\", loc=blocks[blk_start].loc, )", "blk_end): body_blocks.append(node) _legalize_with_head(blocks[blk_start]) # Find the contextmanager cmkind, extra =", "a Call dfn = func_ir.get_definition(var_ref) if isinstance(dfn, ir.Expr) and dfn.op", "= set() stack = [region_begin] while stack: tos = stack.pop()", "for calling into liftedloop callblock = _loop_lift_modify_call_block(liftedloop, blocks[loopinfo.callfrom], loopinfo.inputs, loopinfo.outputs,", "# find where with-contexts regions are withs = find_setupwiths(blocks) cfg", "yield lp def replace_target(term, src, dst): def replace(target): return (dst", "# saves having to create something valid to run through", "transform loop blocks for use as lifted loop. \"\"\" entry_block", "check for cfg.entry_point in the loop.entries is to prevent a", "the set of CFG nodes that are in the given", "a and s < b: return True return False known_ranges", "\"\"\" loops = _extract_loop_lifting_candidates(cfg, blocks) loopinfos = [] for loop", "to # the kind of contextmanager sub_irs = [] for", "def_vars # note: sorted for stable ordering inputs = sorted(set(inputs)", ") assert s in blocks, 'starting offset is not a", "set(loop.entries) inputs, outputs = find_region_inout_vars( blocks=blocks, livemap=livemap, callfrom=callfrom, returnto=returnto, body_block_ids=local_block_ids,", "# anyone of the exit block if len(loop.exits) > 1:", "1: raise errors.CompilerError( \"with's head-block must have exactly 1 ENTER_WITH\",", "context-manager object and extra info. 
The extra contains the arguments", "len(loop.exits) > 1: # Pre-Py3.8 may have multiple exits [(returnto,", "ir_utils.guard(ir_utils.find_global_value, func_ir, var_ref) # check the contextmanager object if ctxobj", "def canonicalize_cfg(blocks): \"\"\" Rewrite the given blocks to canonicalize the", "LiftedWith, ObjModeLiftedWith myflags = flags.copy() if objectmode: # Lifted with-block", "'mutate_with_body'): raise errors.CompilerError( \"Unsupported context manager in use\", loc=blocks[blk_start].loc, )", "= loopinfo.loop loopblockkeys = set(loop.body) | set(loop.entries) if len(loop.exits) >", "return False _logger.debug(\"no yield\") return True _logger.info('finding looplift candidates') #", "that it doesn't do anything else. \"\"\" counters = defaultdict(int)", "successor, it contains an return # statement, which is not", "= blocks.copy() def new_block_id(): return max(newblocks.keys()) + 1 def has_multiple_backedges(loop):", "= [region_begin] while stack: tos = stack.pop() succs, _ =", "if not sub_irs: # Unchanged new_ir = func_ir else: new_ir", "def get_ctxmgr_obj(var_ref): \"\"\"Return the context-manager object and extra info. The", "return region_nodes def _legalize_withs_cfg(withs, cfg, blocks): \"\"\"Verify the CFG of", "Modify the loop blocks _loop_lift_prepare_loop_func(loopinfo, loopblocks) # Create a new", "ctxobj = ir_utils.guard(ir_utils.find_global_value, func_ir, var_ref) # check the contextmanager object", "in doms[e]: # Not sure what condition can trigger this", "blocks.copy() def new_block_id(): return max(newblocks.keys()) + 1 def has_multiple_backedges(loop): count", "**kwargs): from numba.dispatcher import LiftedWith, ObjModeLiftedWith myflags = flags.copy() if", "_legalize_withs_cfg(withs, cfg, blocks) # For each with-regions, mutate them according", "input and output variables to a block region. 
\"\"\" inputs", "_extract_loop_lifting_candidates(cfg, blocks): \"\"\" Returns a list of loops that are", "the context-manager object and extra info. The extra contains the", "loops that are candidate for loop lifting \"\"\" # check", "of loops that are candidate for loop lifting \"\"\" #", "ok def one_entry(loop): \"there is one entry\" ok = len(loop.entries)", "be the one with the smallest offset firstblk = min(blocks)", "newblocks def canonicalize_cfg(blocks): \"\"\" Rewrite the given blocks to canonicalize", "if counters: raise errors.CompilerError( \"illegal statements in with's head-block\", loc=blk.loc,", "\"\"\"Return the context-manager object and extra info. The extra contains", "prevent a bad # rewrite where a prelude for a", "loop in loops: [callfrom] = loop.entries # requirement checked earlier", "= [] if loopinfos: _logger.debug('loop lifting this IR with %d", "loop) return candidates def find_region_inout_vars(blocks, livemap, callfrom, returnto, body_block_ids): \"\"\"Find", "lifted-loop. Returns a dictionary of blocks of the lifted-loop. 
\"\"\"", "_legalize_with_head(blocks[blk_start]) # Find the contextmanager cmkind, extra = _get_with_contextmanager(func_ir, blocks,", "blocks, else remove, # saves having to create something valid", "vlt.cfg _legalize_withs_cfg(withs, cfg, blocks) # For each with-regions, mutate them", "if loopinfos: _logger.debug('loop lifting this IR with %d candidates:\\n%s', len(loopinfos),", "a, b in known_ranges: if s >= a and s", "if loop.header in edges: count += 1 if count >", "\"\"\" header = loop.header tailkey = new_block_id() for blkkey in", "= func_ir.blocks.copy() cfg = compute_cfg_from_blocks(blocks) loopinfos = _loop_lift_get_candidate_infos(cfg, blocks, func_ir.variable_lifetime.livemap)", "lifetime assert func_ir.variable_lifetime vlt = func_ir.variable_lifetime blocks = func_ir.blocks.copy() #", "in dfn.args] kws = {k: get_var_dfn(v) for k, v in", "= set(loop.body) | set(loop.entries) if len(loop.exits) > 1: # Pre-Py3.8", "in loopblockkeys: del blocks[k] # update main interpreter callsite into", "loop = loopinfo.loop loopblockkeys = set(loop.body) | set(loop.entries) if len(loop.exits)", "in known_ranges: if s >= a and s < b:", "break/return/raise) that can leave the with-context. \" \"Details: exit of", "MUST NOT be any other statements if counters: raise errors.CompilerError(", "lp in cfg.loops().values(): if has_multiple_backedges(lp): yield lp def replace_target(term, src,", "object mode myflags.enable_pyobject = True myflags.force_pyobject = True myflags.no_cpython_wrapper =", "loop.body: blk = blocks[k] edges = blk.terminator.get_targets() # is a", "set() stack = [region_begin] while stack: tos = stack.pop() succs,", "variable used as context manager\", loc=blocks[blk_start].loc, ) if ctxobj is", "loopinfos: _logger.debug('loop lifting this IR with %d candidates:\\n%s', len(loopinfos), func_ir.dump_to_string())", "IR to extract all withs. Only the top-level withs are", "# No contextmanager found? 
raise errors.CompilerError( \"malformed with-context usage\", loc=blocks[blk_start].loc,", "the smallest offset firstblk = min(blocks) - 1 blocks[firstblk] =", "the with-regions. \"\"\" def find_ranges(blocks): for blk in blocks.values(): for", "blk.copy() # rewrite backedge into jumps to new tail block", "v in dfn.kws} extra = {'args': args, 'kwargs': kws} var_ref", "counters[type(stmt)] += 1 if counters.pop(ir.EnterWith) != 1: raise errors.CompilerError( \"with's", "condition can trigger this error. msg = \"Entry of with-context", "extra info. The extra contains the arguments if the context-manager", "find_setupwiths(blocks): \"\"\"Find all top-level with. Returns a list of ranges", "yield\") return False _logger.debug(\"no yield\") return True _logger.info('finding looplift candidates')", "block if len(loop.exits) > 1: # Pre-Py3.8 may have multiple", "loc=blk.loc, ) if counters.pop(ir.Jump) != 1: raise errors.CompilerError( \"with's head-block", "def cannot_yield(loop): \"cannot have yield inside the loop\" insiders =", "LiftedWith return cls(func_ir, typingctx, targetctx, myflags, locals, **kwargs) postproc.PostProcessor(func_ir).run() #", "= set() defs = compute_use_defs(loopblocks) for vs in defs.usemap.values(): used_vars", "flags, locals): \"\"\" Loop lifting transformation. Given a interpreter `func_ir`", ") return blk def _loop_lift_prepare_loop_func(loopinfo, blocks): \"\"\" Inplace transform loop", "[callfrom] = loop.entries # requirement checked earlier an_exit = next(iter(loop.exits))", "add backedge tailblk.append(ir.Jump(target=header, loc=tailblk.loc)) newblocks[tailkey] = tailblk for loop in", "according to # the kind of contextmanager sub_irs = []", "extracted. 
Returns the (the_new_ir, the_lifted_with_ir) \"\"\" from numba import postproc", "point has no successor, it contains an return # statement,", "extra = _get_with_contextmanager(func_ir, blocks, blk_start) # Mutate the body and", "cannot_yield(loop): \"cannot have yield inside the loop\" insiders = set(loop.body)", "myflags = flags.copy() if objectmode: # Lifted with-block cannot looplift", "= [] for node in _cfg_nodes_in_region(cfg, blk_start, blk_end): body_blocks.append(node) _legalize_with_head(blocks[blk_start])", "get written into block -1 # if a loop entry", "ok = len(loop.entries) == 1 _logger.debug(\"one_entry=%s\", ok) return ok def", "isinstance(inst, ir.Assign): if isinstance(inst.value, ir.Yield): _logger.debug(\"has yield\") return False _logger.debug(\"no", "loop blocks for use as lifted loop. \"\"\" entry_block =", "isinstance(term, ir.Jump): return ir.Jump(target=replace(term.target), loc=term.loc) else: assert not term.get_targets() return", "loop. \"\"\" entry_block = blocks[loopinfo.callfrom] scope = entry_block.scope loc =", "Pre-Py3.8 may have multiple exits loopblockkeys |= loop.exits loopblocks =", "newblk.body[-1] = replace_target(blk.terminator, header, tailkey) newblocks[blkkey] = newblk # create", "for s, e in sorted(find_ranges(blocks)): if not previously_occurred(s, known_ranges): if", "any number of del counters.pop(ir.Del, None) # There MUST NOT", "import compute_cfg_from_blocks, find_top_level_loops from numba import ir, errors, ir_utils from", "in cfg.successors(k)) if not succs: # If the exit point", "# For each with-regions, mutate them according to # the", "else: cls = LiftedWith return cls(func_ir, typingctx, targetctx, myflags, locals,", "\"Does not support with-context that contain branches \" \"(i.e. 
break/return/raise)", "loopblockkeys) # Modify the loop blocks _loop_lift_prepare_loop_func(loopinfo, loopblocks) # Create", "_)] = cfg.successors(an_exit) # requirement checked earlier else: # Post-Py3.8", "if count > 1: # early exit return True return", "locals, **kwargs) postproc.PostProcessor(func_ir).run() # ensure we have variable lifetime assert", "blk.find_insts(ir.EnterWith): yield ew.begin, ew.end def previously_occurred(start, known_ranges): for a, b", "b in known_ranges: if s >= a and s <", "for inst in blk.body: if isinstance(inst, ir.Assign): if isinstance(inst.value, ir.Yield):", "blocks loop = loopinfo.loop loopblockkeys = set(loop.body) | set(loop.entries) if", "multiple backedges. \"\"\" cfg = compute_cfg_from_blocks(blocks) newblocks = blocks.copy() def", "# Copy loop blocks loop = loopinfo.loop loopblockkeys = set(loop.body)", "main, loops def canonicalize_cfg_single_backedge(blocks): \"\"\" Rewrite loops that have multiple", "tail block entryblk = newblocks[header] tailblk = ir.Block(scope=entryblk.scope, loc=entryblk.loc) #", "have multiple exits returnto = an_exit local_block_ids = set(loop.body) |", "= new_block_id() for blkkey in loop.body: blk = newblocks[blkkey] if", "a loop entry were in block 0 candidates = []", "False # Lifted with-block uses object mode myflags.enable_pyobject = True", "no successor, it contains an return # statement, which is", "the contextmanager cmkind, extra = _get_with_contextmanager(func_ir, blocks, blk_start) # Mutate", "a prelude for a lifted loop would get written into", "tail block newblk.body[-1] = replace_target(blk.terminator, header, tailkey) newblocks[blkkey] = newblk", "point to the same location\" outedges = set() for k", "for a, b in known_ranges: if s >= a and", "contextmanager found? 
raise errors.CompilerError( \"malformed with-context usage\", loc=blocks[blk_start].loc, ) def", "errors.CompilerError(msg, loc=loc) if e not in postdoms[s]: msg = (", "compute_use_defs(loopblocks) for vs in defs.usemap.values(): used_vars |= vs for vs", "raise errors.CompilerError(msg, loc=loc) if e not in postdoms[s]: msg =", "in loop.body: blk = blocks[k] edges = blk.terminator.get_targets() # is", "not in postdoms[s]: msg = ( \"Does not support with-context", "loopblocks = dict((k, blocks[k].copy()) for k in loopblockkeys) # Modify", "lifting transformation. Given a interpreter `func_ir` returns a 2 tuple", "loopinfo in loopinfos: lifted = _loop_lift_modify_blocks(func_ir, loopinfo, blocks, typingctx, targetctx,", "loop.header in edges: count += 1 if count > 1:", "typingctx, targetctx, flags, locals): \"\"\" Loop lifting transformation. Given a", "Mutate the body and get new IR sub = cmkind.mutate_with_body(func_ir,", "return new_ir, sub_irs def _get_with_contextmanager(func_ir, blocks, blk_start): \"\"\"Get the global", "blocks[s].loc if s not in doms[e]: # Not sure what", "'starting offset is not a label' known_ranges.append((s, e)) return known_ranges", "Lowering assumes the first block to be the one with", "for the context manager \"\"\" _illegal_cm_msg = \"Illegal use of", "inputs = livemap[callfrom] outputs = livemap[returnto] # ensure live variables", "Not sure what condition can trigger this error. msg =", "no side-exits for s, e in withs: loc = blocks[s].loc", "jumps to new tail block newblk.body[-1] = replace_target(blk.terminator, header, tailkey)", "return max(newblocks.keys()) + 1 def has_multiple_backedges(loop): count = 0 for", "blocks) loopinfos = [] for loop in loops: [callfrom] =", "with-regions. 
\"\"\" def find_ranges(blocks): for blk in blocks.values(): for ew", "namedtuple, defaultdict import logging from numba.analysis import compute_cfg_from_blocks, find_top_level_loops from", "False outedges |= succs ok = len(outedges) == 1 _logger.debug(\"same_exit_point=%s", "entry were in block 0 candidates = [] for loop", "yield_loops_with_multiple_backedges(): for lp in cfg.loops().values(): if has_multiple_backedges(lp): yield lp def", "in loop.entries): candidates.append(loop) _logger.debug(\"add candidate: %s\", loop) return candidates def", "raise errors.CompilerError( \"with's head-block must have exactly 1 ENTER_WITH\", loc=blk.loc,", "= newblk # create new tail block entryblk = newblocks[header]", "that contain branches \" \"(i.e. break/return/raise) that can leave the", "= loop.entries # requirement checked earlier an_exit = next(iter(loop.exits)) #", "lifted loop. \"\"\" entry_block = blocks[loopinfo.callfrom] scope = entry_block.scope loc", "block=ir.Block(scope=scope, loc=loc), outputs=loopinfo.outputs, ) def _loop_lift_modify_blocks(func_ir, loopinfo, blocks, typingctx, targetctx,", "loopinfo, blocks, typingctx, targetctx, flags, locals): \"\"\" Modify the block", "sub_irs.append(sub) if not sub_irs: # Unchanged new_ir = func_ir else:", "targetctx, myflags, locals, **kwargs) postproc.PostProcessor(func_ir).run() # ensure we have variable", "= livemap[callfrom] outputs = livemap[returnto] # ensure live variables are", "msg = ( \"Does not support with-context that contain branches", "uses object mode myflags.enable_pyobject = True myflags.force_pyobject = True myflags.no_cpython_wrapper", "_logger.debug(\"return-statement in loop.\") return False outedges |= succs ok =", "JUMP\", loc=blk.loc, ) # Can have any number of del", "from numba import postproc def dispatcher_factory(func_ir, objectmode=False, **kwargs): from numba.dispatcher", "to create something valid to run through postproc # to", "\"\"\" # check well-formed-ness of the loop def 
same_exit_point(loop): \"all", "IR sub = cmkind.mutate_with_body(func_ir, blocks, blk_start, blk_end, body_blocks, dispatcher_factory, extra)", "looplift candidates') # the check for cfg.entry_point in the loop.entries", "and get new IR sub = cmkind.mutate_with_body(func_ir, blocks, blk_start, blk_end,", "loop in find_top_level_loops(cfg): _logger.debug(\"top-level loop: %s\", loop) if (same_exit_point(loop) and", "backedge into jumps to new tail block newblk.body[-1] = replace_target(blk.terminator,", "exits [(returnto, _)] = cfg.successors(an_exit) # requirement checked earlier else:", "ObjModeLiftedWith myflags = flags.copy() if objectmode: # Lifted with-block cannot", "the block inplace to call to the lifted-loop. Returns a", "\"\"\" Inplace transform loop blocks for use as lifted loop.", "succs: # If the exit point has no successor, it", "loopblocks) # Create a new IR for the lifted loop", "newblocks[blkkey] if header in blk.terminator.get_targets(): newblk = blk.copy() # rewrite", "and cannot_yield(loop) and cfg.entry_point() not in loop.entries): candidates.append(loop) _logger.debug(\"add candidate:", "side-exits for s, e in withs: loc = blocks[s].loc if", "not hasattr(ctxobj, 'mutate_with_body'): raise errors.CompilerError( \"Unsupported context manager in use\",", "have multiple backedges. 
\"\"\" cfg = compute_cfg_from_blocks(blocks) newblocks = blocks.copy()", "withs: loc = blocks[s].loc if s not in doms[e]: #", "with-block raise errors.CompilerError( 'unsupported controlflow due to return/raise ' 'statements", "_loop_lift_prepare_loop_func(loopinfo, loopblocks) # Create a new IR for the lifted", "inst in blk.body: if isinstance(inst, ir.Assign): if isinstance(inst.value, ir.Yield): _logger.debug(\"has", ") def _cfg_nodes_in_region(cfg, region_begin, region_end): \"\"\"Find the set of CFG", "loops = [] if loopinfos: _logger.debug('loop lifting this IR with", "ctxobj is None: raise errors.CompilerError(_illegal_cm_msg, loc=dfn.loc) return ctxobj, extra #", "s not in doms[e]: # Not sure what condition can", "1: # Pre-Py3.8 may have multiple exits loopblockkeys |= loop.exits", "with-context(s). \"\"\" doms = cfg.dominators() postdoms = cfg.post_dominators() # Verify", "body_block_ids: loopblocks[k] = blocks[k] used_vars = set() def_vars = set()", "lifted loop would get written into block -1 # if", "\"\"\" blocks = func_ir.blocks.copy() cfg = compute_cfg_from_blocks(blocks) loopinfos = _loop_lift_get_candidate_infos(cfg,", "as lifted loop. \"\"\" entry_block = blocks[loopinfo.callfrom] scope = entry_block.scope", "# rewrite backedge into jumps to new tail block newblk.body[-1]", "objectmode=False, **kwargs): from numba.dispatcher import LiftedWith, ObjModeLiftedWith myflags = flags.copy()", "len(loop.entries) == 1 _logger.debug(\"one_entry=%s\", ok) return ok def cannot_yield(loop): \"cannot", "inplace to call to the lifted-loop. 
Returns a dictionary of", "= stmt.contextmanager ctxobj, extra = get_ctxmgr_obj(var_ref) if not hasattr(ctxobj, 'mutate_with_body'):", "args = [get_var_dfn(x) for x in dfn.args] kws = {k:", "replace(target): return (dst if target == src else target) if", "have multiple exits loopblockkeys |= loop.exits loopblocks = dict((k, blocks[k].copy())", "\"Illegal use of context-manager.\" def get_var_dfn(var): \"\"\"Get the definition given", "exit path in the with-block raise errors.CompilerError( 'unsupported controlflow due", "vs in defs.defmap.values(): def_vars |= vs used_or_defined = used_vars |", "\"(i.e. break/return/raise) that can leave the with-context. \" \"Details: exit", "Transform calling block from top-level function to call the lifted", "get_ctxmgr_obj(var_ref): \"\"\"Return the context-manager object and extra info. The extra", "ir.Branch(cond=term.cond, truebr=replace(term.truebr), falsebr=replace(term.falsebr), loc=term.loc) elif isinstance(term, ir.Jump): return ir.Jump(target=replace(term.target), loc=term.loc)", "earlier else: # Post-Py3.8 DO NOT have multiple exits returnto", "cls(func_ir, typingctx, targetctx, myflags, locals, **kwargs) postproc.PostProcessor(func_ir).run() # ensure we", "loopblocks[k] = blocks[k] used_vars = set() def_vars = set() defs", "block newblk.body[-1] = replace_target(blk.terminator, header, tailkey) newblocks[blkkey] = newblk #", "1 _logger.debug(\"same_exit_point=%s (%s)\", ok, outedges) return ok def one_entry(loop): \"there", "cfg.entry_point in the loop.entries is to prevent a bad #", "%d candidates:\\n%s', len(loopinfos), func_ir.dump_to_string()) for loopinfo in loopinfos: lifted =", "known_ranges = [] for s, e in sorted(find_ranges(blocks)): if not", "head-block\", loc=blk.loc, ) def _cfg_nodes_in_region(cfg, region_begin, region_end): \"\"\"Find the set", "to the same location\" outedges = set() for k in", "isinstance(term, ir.Branch): return ir.Branch(cond=term.cond, truebr=replace(term.truebr), 
falsebr=replace(term.falsebr), loc=term.loc) elif isinstance(term, ir.Jump):", "defaultdict(int) for stmt in blk.body: counters[type(stmt)] += 1 if counters.pop(ir.EnterWith)", "# Pre-Py3.8 may have multiple exits loopblockkeys |= loop.exits loopblocks", "the loop blocks _loop_lift_prepare_loop_func(loopinfo, loopblocks) # Create a new IR", "blk_start, blk_end, body_blocks, dispatcher_factory, extra) sub_irs.append(sub) if not sub_irs: #", "the loop\" insiders = set(loop.body) | set(loop.entries) | set(loop.exits) for", "callfrom, returnto, body_block_ids): \"\"\"Find input and output variables to a", "def find_region_inout_vars(blocks, livemap, callfrom, returnto, body_block_ids): \"\"\"Find input and output", "blocks: # this's possible if there's an exit path in", "new IR sub = cmkind.mutate_with_body(func_ir, blocks, blk_start, blk_end, body_blocks, dispatcher_factory,", "loc=loc) if e not in postdoms[s]: msg = ( \"Does", "= func_ir.blocks.copy() # find where with-contexts regions are withs =", "in with's head-block\", loc=blk.loc, ) def _cfg_nodes_in_region(cfg, region_begin, region_end): \"\"\"Find", "can leave the with-context. \" \"Details: exit of with-context not", "_get_with_contextmanager(func_ir, blocks, blk_start) # Mutate the body and get new", "new_ir = func_ir.derive(blocks) return new_ir, sub_irs def _get_with_contextmanager(func_ir, blocks, blk_start):", "block that gathers all the backedges \"\"\" header = loop.header", "= get_ctxmgr_obj(var_ref) if not hasattr(ctxobj, 'mutate_with_body'): raise errors.CompilerError( \"Unsupported context", "lifted-loop. 
\"\"\" from numba.dispatcher import LiftedLoop # Copy loop blocks", "if has_multiple_backedges(lp): yield lp def replace_target(term, src, dst): def replace(target):", "may have multiple exits loopblockkeys |= loop.exits loopblocks = dict((k,", "in loop.\") return False outedges |= succs ok = len(outedges)", "set of CFG nodes that are in the given region", "\"illegal statements in with's head-block\", loc=blk.loc, ) def _cfg_nodes_in_region(cfg, region_begin,", "a bad # rewrite where a prelude for a lifted", "else: extra = None ctxobj = ir_utils.guard(ir_utils.find_global_value, func_ir, var_ref) #", "in edges: count += 1 if count > 1: #", "max(newblocks.keys()) + 1 def has_multiple_backedges(loop): count = 0 for k", "def loop_lifting(func_ir, typingctx, targetctx, flags, locals): \"\"\" Loop lifting transformation.", "dfn.func else: extra = None ctxobj = ir_utils.guard(ir_utils.find_global_value, func_ir, var_ref)", "and extra info. The extra contains the arguments if the", "dispatcher_factory, extra) sub_irs.append(sub) if not sub_irs: # Unchanged new_ir =", "Returns a list of ranges for the with-regions. \"\"\" def", "used_or_defined & def_vars) return inputs, outputs _loop_lift_info = namedtuple('loop_lift_info', 'loop,inputs,outputs,callfrom,returnto')", "interpreter `func_ir` returns a 2 tuple of `(toplevel_interp, [loop0_interp, loop1_interp,", "outedges = set() for k in loop.exits: succs = set(x", "handled by the looplifting code. # Thus, this loop is", "we have variable lifetime assert func_ir.variable_lifetime vlt = func_ir.variable_lifetime blocks", "return ctxobj, extra # No contextmanager found? 
raise errors.CompilerError( \"malformed", "isinstance(inst.value, ir.Yield): _logger.debug(\"has yield\") return False _logger.debug(\"no yield\") return True", "def get_var_dfn(var): \"\"\"Get the definition given a variable\"\"\" return func_ir.get_definition(var)", "loop.entries # requirement checked earlier an_exit = next(iter(loop.exits)) # anyone", "numba.analysis import compute_use_defs _logger = logging.getLogger(__name__) def _extract_loop_lifting_candidates(cfg, blocks): \"\"\"", "nodes = set([node for node in succs if node not", "liftedloop def loop_lifting(func_ir, typingctx, targetctx, flags, locals): \"\"\" Loop lifting", "ew.begin, ew.end def previously_occurred(start, known_ranges): for a, b in known_ranges:", "lifted loop lifted_ir = func_ir.derive(blocks=loopblocks, arg_names=tuple(loopinfo.inputs), arg_count=len(loopinfo.inputs), force_non_generator=True) liftedloop =", "from numba import ir, errors, ir_utils from numba.analysis import compute_use_defs", "blocks, typingctx, targetctx, flags, locals) loops.append(lifted) # Make main IR", "b: return True return False known_ranges = [] for s,", "have variable lifetime assert func_ir.variable_lifetime vlt = func_ir.variable_lifetime blocks =", "loopinfo, blocks, typingctx, targetctx, flags, locals) loops.append(lifted) # Make main", "= set(loop.body) | set(loop.entries) | set(loop.exits) for blk in map(blocks.__getitem__,", "the lifted-loop. \"\"\" from numba.dispatcher import LiftedLoop # Copy loop", "| def_vars # note: sorted for stable ordering inputs =", "> 1: # Pre-Py3.8 may have multiple exits loopblockkeys |=", "dictionary of blocks. \"\"\" return canonicalize_cfg_single_backedge(blocks) def with_lifting(func_ir, typingctx, targetctx,", "rewrite where a prelude for a lifted loop would get", "Only the top-level withs are extracted. 
Returns the (the_new_ir, the_lifted_with_ir)", "compute_use_defs _logger = logging.getLogger(__name__) def _extract_loop_lifting_candidates(cfg, blocks): \"\"\" Returns a", "the body and get new IR sub = cmkind.mutate_with_body(func_ir, blocks,", "blk_start): \"\"\"Get the global object used for the context manager", "to return/raise ' 'statements inside with block' ) assert s", "of blocks. \"\"\" return canonicalize_cfg_single_backedge(blocks) def with_lifting(func_ir, typingctx, targetctx, flags,", "block.scope loc = block.loc blk = ir.Block(scope=scope, loc=loc) ir_utils.fill_block_with_call( newblock=blk,", "Verify that the with-context has no side-exits for s, e", "loop.exits loopblocks = dict((k, blocks[k].copy()) for k in loopblockkeys) #", "top-level with. Returns a list of ranges for the with-regions.", "for the with-regions. \"\"\" def find_ranges(blocks): for blk in blocks.values():", "def with_lifting(func_ir, typingctx, targetctx, flags, locals): \"\"\"With-lifting transformation Rewrite the", "for loop lifting \"\"\" # check well-formed-ness of the loop", "in map(blocks.__getitem__, insiders): for inst in blk.body: if isinstance(inst, ir.Assign):", "return loopinfos def _loop_lift_modify_call_block(liftedloop, block, inputs, outputs, returnto): \"\"\" Transform", "compute_cfg_from_blocks(blocks) loopinfos = _loop_lift_get_candidate_infos(cfg, blocks, func_ir.variable_lifetime.livemap) loops = [] if", "= set() for k in loop.exits: succs = set(x for", "ok = len(outedges) == 1 _logger.debug(\"same_exit_point=%s (%s)\", ok, outedges) return", "assert func_ir.variable_lifetime vlt = func_ir.variable_lifetime blocks = func_ir.blocks.copy() # find", "def _get_with_contextmanager(func_ir, blocks, blk_start): \"\"\"Get the global object used for", "msg = \"Entry of with-context not dominating the exit.\" raise", "*blk*, the head block of the with-context, check that it", "= 0 for k in loop.body: blk = blocks[k] edges", "calling into liftedloop callblock = 
_loop_lift_modify_call_block(liftedloop, blocks[loopinfo.callfrom], loopinfo.inputs, loopinfo.outputs, loopinfo.returnto)", "dfn.op == 'call': args = [get_var_dfn(x) for x in dfn.args]", "compute_cfg_from_blocks(blocks) newblocks = blocks.copy() def new_block_id(): return max(newblocks.keys()) + 1", "the CFG of the with-context(s). \"\"\" doms = cfg.dominators() postdoms", "\"\"\" Rewrite loops that have multiple backedges. \"\"\" cfg =", "a lifted loop would get written into block -1 #", "Given a interpreter `func_ir` returns a 2 tuple of `(toplevel_interp,", "There MUST NOT be any other statements if counters: raise", "canonicalize_cfg(blocks): \"\"\" Rewrite the given blocks to canonicalize the CFG.", "from __future__ import absolute_import, print_function from collections import namedtuple, defaultdict", "if target == src else target) if isinstance(term, ir.Branch): return", "False _logger.debug(\"no yield\") return True _logger.info('finding looplift candidates') # the", "the one with the smallest offset firstblk = min(blocks) -", "return ir.Branch(cond=term.cond, truebr=replace(term.truebr), falsebr=replace(term.falsebr), loc=term.loc) elif isinstance(term, ir.Jump): return ir.Jump(target=replace(term.target),", "blocks for k in loopblockkeys: del blocks[k] # update main", "scope = entry_block.scope loc = entry_block.loc # Lowering assumes the", "= \"Entry of with-context not dominating the exit.\" raise errors.CompilerError(msg,", "_extract_loop_lifting_candidates(cfg, blocks) loopinfos = [] for loop in loops: [callfrom]", "the backedges \"\"\" header = loop.header tailkey = new_block_id() for", "\"\"\" region_nodes = set() stack = [region_begin] while stack: tos", "func_ir.blocks.copy() cfg = compute_cfg_from_blocks(blocks) loopinfos = _loop_lift_get_candidate_infos(cfg, blocks, func_ir.variable_lifetime.livemap) loops", "= set(loop.body) | set(loop.entries) inputs, outputs = find_region_inout_vars( blocks=blocks, livemap=livemap,", "is to prevent a bad # 
rewrite where a prelude", "returnto=returnto) loopinfos.append(lli) return loopinfos def _loop_lift_modify_call_block(liftedloop, block, inputs, outputs, returnto):", "Thus, this loop is not a candidate. _logger.debug(\"return-statement in loop.\")", "header = loop.header tailkey = new_block_id() for blkkey in loop.body:", "given blocks to canonicalize the CFG. Returns a new dictionary", "term def rewrite_single_backedge(loop): \"\"\" Add new tail block that gathers", "label_next=loopinfo.callfrom, ) blocks[loopinfo.returnto] = ir_utils.fill_callee_epilogue( block=ir.Block(scope=scope, loc=loc), outputs=loopinfo.outputs, ) def", "calling block from top-level function to call the lifted loop.", "DO NOT have multiple exits returnto = an_exit local_block_ids =", "Lifted with-block uses object mode myflags.enable_pyobject = True myflags.force_pyobject =", "def_vars = set() defs = compute_use_defs(loopblocks) for vs in defs.usemap.values():", "lifted loop. \"\"\" scope = block.scope loc = block.loc blk", "set(loop.entries) | set(loop.exits) for blk in map(blocks.__getitem__, insiders): for inst", ") if counters.pop(ir.Jump) != 1: raise errors.CompilerError( \"with's head-block must", "this loop is not a candidate. _logger.debug(\"return-statement in loop.\") return", "are extracted. Returns the (the_new_ir, the_lifted_with_ir) \"\"\" from numba import", "numba.dispatcher import LiftedWith, ObjModeLiftedWith myflags = flags.copy() if objectmode: #", "a new dictionary of blocks. \"\"\" return canonicalize_cfg_single_backedge(blocks) def with_lifting(func_ir,", "for k in loop.body: blk = blocks[k] edges = blk.terminator.get_targets()", "in blocks.values(): for ew in blk.find_insts(ir.EnterWith): yield ew.begin, ew.end def", "True return False known_ranges = [] for s, e in", "the with-context, check that it doesn't do anything else. 
\"\"\"", "node not in region_nodes and node != region_end]) stack.extend(nodes) region_nodes", "used_vars = set() def_vars = set() defs = compute_use_defs(loopblocks) for", "stack = [region_begin] while stack: tos = stack.pop() succs, _", "blocks[loopinfo.callfrom] = callblock return liftedloop def loop_lifting(func_ir, typingctx, targetctx, flags,", "in blocks: # this's possible if there's an exit path", "if objectmode: # Lifted with-block cannot looplift myflags.enable_looplift = False", "raise errors.CompilerError( \"Undefined variable used as context manager\", loc=blocks[blk_start].loc, )", "blocks, func_ir.variable_lifetime.livemap) loops = [] if loopinfos: _logger.debug('loop lifting this", "for vs in defs.defmap.values(): def_vars |= vs used_or_defined = used_vars", "canonicalize_cfg_single_backedge(blocks): \"\"\" Rewrite loops that have multiple backedges. \"\"\" cfg", "tailkey) newblocks[blkkey] = newblk # create new tail block entryblk", "else target) if isinstance(term, ir.Branch): return ir.Branch(cond=term.cond, truebr=replace(term.truebr), falsebr=replace(term.falsebr), loc=term.loc)", "callfrom=callfrom, returnto=returnto, body_block_ids=local_block_ids, ) lli = _loop_lift_info(loop=loop, inputs=inputs, outputs=outputs, callfrom=callfrom,", "a dictionary of blocks of the lifted-loop. \"\"\" from numba.dispatcher", "set(x for x, _ in cfg.successors(k)) if not succs: #", "if isinstance(stmt, ir.EnterWith): var_ref = stmt.contextmanager ctxobj, extra = get_ctxmgr_obj(var_ref)", "candidates = [] for loop in find_top_level_loops(cfg): _logger.debug(\"top-level loop: %s\",", "find_setupwiths(blocks) cfg = vlt.cfg _legalize_withs_cfg(withs, cfg, blocks) # For each", "[] for node in _cfg_nodes_in_region(cfg, blk_start, blk_end): body_blocks.append(node) _legalize_with_head(blocks[blk_start]) #", "this error. 
msg = \"Entry of with-context not dominating the", "returnto = an_exit local_block_ids = set(loop.body) | set(loop.entries) inputs, outputs", "ew in blk.find_insts(ir.EnterWith): yield ew.begin, ew.end def previously_occurred(start, known_ranges): for", "1 ENTER_WITH\", loc=blk.loc, ) if counters.pop(ir.Jump) != 1: raise errors.CompilerError(", "same location\" outedges = set() for k in loop.exits: succs", "have yield inside the loop\" insiders = set(loop.body) | set(loop.entries)", "previously_occurred(start, known_ranges): for a, b in known_ranges: if s >=", "blocks, blk_start): \"\"\"Get the global object used for the context", "for loop in loops: [callfrom] = loop.entries # requirement checked", "blocks = func_ir.blocks.copy() # find where with-contexts regions are withs", "raise errors.CompilerError( \"with's head-block must have exactly 1 JUMP\", loc=blk.loc,", "the same location\" outedges = set() for k in loop.exits:", "has no side-exits for s, e in withs: loc =", "the exit.\" raise errors.CompilerError(msg, loc=loc) if e not in postdoms[s]:", "saves having to create something valid to run through postproc", "the arguments if the context-manager is used as a call.", "Copy loop blocks loop = loopinfo.loop loopblockkeys = set(loop.body) |", "typingctx, targetctx, myflags, locals, **kwargs) postproc.PostProcessor(func_ir).run() # ensure we have", "= replace_target(blk.terminator, header, tailkey) newblocks[blkkey] = newblk # create new", "from collections import namedtuple, defaultdict import logging from numba.analysis import", "the contextmanager used as a Call dfn = func_ir.get_definition(var_ref) if", "| set(loop.entries) | set(loop.exits) for blk in map(blocks.__getitem__, insiders): for", "_logger.debug(\"has yield\") return False _logger.debug(\"no yield\") return True _logger.info('finding looplift", "were in block 0 candidates = [] for loop in", "# Can have any number of del counters.pop(ir.Del, None) #", "node in succs if node not in region_nodes and 
node", "livemap[callfrom] outputs = livemap[returnto] # ensure live variables are actually", "of `(toplevel_interp, [loop0_interp, loop1_interp, ....])` \"\"\" blocks = func_ir.blocks.copy() cfg", "loc=loc) def find_setupwiths(blocks): \"\"\"Find all top-level with. Returns a list", "of blocks of the lifted-loop. \"\"\" from numba.dispatcher import LiftedLoop", "set(loop.body) | set(loop.entries) inputs, outputs = find_region_inout_vars( blocks=blocks, livemap=livemap, callfrom=callfrom,", "loop.header tailkey = new_block_id() for blkkey in loop.body: blk =", "if isinstance(term, ir.Branch): return ir.Branch(cond=term.cond, truebr=replace(term.truebr), falsebr=replace(term.falsebr), loc=term.loc) elif isinstance(term,", "dict((k, blocks[k].copy()) for k in loopblockkeys) # Modify the loop", "loopinfos = [] for loop in loops: [callfrom] = loop.entries", "entry_block.scope loc = entry_block.loc # Lowering assumes the first block", "returnto, body_block_ids): \"\"\"Find input and output variables to a block", "not handled by the looplifting code. 
# Thus, this loop", "the blocks, else remove, # saves having to create something", "for s, e in withs: loc = blocks[s].loc if s", "with-block cannot looplift myflags.enable_looplift = False # Lifted with-block uses", "\"Undefined variable used as context manager\", loc=blocks[blk_start].loc, ) if ctxobj", "one with the smallest offset firstblk = min(blocks) - 1", "1: # Pre-Py3.8 may have multiple exits [(returnto, _)] =", "each with-regions, mutate them according to # the kind of", "not in loop.entries): candidates.append(loop) _logger.debug(\"add candidate: %s\", loop) return candidates", "func_ir.variable_lifetime blocks = func_ir.blocks.copy() # find where with-contexts regions are", "return False known_ranges = [] for s, e in sorted(find_ranges(blocks)):", "= an_exit local_block_ids = set(loop.body) | set(loop.entries) inputs, outputs =", "cfg = compute_cfg_from_blocks(blocks) loopinfos = _loop_lift_get_candidate_infos(cfg, blocks, func_ir.variable_lifetime.livemap) loops =", "stmt in blk.body: counters[type(stmt)] += 1 if counters.pop(ir.EnterWith) != 1:", "e not in blocks: # this's possible if there's an", "counters = defaultdict(int) for stmt in blk.body: counters[type(stmt)] += 1", "of CFG nodes that are in the given region \"\"\"", "sorted(find_ranges(blocks)): if not previously_occurred(s, known_ranges): if e not in blocks:", "ir.Jump): return ir.Jump(target=replace(term.target), loc=term.loc) else: assert not term.get_targets() return term", "and one_entry(loop) and cannot_yield(loop) and cfg.entry_point() not in loop.entries): candidates.append(loop)", "list of loops that are candidate for loop lifting \"\"\"", "in postdoms[s]: msg = ( \"Does not support with-context that", "ordering inputs = sorted(set(inputs) & used_or_defined) outputs = sorted(set(outputs) &", "_logger.debug(\"add candidate: %s\", loop) return candidates def find_region_inout_vars(blocks, livemap, callfrom,", "blk.terminator.get_targets(): newblk = blk.copy() # rewrite backedge into 
jumps to", "Rewrite loops that have multiple backedges. \"\"\" cfg = compute_cfg_from_blocks(blocks)", "not in doms[e]: # Not sure what condition can trigger", "'kwargs': kws} var_ref = dfn.func else: extra = None ctxobj", "# remove blocks for k in loopblockkeys: del blocks[k] #", "ir.Yield): _logger.debug(\"has yield\") return False _logger.debug(\"no yield\") return True _logger.info('finding", "given a variable\"\"\" return func_ir.get_definition(var) def get_ctxmgr_obj(var_ref): \"\"\"Return the context-manager", "namedtuple('loop_lift_info', 'loop,inputs,outputs,callfrom,returnto') def _loop_lift_get_candidate_infos(cfg, blocks, livemap): \"\"\" Returns information on", "return blk def _loop_lift_prepare_loop_func(loopinfo, blocks): \"\"\" Inplace transform loop blocks", "may have multiple exits [(returnto, _)] = cfg.successors(an_exit) # requirement", "inputs = sorted(set(inputs) & used_or_defined) outputs = sorted(set(outputs) & used_or_defined", "\"malformed with-context usage\", loc=blocks[blk_start].loc, ) def _legalize_with_head(blk): \"\"\"Given *blk*, the", "contain branches \" \"(i.e. break/return/raise) that can leave the with-context.", ") return ctxobj, extra # No contextmanager found? 
raise errors.CompilerError(", "None ctxobj = ir_utils.guard(ir_utils.find_global_value, func_ir, var_ref) # check the contextmanager", "defs.usemap.values(): used_vars |= vs for vs in defs.defmap.values(): def_vars |=", "cfg.dominators() postdoms = cfg.post_dominators() # Verify that the with-context has", "of context-manager.\" def get_var_dfn(var): \"\"\"Get the definition given a variable\"\"\"", "truebr=replace(term.truebr), falsebr=replace(term.falsebr), loc=term.loc) elif isinstance(term, ir.Jump): return ir.Jump(target=replace(term.target), loc=term.loc) else:", "= cfg.dominators() postdoms = cfg.post_dominators() # Verify that the with-context", "in the given region \"\"\" region_nodes = set() stack =", "cfg = vlt.cfg _legalize_withs_cfg(withs, cfg, blocks) # For each with-regions,", "locals) # modify for calling into liftedloop callblock = _loop_lift_modify_call_block(liftedloop,", "loopinfos: lifted = _loop_lift_modify_blocks(func_ir, loopinfo, blocks, typingctx, targetctx, flags, locals)", "cls = LiftedWith return cls(func_ir, typingctx, targetctx, myflags, locals, **kwargs)", "call the lifted loop. \"\"\" scope = block.scope loc =", "\" \"Details: exit of with-context not post-dominating the entry. 
\"", "variable\"\"\" return func_ir.get_definition(var) def get_ctxmgr_obj(var_ref): \"\"\"Return the context-manager object and", "loc=entryblk.loc) # add backedge tailblk.append(ir.Jump(target=header, loc=tailblk.loc)) newblocks[tailkey] = tailblk for", "entryblk = newblocks[header] tailblk = ir.Block(scope=entryblk.scope, loc=entryblk.loc) # add backedge", "be any other statements if counters: raise errors.CompilerError( \"illegal statements", "compute_cfg_from_blocks, find_top_level_loops from numba import ir, errors, ir_utils from numba.analysis", "None: raise errors.CompilerError(_illegal_cm_msg, loc=dfn.loc) return ctxobj, extra # Scan the", ") raise errors.CompilerError(msg, loc=loc) def find_setupwiths(blocks): \"\"\"Find all top-level with.", "tailblk = ir.Block(scope=entryblk.scope, loc=entryblk.loc) # add backedge tailblk.append(ir.Jump(target=header, loc=tailblk.loc)) newblocks[tailkey]", "ir.Expr) and dfn.op == 'call': args = [get_var_dfn(x) for x", "find_region_inout_vars( blocks=blocks, livemap=livemap, callfrom=callfrom, returnto=returnto, body_block_ids=local_block_ids, ) lli = _loop_lift_info(loop=loop,", "loopinfo.loop loopblockkeys = set(loop.body) | set(loop.entries) if len(loop.exits) > 1:", "vs used_or_defined = used_vars | def_vars # note: sorted for", "from numba.dispatcher import LiftedLoop # Copy loop blocks loop =", "if s not in doms[e]: # Not sure what condition", "= compute_use_defs(loopblocks) for vs in defs.usemap.values(): used_vars |= vs for", "candidate for loop lifting \"\"\" # check well-formed-ness of the", "sorted(set(inputs) & used_or_defined) outputs = sorted(set(outputs) & used_or_defined & def_vars)", "cfg.loops().values(): if has_multiple_backedges(lp): yield lp def replace_target(term, src, dst): def", "falsebr=replace(term.falsebr), loc=term.loc) elif isinstance(term, ir.Jump): return ir.Jump(target=replace(term.target), loc=term.loc) else: assert", "ir_utils.fill_callee_prologue( block=ir.Block(scope=scope, loc=loc), 
inputs=loopinfo.inputs, label_next=loopinfo.callfrom, ) blocks[loopinfo.returnto] = ir_utils.fill_callee_epilogue( block=ir.Block(scope=scope,", "with-context. \" \"Details: exit of with-context not post-dominating the entry.", "== 1 _logger.debug(\"one_entry=%s\", ok) return ok def cannot_yield(loop): \"cannot have", "loc=dfn.loc) return ctxobj, extra # Scan the start of the", "return term def rewrite_single_backedge(loop): \"\"\" Add new tail block that", "def _cfg_nodes_in_region(cfg, region_begin, region_end): \"\"\"Find the set of CFG nodes", "blk_start, blk_end): body_blocks.append(node) _legalize_with_head(blocks[blk_start]) # Find the contextmanager cmkind, extra", "new tail block newblk.body[-1] = replace_target(blk.terminator, header, tailkey) newblocks[blkkey] =", "variable lifetime assert func_ir.variable_lifetime vlt = func_ir.variable_lifetime blocks = func_ir.blocks.copy()", "loc=blocks[blk_start].loc, ) def _legalize_with_head(blk): \"\"\"Given *blk*, the head block of", "loop lifted_ir = func_ir.derive(blocks=loopblocks, arg_names=tuple(loopinfo.inputs), arg_count=len(loopinfo.inputs), force_non_generator=True) liftedloop = LiftedLoop(lifted_ir,", "find_region_inout_vars(blocks, livemap, callfrom, returnto, body_block_ids): \"\"\"Find input and output variables", "import postproc def dispatcher_factory(func_ir, objectmode=False, **kwargs): from numba.dispatcher import LiftedWith,", "for a lifted loop would get written into block -1", "The extra contains the arguments if the context-manager is used", "offset firstblk = min(blocks) - 1 blocks[firstblk] = ir_utils.fill_callee_prologue( block=ir.Block(scope=scope,", "[] for (blk_start, blk_end) in withs: body_blocks = [] for", "the given region \"\"\" region_nodes = set() stack = [region_begin]", "& used_or_defined) outputs = sorted(set(outputs) & used_or_defined & def_vars) return", "\"\"\"Get the definition given a variable\"\"\" return func_ir.get_definition(var) def get_ctxmgr_obj(var_ref):", 
"\"\"\" Modify the block inplace to call to the lifted-loop.", "!= region_end]) stack.extend(nodes) region_nodes |= nodes return region_nodes def _legalize_withs_cfg(withs,", "False known_ranges = [] for s, e in sorted(find_ranges(blocks)): if", "head block of the with-context, check that it doesn't do", "= False # Lifted with-block uses object mode myflags.enable_pyobject =", "with-regions, mutate them according to # the kind of contextmanager", "False cls = ObjModeLiftedWith else: cls = LiftedWith return cls(func_ir,", "loop: %s\", loop) if (same_exit_point(loop) and one_entry(loop) and cannot_yield(loop) and", "dictionary of blocks of the lifted-loop. \"\"\" from numba.dispatcher import", "# check the contextmanager object if ctxobj is ir.UNDEFINED: raise", "and node != region_end]) stack.extend(nodes) region_nodes |= nodes return region_nodes", "not succs: # If the exit point has no successor,", "dfn.kws} extra = {'args': args, 'kwargs': kws} var_ref = dfn.func", "exit point has no successor, it contains an return #", "firstblk = min(blocks) - 1 blocks[firstblk] = ir_utils.fill_callee_prologue( block=ir.Block(scope=scope, loc=loc),", "loc=loc), inputs=loopinfo.inputs, label_next=loopinfo.callfrom, ) blocks[loopinfo.returnto] = ir_utils.fill_callee_epilogue( block=ir.Block(scope=scope, loc=loc), outputs=loopinfo.outputs,", "def previously_occurred(start, known_ranges): for a, b in known_ranges: if s", "\"with's head-block must have exactly 1 JUMP\", loc=blk.loc, ) #", "what condition can trigger this error. 
msg = \"Entry of", "cmkind.mutate_with_body(func_ir, blocks, blk_start, blk_end, body_blocks, dispatcher_factory, extra) sub_irs.append(sub) if not", "= func_ir.derive(blocks) return new_ir, sub_irs def _get_with_contextmanager(func_ir, blocks, blk_start): \"\"\"Get", "\"\"\" cfg = compute_cfg_from_blocks(blocks) newblocks = blocks.copy() def new_block_id(): return", "block=ir.Block(scope=scope, loc=loc), inputs=loopinfo.inputs, label_next=loopinfo.callfrom, ) blocks[loopinfo.returnto] = ir_utils.fill_callee_epilogue( block=ir.Block(scope=scope, loc=loc),", "not term.get_targets() return term def rewrite_single_backedge(loop): \"\"\" Add new tail", "block from top-level function to call the lifted loop. \"\"\"", "header, tailkey) newblocks[blkkey] = newblk # create new tail block", "= blocks[loopinfo.callfrom] scope = entry_block.scope loc = entry_block.loc # Lowering", "s, e in withs: loc = blocks[s].loc if s not", "= block.scope loc = block.loc blk = ir.Block(scope=scope, loc=loc) ir_utils.fill_block_with_call(", "ok) return ok def cannot_yield(loop): \"cannot have yield inside the", "object used for the context manager \"\"\" _illegal_cm_msg = \"Illegal", "defs.defmap.values(): def_vars |= vs used_or_defined = used_vars | def_vars #", "looplifting code. # Thus, this loop is not a candidate." ]
[ "], ] TEST_CASE_2 = [ { \"data\": [{\"image\": FILE_PATH, \"mask\":", "from unittest import skipUnless import numpy as np from numpy.testing", "\"CMU-1\", \"mask_location\": [101, 100], }, ], ] TEST_CASE_OPENSLIDE_0 = [", "\"CMU-1\", \"mask_location\": [101, 100], }, { \"image\": np.array([[[243]], [[243]], [[243]]],", ") @skipUnless(has_cim, \"Requires CuCIM\") @skip_if_quick def test_read_patches_cucim(self, input_parameters, expected): dataset", "\"OpenSlide\", }, [ { \"image\": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), \"name\":", "OpenSlide\") @skip_if_quick def test_read_patches_openslide(self, input_parameters, expected): dataset = MaskedInferenceWSIDataset(**input_parameters) self.compare_samples_expected(dataset,", "{ \"data\": [{\"image\": FILE_PATH, \"mask\": MASK2}], \"patch_size\": 1, \"image_reader_name\": \"cuCIM\",", "// 2, HEIGHT // 2)) mask[100, 100] = 1 np.save(MASK1,", "\"mask_location\": [101, 100], }, ], ] TEST_CASE_OPENSLIDE_0 = [ {", "}, ], ] TEST_CASE_OPENSLIDE_0 = [ { \"data\": [ {\"image\":", "numpy.testing import assert_array_equal from parameterized import parameterized from monai.apps.pathology.datasets import", "dataset = MaskedInferenceWSIDataset(**input_parameters) self.compare_samples_expected(dataset, expected) @parameterized.expand( [ TEST_CASE_OPENSLIDE_0, TEST_CASE_OPENSLIDE_1, ]", "\"mask\": MASK1}, ], \"patch_size\": 2, \"image_reader_name\": \"cuCIM\", }, [ {", "}, ], ] TEST_CASE_2 = [ { \"data\": [{\"image\": FILE_PATH,", "has_osl = optional_import(\"openslide\") FILE_URL = \"http://openslide.cs.cmu.edu/download/openslide-testdata/Generic-TIFF/CMU-1.tiff\" FILE_PATH = os.path.join(os.path.dirname(__file__), \"testing_data\",", "\"mask_location\": [101, 101], }, ], ] TEST_CASE_3 = [ {", "@parameterized.expand( [ TEST_CASE_0, TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4, ] ) @skipUnless(has_cim,", "optional_import(\"openslide\") FILE_URL = 
\"http://openslide.cs.cmu.edu/download/openslide-testdata/Generic-TIFF/CMU-1.tiff\" FILE_PATH = os.path.join(os.path.dirname(__file__), \"testing_data\", os.path.basename(FILE_URL)) MASK1", "parameterized import parameterized from monai.apps.pathology.datasets import MaskedInferenceWSIDataset from monai.apps.utils import", "\"data\": [{\"image\": FILE_PATH, \"mask\": MASK4}], \"patch_size\": 1, \"image_reader_name\": \"cuCIM\", },", "\"mask_location\": [100, 101], }, { \"image\": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8),", "dtype=np.uint8), \"name\": \"CMU-1\", \"mask_location\": [100, 100], }, ], ] TEST_CASE_OPENSLIDE_1", "[ { \"data\": [{\"image\": FILE_PATH, \"mask\": MASK4}], \"patch_size\": 1, \"image_reader_name\":", "[101, 100], }, ], ] TEST_CASE_2 = [ { \"data\":", "= [ { \"data\": [ {\"image\": FILE_PATH, \"mask\": MASK1}, {\"image\":", "import download_url from monai.utils import optional_import from tests.utils import skip_if_quick", "TEST_CASE_3 = [ { \"data\": [ {\"image\": FILE_PATH, \"mask\": MASK1},", "download_url(FILE_URL, FILE_PATH, \"5a3cfd4fd725c50578ddb80b517b759f\") @parameterized.expand( [ TEST_CASE_0, TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4,", "FILE_PATH = os.path.join(os.path.dirname(__file__), \"testing_data\", os.path.basename(FILE_URL)) MASK1 = os.path.join(os.path.dirname(__file__), \"testing_data\", \"tissue_mask1.npy\")", "{ \"data\": [{\"image\": FILE_PATH, \"mask\": MASK4}], \"patch_size\": 1, \"image_reader_name\": \"cuCIM\",", "] class TestMaskedInferenceWSIDataset(unittest.TestCase): def setUp(self): prepare_data() download_url(FILE_URL, FILE_PATH, \"5a3cfd4fd725c50578ddb80b517b759f\") @parameterized.expand(", "{\"image\": FILE_PATH, \"mask\": MASK1}, ], \"patch_size\": 2, \"image_reader_name\": \"cuCIM\", },", "TEST_CASE_OPENSLIDE_0, TEST_CASE_OPENSLIDE_1, ] ) @skipUnless(has_osl, \"Requires OpenSlide\") @skip_if_quick def test_read_patches_openslide(self,", "dtype=np.uint8), \"name\": \"CMU-1\", 
\"mask_location\": [100, 100], }, { \"image\": np.array([[[243]],", "\"data\": [ {\"image\": FILE_PATH, \"mask\": MASK1}, ], \"patch_size\": 1, \"image_reader_name\":", "\"http://openslide.cs.cmu.edu/download/openslide-testdata/Generic-TIFF/CMU-1.tiff\" FILE_PATH = os.path.join(os.path.dirname(__file__), \"testing_data\", os.path.basename(FILE_URL)) MASK1 = os.path.join(os.path.dirname(__file__), \"testing_data\",", "MaskedInferenceWSIDataset from monai.apps.utils import download_url from monai.utils import optional_import from", "TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4, ] ) @skipUnless(has_cim, \"Requires CuCIM\") @skip_if_quick", "np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), \"name\": \"CMU-1\", \"mask_location\": [100, 101], },", "\"tissue_mask4.npy\") HEIGHT = 32914 WIDTH = 46000 def prepare_data(): mask", "import unittest from unittest import skipUnless import numpy as np", "monai.apps.utils import download_url from monai.utils import optional_import from tests.utils import", "}, ], ] class TestMaskedInferenceWSIDataset(unittest.TestCase): def setUp(self): prepare_data() download_url(FILE_URL, FILE_PATH,", "[100, 101], }, { \"image\": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), \"name\":", "= optional_import(\"openslide\") FILE_URL = \"http://openslide.cs.cmu.edu/download/openslide-testdata/Generic-TIFF/CMU-1.tiff\" FILE_PATH = os.path.join(os.path.dirname(__file__), \"testing_data\", os.path.basename(FILE_URL))", "] TEST_CASE_OPENSLIDE_0 = [ { \"data\": [ {\"image\": FILE_PATH, \"mask\":", "expected): for i in range(len(dataset)): self.assertTupleEqual(dataset[i][0][\"image\"].shape, expected[i][\"image\"].shape) self.assertIsNone(assert_array_equal(dataset[i][0][\"image\"], expected[i][\"image\"])) self.assertEqual(dataset[i][0][\"name\"],", "TEST_CASE_OPENSLIDE_1, ] ) @skipUnless(has_osl, \"Requires OpenSlide\") @skip_if_quick def test_read_patches_openslide(self, input_parameters,", "MASK2}, ], \"patch_size\": 1, 
\"image_reader_name\": \"cuCIM\", }, [ { \"image\":", "import skip_if_quick _, has_cim = optional_import(\"cucim\") _, has_osl = optional_import(\"openslide\")", "}, ], ] TEST_CASE_1 = [ { \"data\": [{\"image\": FILE_PATH,", "}, ], ] TEST_CASE_3 = [ { \"data\": [ {\"image\":", "TEST_CASE_OPENSLIDE_0 = [ { \"data\": [ {\"image\": FILE_PATH, \"mask\": MASK1},", "optional_import(\"cucim\") _, has_osl = optional_import(\"openslide\") FILE_URL = \"http://openslide.cs.cmu.edu/download/openslide-testdata/Generic-TIFF/CMU-1.tiff\" FILE_PATH =", "input_parameters, expected): dataset = MaskedInferenceWSIDataset(**input_parameters) self.compare_samples_expected(dataset, expected) def compare_samples_expected(self, dataset,", "100] = 1 np.save(MASK1, mask) mask[100, 100:102] = 1 np.save(MASK2,", "[101, 100], }, ], ] TEST_CASE_OPENSLIDE_0 = [ { \"data\":", "\"CMU-1\", \"mask_location\": [100, 100], }, ], ] TEST_CASE_4 = [", "expected[i][\"image\"].shape) self.assertIsNone(assert_array_equal(dataset[i][0][\"image\"], expected[i][\"image\"])) self.assertEqual(dataset[i][0][\"name\"], expected[i][\"name\"]) self.assertListEqual(dataset[i][0][\"mask_location\"], expected[i][\"mask_location\"]) if __name__ ==", "[[243, 243], [243, 243]], [[243, 243], [243, 243]], [[243, 243],", "FILE_PATH, \"mask\": MASK2}], \"patch_size\": 1, \"image_reader_name\": \"cuCIM\", }, [ {", "\"image_reader_name\": \"cuCIM\", }, [ { \"image\": np.array( [ [[243, 243],", "expected): dataset = MaskedInferenceWSIDataset(**input_parameters) self.compare_samples_expected(dataset, expected) @parameterized.expand( [ TEST_CASE_OPENSLIDE_0, TEST_CASE_OPENSLIDE_1,", "{ \"data\": [ {\"image\": FILE_PATH, \"mask\": MASK1}, ], \"patch_size\": 1,", "np.save(MASK2, mask) mask[100:102, 100:102] = 1 np.save(MASK4, mask) TEST_CASE_0 =", "optional_import from tests.utils import skip_if_quick _, has_cim = optional_import(\"cucim\") _,", "np.zeros((WIDTH // 2, HEIGHT // 2)) mask[100, 100] = 1", "{ \"image\": 
np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), \"name\": \"CMU-1\", \"mask_location\": [100,", "\"name\": \"CMU-1\", \"mask_location\": [100, 100], }, { \"image\": np.array([[[243]], [[243]],", "= [ { \"data\": [ {\"image\": FILE_PATH, \"mask\": MASK1}, ],", "{ \"image\": np.array( [ [[243, 243], [243, 243]], [[243, 243],", "\"mask\": MASK4}], \"patch_size\": 1, \"image_reader_name\": \"cuCIM\", }, [ { \"image\":", "], ] TEST_CASE_OPENSLIDE_1 = [ { \"data\": [{\"image\": FILE_PATH, \"mask\":", "parameterized from monai.apps.pathology.datasets import MaskedInferenceWSIDataset from monai.apps.utils import download_url from", "= 1 np.save(MASK1, mask) mask[100, 100:102] = 1 np.save(MASK2, mask)", "\"tissue_mask1.npy\") MASK2 = os.path.join(os.path.dirname(__file__), \"testing_data\", \"tissue_mask2.npy\") MASK4 = os.path.join(os.path.dirname(__file__), \"testing_data\",", ") @skipUnless(has_osl, \"Requires OpenSlide\") @skip_if_quick def test_read_patches_openslide(self, input_parameters, expected): dataset", "100], }, ], ] TEST_CASE_1 = [ { \"data\": [{\"image\":", "243]], [[243, 243], [243, 243]], ], dtype=np.uint8, ), \"name\": \"CMU-1\",", "[{\"image\": FILE_PATH, \"mask\": MASK2}], \"patch_size\": 1, \"image_reader_name\": \"OpenSlide\", }, [", "skip_if_quick _, has_cim = optional_import(\"cucim\") _, has_osl = optional_import(\"openslide\") FILE_URL", "], ] class TestMaskedInferenceWSIDataset(unittest.TestCase): def setUp(self): prepare_data() download_url(FILE_URL, FILE_PATH, \"5a3cfd4fd725c50578ddb80b517b759f\")", "os.path.join(os.path.dirname(__file__), \"testing_data\", \"tissue_mask2.npy\") MASK4 = os.path.join(os.path.dirname(__file__), \"testing_data\", \"tissue_mask4.npy\") HEIGHT =", "100], }, ], ] TEST_CASE_4 = [ { \"data\": [", "], \"patch_size\": 1, \"image_reader_name\": \"cuCIM\", }, [ { \"image\": np.array([[[243]],", "MASK1}, ], \"patch_size\": 1, \"image_reader_name\": \"cuCIM\", }, [ { \"image\":", "dtype=np.uint8), \"name\": \"CMU-1\", 
\"mask_location\": [100, 101], }, { \"image\": np.array([[[243]],", "MASK1 = os.path.join(os.path.dirname(__file__), \"testing_data\", \"tissue_mask1.npy\") MASK2 = os.path.join(os.path.dirname(__file__), \"testing_data\", \"tissue_mask2.npy\")", "FILE_PATH, \"mask\": MASK1}, ], \"patch_size\": 1, \"image_reader_name\": \"OpenSlide\", }, [", "mask) mask[100:102, 100:102] = 1 np.save(MASK4, mask) TEST_CASE_0 = [", "\"testing_data\", \"tissue_mask1.npy\") MASK2 = os.path.join(os.path.dirname(__file__), \"testing_data\", \"tissue_mask2.npy\") MASK4 = os.path.join(os.path.dirname(__file__),", "[[243, 243], [243, 243]], ], dtype=np.uint8, ), \"name\": \"CMU-1\", \"mask_location\":", "= MaskedInferenceWSIDataset(**input_parameters) self.compare_samples_expected(dataset, expected) @parameterized.expand( [ TEST_CASE_OPENSLIDE_0, TEST_CASE_OPENSLIDE_1, ] )", "TEST_CASE_4, ] ) @skipUnless(has_cim, \"Requires CuCIM\") @skip_if_quick def test_read_patches_cucim(self, input_parameters,", "\"image_reader_name\": \"OpenSlide\", }, [ { \"image\": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8),", "def test_read_patches_cucim(self, input_parameters, expected): dataset = MaskedInferenceWSIDataset(**input_parameters) self.compare_samples_expected(dataset, expected) @parameterized.expand(", "= 32914 WIDTH = 46000 def prepare_data(): mask = np.zeros((WIDTH", "setUp(self): prepare_data() download_url(FILE_URL, FILE_PATH, \"5a3cfd4fd725c50578ddb80b517b759f\") @parameterized.expand( [ TEST_CASE_0, TEST_CASE_1, TEST_CASE_2,", "FILE_URL = \"http://openslide.cs.cmu.edu/download/openslide-testdata/Generic-TIFF/CMU-1.tiff\" FILE_PATH = os.path.join(os.path.dirname(__file__), \"testing_data\", os.path.basename(FILE_URL)) MASK1 =", "\"image_reader_name\": \"cuCIM\", }, [ { \"image\": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8),", "from monai.apps.utils import download_url from monai.utils import optional_import from tests.utils", "243], [243, 243]], [[243, 243], [243, 243]], 
[[243, 243], [243,", "\"name\": \"CMU-1\", \"mask_location\": [101, 100], }, ], ] TEST_CASE_OPENSLIDE_0 =", "for i in range(len(dataset)): self.assertTupleEqual(dataset[i][0][\"image\"].shape, expected[i][\"image\"].shape) self.assertIsNone(assert_array_equal(dataset[i][0][\"image\"], expected[i][\"image\"])) self.assertEqual(dataset[i][0][\"name\"], expected[i][\"name\"])", "\"patch_size\": 1, \"image_reader_name\": \"OpenSlide\", }, [ { \"image\": np.array([[[243]], [[243]],", "\"mask_location\": [101, 100], }, ], ] TEST_CASE_2 = [ {", "dtype=np.uint8, ), \"name\": \"CMU-1\", \"mask_location\": [100, 100], }, ], ]", "\"mask_location\": [100, 100], }, ], ] TEST_CASE_OPENSLIDE_1 = [ {", "], ] TEST_CASE_4 = [ { \"data\": [ {\"image\": FILE_PATH,", "\"CMU-1\", \"mask_location\": [101, 100], }, ], ] TEST_CASE_2 = [", "\"name\": \"CMU-1\", \"mask_location\": [100, 100], }, ], ] TEST_CASE_OPENSLIDE_1 =", "\"patch_size\": 1, \"image_reader_name\": \"cuCIM\", }, [ { \"image\": np.array([[[243]], [[243]],", "def test_read_patches_openslide(self, input_parameters, expected): dataset = MaskedInferenceWSIDataset(**input_parameters) self.compare_samples_expected(dataset, expected) def", "[ { \"data\": [ {\"image\": FILE_PATH, \"mask\": MASK1}, ], \"patch_size\":", "\"data\": [{\"image\": FILE_PATH, \"mask\": MASK2}], \"patch_size\": 1, \"image_reader_name\": \"cuCIM\", },", "46000 def prepare_data(): mask = np.zeros((WIDTH // 2, HEIGHT //", "], ] TEST_CASE_3 = [ { \"data\": [ {\"image\": FILE_PATH,", "\"CMU-1\", \"mask_location\": [100, 101], }, { \"image\": np.array([[[243]], [[243]], [[243]]],", "HEIGHT = 32914 WIDTH = 46000 def prepare_data(): mask =", "np.save(MASK1, mask) mask[100, 100:102] = 1 np.save(MASK2, mask) mask[100:102, 100:102]", "= \"http://openslide.cs.cmu.edu/download/openslide-testdata/Generic-TIFF/CMU-1.tiff\" FILE_PATH = os.path.join(os.path.dirname(__file__), \"testing_data\", os.path.basename(FILE_URL)) MASK1 = os.path.join(os.path.dirname(__file__),", 
"MASK4}], \"patch_size\": 1, \"image_reader_name\": \"cuCIM\", }, [ { \"image\": np.array([[[243]],", "[ { \"data\": [ {\"image\": FILE_PATH, \"mask\": MASK1}, {\"image\": FILE_PATH,", "[101, 100], }, ], ] class TestMaskedInferenceWSIDataset(unittest.TestCase): def setUp(self): prepare_data()", "FILE_PATH, \"mask\": MASK1}, ], \"patch_size\": 2, \"image_reader_name\": \"cuCIM\", }, [", "[ { \"image\": np.array( [ [[243, 243], [243, 243]], [[243,", "os import unittest from unittest import skipUnless import numpy as", "np from numpy.testing import assert_array_equal from parameterized import parameterized from", "tests.utils import skip_if_quick _, has_cim = optional_import(\"cucim\") _, has_osl =", "[100, 100], }, ], ] TEST_CASE_1 = [ { \"data\":", "[101, 100], }, { \"image\": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), \"name\":", "= [ { \"data\": [{\"image\": FILE_PATH, \"mask\": MASK4}], \"patch_size\": 1,", "from monai.apps.pathology.datasets import MaskedInferenceWSIDataset from monai.apps.utils import download_url from monai.utils", "i in range(len(dataset)): self.assertTupleEqual(dataset[i][0][\"image\"].shape, expected[i][\"image\"].shape) self.assertIsNone(assert_array_equal(dataset[i][0][\"image\"], expected[i][\"image\"])) self.assertEqual(dataset[i][0][\"name\"], expected[i][\"name\"]) self.assertListEqual(dataset[i][0][\"mask_location\"],", "from numpy.testing import assert_array_equal from parameterized import parameterized from monai.apps.pathology.datasets", "TEST_CASE_0, TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4, ] ) @skipUnless(has_cim, \"Requires CuCIM\")", "2)) mask[100, 100] = 1 np.save(MASK1, mask) mask[100, 100:102] =", "\"image\": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), \"name\": \"CMU-1\", \"mask_location\": [101, 101],", "\"Requires OpenSlide\") @skip_if_quick def test_read_patches_openslide(self, input_parameters, expected): dataset = MaskedInferenceWSIDataset(**input_parameters)", "[ 
TEST_CASE_OPENSLIDE_0, TEST_CASE_OPENSLIDE_1, ] ) @skipUnless(has_osl, \"Requires OpenSlide\") @skip_if_quick def", "= 1 np.save(MASK4, mask) TEST_CASE_0 = [ { \"data\": [", "[243, 243]], ], dtype=np.uint8, ), \"name\": \"CMU-1\", \"mask_location\": [100, 100],", "2, HEIGHT // 2)) mask[100, 100] = 1 np.save(MASK1, mask)", "import os import unittest from unittest import skipUnless import numpy", "\"mask_location\": [100, 100], }, ], ] TEST_CASE_4 = [ {", "dtype=np.uint8), \"name\": \"CMU-1\", \"mask_location\": [101, 100], }, ], ] TEST_CASE_OPENSLIDE_0", "dtype=np.uint8), \"name\": \"CMU-1\", \"mask_location\": [100, 100], }, ], ] TEST_CASE_1", "assert_array_equal from parameterized import parameterized from monai.apps.pathology.datasets import MaskedInferenceWSIDataset from", "[[243]]], dtype=np.uint8), \"name\": \"CMU-1\", \"mask_location\": [101, 100], }, ], ]", "download_url from monai.utils import optional_import from tests.utils import skip_if_quick _,", "mask = np.zeros((WIDTH // 2, HEIGHT // 2)) mask[100, 100]", "= 1 np.save(MASK2, mask) mask[100:102, 100:102] = 1 np.save(MASK4, mask)", "\"mask_location\": [100, 100], }, { \"image\": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8),", "has_cim = optional_import(\"cucim\") _, has_osl = optional_import(\"openslide\") FILE_URL = \"http://openslide.cs.cmu.edu/download/openslide-testdata/Generic-TIFF/CMU-1.tiff\"", "mask[100, 100:102] = 1 np.save(MASK2, mask) mask[100:102, 100:102] = 1", "dtype=np.uint8), \"name\": \"CMU-1\", \"mask_location\": [101, 100], }, ], ] TEST_CASE_2", "from tests.utils import skip_if_quick _, has_cim = optional_import(\"cucim\") _, has_osl", "np.array( [ [[243, 243], [243, 243]], [[243, 243], [243, 243]],", "[100, 100], }, ], ] TEST_CASE_4 = [ { \"data\":", "\"data\": [ {\"image\": FILE_PATH, \"mask\": MASK1}, {\"image\": FILE_PATH, \"mask\": MASK2},", "[ TEST_CASE_0, TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4, ] ) @skipUnless(has_cim, \"Requires", "1, 
\"image_reader_name\": \"OpenSlide\", }, [ { \"image\": np.array([[[243]], [[243]], [[243]]],", "[{\"image\": FILE_PATH, \"mask\": MASK2}], \"patch_size\": 1, \"image_reader_name\": \"cuCIM\", }, [", "TEST_CASE_4 = [ { \"data\": [ {\"image\": FILE_PATH, \"mask\": MASK1},", "\"cuCIM\", }, [ { \"image\": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), \"name\":", "{\"image\": FILE_PATH, \"mask\": MASK1}, ], \"patch_size\": 1, \"image_reader_name\": \"OpenSlide\", },", "] TEST_CASE_OPENSLIDE_1 = [ { \"data\": [{\"image\": FILE_PATH, \"mask\": MASK2}],", "{ \"data\": [{\"image\": FILE_PATH, \"mask\": MASK2}], \"patch_size\": 1, \"image_reader_name\": \"OpenSlide\",", "class TestMaskedInferenceWSIDataset(unittest.TestCase): def setUp(self): prepare_data() download_url(FILE_URL, FILE_PATH, \"5a3cfd4fd725c50578ddb80b517b759f\") @parameterized.expand( [", "\"Requires CuCIM\") @skip_if_quick def test_read_patches_cucim(self, input_parameters, expected): dataset = MaskedInferenceWSIDataset(**input_parameters)", "\"name\": \"CMU-1\", \"mask_location\": [101, 100], }, ], ] TEST_CASE_2 =", "[[243]], [[243]]], dtype=np.uint8), \"name\": \"CMU-1\", \"mask_location\": [100, 101], }, {", "mask) mask[100, 100:102] = 1 np.save(MASK2, mask) mask[100:102, 100:102] =", "self.assertIsNone(assert_array_equal(dataset[i][0][\"image\"], expected[i][\"image\"])) self.assertEqual(dataset[i][0][\"name\"], expected[i][\"name\"]) self.assertListEqual(dataset[i][0][\"mask_location\"], expected[i][\"mask_location\"]) if __name__ == \"__main__\":", "def prepare_data(): mask = np.zeros((WIDTH // 2, HEIGHT // 2))", "243]], [[243, 243], [243, 243]], [[243, 243], [243, 243]], ],", "test_read_patches_cucim(self, input_parameters, expected): dataset = MaskedInferenceWSIDataset(**input_parameters) self.compare_samples_expected(dataset, expected) @parameterized.expand( [", "\"mask_location\": [101, 100], }, ], ] class TestMaskedInferenceWSIDataset(unittest.TestCase): def setUp(self):", "] TEST_CASE_2 
= [ { \"data\": [{\"image\": FILE_PATH, \"mask\": MASK4}],", "_, has_osl = optional_import(\"openslide\") FILE_URL = \"http://openslide.cs.cmu.edu/download/openslide-testdata/Generic-TIFF/CMU-1.tiff\" FILE_PATH = os.path.join(os.path.dirname(__file__),", "from monai.utils import optional_import from tests.utils import skip_if_quick _, has_cim", "\"mask_location\": [100, 100], }, ], ] TEST_CASE_1 = [ {", "), \"name\": \"CMU-1\", \"mask_location\": [100, 100], }, ], ] TEST_CASE_4", "{ \"data\": [ {\"image\": FILE_PATH, \"mask\": MASK1}, ], \"patch_size\": 2,", "\"name\": \"CMU-1\", \"mask_location\": [101, 101], }, ], ] TEST_CASE_3 =", "expected[i][\"image\"])) self.assertEqual(dataset[i][0][\"name\"], expected[i][\"name\"]) self.assertListEqual(dataset[i][0][\"mask_location\"], expected[i][\"mask_location\"]) if __name__ == \"__main__\": unittest.main()", "monai.apps.pathology.datasets import MaskedInferenceWSIDataset from monai.apps.utils import download_url from monai.utils import", "}, ], ] TEST_CASE_4 = [ { \"data\": [ {\"image\":", "prepare_data() download_url(FILE_URL, FILE_PATH, \"5a3cfd4fd725c50578ddb80b517b759f\") @parameterized.expand( [ TEST_CASE_0, TEST_CASE_1, TEST_CASE_2, TEST_CASE_3,", "}, [ { \"image\": np.array( [ [[243, 243], [243, 243]],", "def setUp(self): prepare_data() download_url(FILE_URL, FILE_PATH, \"5a3cfd4fd725c50578ddb80b517b759f\") @parameterized.expand( [ TEST_CASE_0, TEST_CASE_1,", "\"mask\": MASK2}], \"patch_size\": 1, \"image_reader_name\": \"cuCIM\", }, [ { \"image\":", "HEIGHT // 2)) mask[100, 100] = 1 np.save(MASK1, mask) mask[100,", "100], }, ], ] TEST_CASE_OPENSLIDE_0 = [ { \"data\": [", "TEST_CASE_3, TEST_CASE_4, ] ) @skipUnless(has_cim, \"Requires CuCIM\") @skip_if_quick def test_read_patches_cucim(self,", "[ {\"image\": FILE_PATH, \"mask\": MASK1}, ], \"patch_size\": 1, \"image_reader_name\": \"cuCIM\",", "{\"image\": FILE_PATH, \"mask\": MASK1}, ], \"patch_size\": 1, \"image_reader_name\": \"cuCIM\", },", "\"name\": 
\"CMU-1\", \"mask_location\": [101, 100], }, ], ] class TestMaskedInferenceWSIDataset(unittest.TestCase):", "\"image\": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), \"name\": \"CMU-1\", \"mask_location\": [100, 101],", "] TEST_CASE_3 = [ { \"data\": [ {\"image\": FILE_PATH, \"mask\":", "\"image\": np.array( [ [[243, 243], [243, 243]], [[243, 243], [243,", "monai.utils import optional_import from tests.utils import skip_if_quick _, has_cim =", "np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), \"name\": \"CMU-1\", \"mask_location\": [100, 100], },", "100], }, { \"image\": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), \"name\": \"CMU-1\",", "@skip_if_quick def test_read_patches_cucim(self, input_parameters, expected): dataset = MaskedInferenceWSIDataset(**input_parameters) self.compare_samples_expected(dataset, expected)", "] TEST_CASE_4 = [ { \"data\": [ {\"image\": FILE_PATH, \"mask\":", "[[243]], [[243]]], dtype=np.uint8), \"name\": \"CMU-1\", \"mask_location\": [101, 100], }, {", "[ { \"image\": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), \"name\": \"CMU-1\", \"mask_location\":", "mask[100:102, 100:102] = 1 np.save(MASK4, mask) TEST_CASE_0 = [ {", "skipUnless import numpy as np from numpy.testing import assert_array_equal from", "\"data\": [{\"image\": FILE_PATH, \"mask\": MASK2}], \"patch_size\": 1, \"image_reader_name\": \"OpenSlide\", },", "\"testing_data\", os.path.basename(FILE_URL)) MASK1 = os.path.join(os.path.dirname(__file__), \"testing_data\", \"tissue_mask1.npy\") MASK2 = os.path.join(os.path.dirname(__file__),", "WIDTH = 46000 def prepare_data(): mask = np.zeros((WIDTH // 2,", "\"data\": [ {\"image\": FILE_PATH, \"mask\": MASK1}, ], \"patch_size\": 2, \"image_reader_name\":", "def compare_samples_expected(self, dataset, expected): for i in range(len(dataset)): self.assertTupleEqual(dataset[i][0][\"image\"].shape, expected[i][\"image\"].shape)", "dataset, expected): for i in range(len(dataset)): 
self.assertTupleEqual(dataset[i][0][\"image\"].shape, expected[i][\"image\"].shape) self.assertIsNone(assert_array_equal(dataset[i][0][\"image\"], expected[i][\"image\"]))", "[[243]], [[243]]], dtype=np.uint8), \"name\": \"CMU-1\", \"mask_location\": [101, 100], }, ],", "100], }, ], ] class TestMaskedInferenceWSIDataset(unittest.TestCase): def setUp(self): prepare_data() download_url(FILE_URL,", "os.path.join(os.path.dirname(__file__), \"testing_data\", os.path.basename(FILE_URL)) MASK1 = os.path.join(os.path.dirname(__file__), \"testing_data\", \"tissue_mask1.npy\") MASK2 =", "], ] TEST_CASE_OPENSLIDE_0 = [ { \"data\": [ {\"image\": FILE_PATH,", "243]], ], dtype=np.uint8, ), \"name\": \"CMU-1\", \"mask_location\": [100, 100], },", "@skip_if_quick def test_read_patches_openslide(self, input_parameters, expected): dataset = MaskedInferenceWSIDataset(**input_parameters) self.compare_samples_expected(dataset, expected)", "dtype=np.uint8), \"name\": \"CMU-1\", \"mask_location\": [101, 100], }, ], ] class", "[ {\"image\": FILE_PATH, \"mask\": MASK1}, ], \"patch_size\": 1, \"image_reader_name\": \"OpenSlide\",", "TEST_CASE_0 = [ { \"data\": [ {\"image\": FILE_PATH, \"mask\": MASK1},", "100], }, ], ] TEST_CASE_2 = [ { \"data\": [{\"image\":", "\"CMU-1\", \"mask_location\": [100, 100], }, ], ] TEST_CASE_1 = [", "\"5a3cfd4fd725c50578ddb80b517b759f\") @parameterized.expand( [ TEST_CASE_0, TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4, ] )", "\"mask\": MASK1}, ], \"patch_size\": 1, \"image_reader_name\": \"cuCIM\", }, [ {", "\"name\": \"CMU-1\", \"mask_location\": [100, 100], }, ], ] TEST_CASE_4 =", "\"CMU-1\", \"mask_location\": [101, 100], }, ], ] class TestMaskedInferenceWSIDataset(unittest.TestCase): def", "\"CMU-1\", \"mask_location\": [101, 101], }, ], ] TEST_CASE_3 = [", "FILE_PATH, \"mask\": MASK1}, ], \"patch_size\": 1, \"image_reader_name\": \"cuCIM\", }, [", "[{\"image\": FILE_PATH, \"mask\": MASK4}], \"patch_size\": 1, \"image_reader_name\": \"cuCIM\", }, [", 
"prepare_data(): mask = np.zeros((WIDTH // 2, HEIGHT // 2)) mask[100,", "2, \"image_reader_name\": \"cuCIM\", }, [ { \"image\": np.array( [ [[243,", "TEST_CASE_2, TEST_CASE_3, TEST_CASE_4, ] ) @skipUnless(has_cim, \"Requires CuCIM\") @skip_if_quick def", "dtype=np.uint8), \"name\": \"CMU-1\", \"mask_location\": [101, 100], }, { \"image\": np.array([[[243]],", "from parameterized import parameterized from monai.apps.pathology.datasets import MaskedInferenceWSIDataset from monai.apps.utils", "\"mask_location\": [101, 100], }, { \"image\": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8),", "\"CMU-1\", \"mask_location\": [100, 100], }, ], ] TEST_CASE_OPENSLIDE_1 = [", "\"mask\": MASK2}, ], \"patch_size\": 1, \"image_reader_name\": \"cuCIM\", }, [ {", "= np.zeros((WIDTH // 2, HEIGHT // 2)) mask[100, 100] =", "[[243]]], dtype=np.uint8), \"name\": \"CMU-1\", \"mask_location\": [100, 100], }, ], ]", "}, ], ] TEST_CASE_OPENSLIDE_1 = [ { \"data\": [{\"image\": FILE_PATH,", "MASK2}], \"patch_size\": 1, \"image_reader_name\": \"cuCIM\", }, [ { \"image\": np.array([[[243]],", "\"mask\": MASK1}, ], \"patch_size\": 1, \"image_reader_name\": \"OpenSlide\", }, [ {", "range(len(dataset)): self.assertTupleEqual(dataset[i][0][\"image\"].shape, expected[i][\"image\"].shape) self.assertIsNone(assert_array_equal(dataset[i][0][\"image\"], expected[i][\"image\"])) self.assertEqual(dataset[i][0][\"name\"], expected[i][\"name\"]) self.assertListEqual(dataset[i][0][\"mask_location\"], expected[i][\"mask_location\"]) if", "101], }, { \"image\": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), \"name\": \"CMU-1\",", "\"testing_data\", \"tissue_mask2.npy\") MASK4 = os.path.join(os.path.dirname(__file__), \"testing_data\", \"tissue_mask4.npy\") HEIGHT = 32914", "_, has_cim = optional_import(\"cucim\") _, has_osl = optional_import(\"openslide\") FILE_URL =", "FILE_PATH, \"mask\": MASK2}, ], \"patch_size\": 1, \"image_reader_name\": \"cuCIM\", }, [", "1, \"image_reader_name\": 
\"cuCIM\", }, [ { \"image\": np.array([[[243]], [[243]], [[243]]],", "= 46000 def prepare_data(): mask = np.zeros((WIDTH // 2, HEIGHT", "= [ { \"data\": [{\"image\": FILE_PATH, \"mask\": MASK2}], \"patch_size\": 1,", "32914 WIDTH = 46000 def prepare_data(): mask = np.zeros((WIDTH //", "], \"patch_size\": 2, \"image_reader_name\": \"cuCIM\", }, [ { \"image\": np.array(", "CuCIM\") @skip_if_quick def test_read_patches_cucim(self, input_parameters, expected): dataset = MaskedInferenceWSIDataset(**input_parameters) self.compare_samples_expected(dataset,", "= os.path.join(os.path.dirname(__file__), \"testing_data\", \"tissue_mask1.npy\") MASK2 = os.path.join(os.path.dirname(__file__), \"testing_data\", \"tissue_mask2.npy\") MASK4", "[[243]]], dtype=np.uint8), \"name\": \"CMU-1\", \"mask_location\": [101, 101], }, ], ]", "] ) @skipUnless(has_cim, \"Requires CuCIM\") @skip_if_quick def test_read_patches_cucim(self, input_parameters, expected):", "[ {\"image\": FILE_PATH, \"mask\": MASK1}, ], \"patch_size\": 2, \"image_reader_name\": \"cuCIM\",", "dtype=np.uint8), \"name\": \"CMU-1\", \"mask_location\": [101, 101], }, ], ] TEST_CASE_3", "] ) @skipUnless(has_osl, \"Requires OpenSlide\") @skip_if_quick def test_read_patches_openslide(self, input_parameters, expected):", "as np from numpy.testing import assert_array_equal from parameterized import parameterized", "\"name\": \"CMU-1\", \"mask_location\": [101, 100], }, { \"image\": np.array([[[243]], [[243]],", "TestMaskedInferenceWSIDataset(unittest.TestCase): def setUp(self): prepare_data() download_url(FILE_URL, FILE_PATH, \"5a3cfd4fd725c50578ddb80b517b759f\") @parameterized.expand( [ TEST_CASE_0,", "<filename>tests/test_masked_inference_wsi_dataset.py import os import unittest from unittest import skipUnless import", "import parameterized from monai.apps.pathology.datasets import MaskedInferenceWSIDataset from monai.apps.utils import download_url", "= os.path.join(os.path.dirname(__file__), \"testing_data\", 
\"tissue_mask4.npy\") HEIGHT = 32914 WIDTH = 46000", "\"tissue_mask2.npy\") MASK4 = os.path.join(os.path.dirname(__file__), \"testing_data\", \"tissue_mask4.npy\") HEIGHT = 32914 WIDTH", "unittest import skipUnless import numpy as np from numpy.testing import", "np.save(MASK4, mask) TEST_CASE_0 = [ { \"data\": [ {\"image\": FILE_PATH,", "1 np.save(MASK2, mask) mask[100:102, 100:102] = 1 np.save(MASK4, mask) TEST_CASE_0", "@parameterized.expand( [ TEST_CASE_OPENSLIDE_0, TEST_CASE_OPENSLIDE_1, ] ) @skipUnless(has_osl, \"Requires OpenSlide\") @skip_if_quick", "MASK4 = os.path.join(os.path.dirname(__file__), \"testing_data\", \"tissue_mask4.npy\") HEIGHT = 32914 WIDTH =", "\"image\": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), \"name\": \"CMU-1\", \"mask_location\": [100, 100],", "numpy as np from numpy.testing import assert_array_equal from parameterized import", "[ [[243, 243], [243, 243]], [[243, 243], [243, 243]], [[243,", "MASK2}], \"patch_size\": 1, \"image_reader_name\": \"OpenSlide\", }, [ { \"image\": np.array([[[243]],", "[[243]]], dtype=np.uint8), \"name\": \"CMU-1\", \"mask_location\": [100, 100], }, { \"image\":", "\"cuCIM\", }, [ { \"image\": np.array( [ [[243, 243], [243,", "mask) TEST_CASE_0 = [ { \"data\": [ {\"image\": FILE_PATH, \"mask\":", "[[243]]], dtype=np.uint8), \"name\": \"CMU-1\", \"mask_location\": [101, 100], }, { \"image\":", "] TEST_CASE_1 = [ { \"data\": [{\"image\": FILE_PATH, \"mask\": MASK2}],", "\"testing_data\", \"tissue_mask4.npy\") HEIGHT = 32914 WIDTH = 46000 def prepare_data():", "= optional_import(\"cucim\") _, has_osl = optional_import(\"openslide\") FILE_URL = \"http://openslide.cs.cmu.edu/download/openslide-testdata/Generic-TIFF/CMU-1.tiff\" FILE_PATH", "os.path.join(os.path.dirname(__file__), \"testing_data\", \"tissue_mask4.npy\") HEIGHT = 32914 WIDTH = 46000 def", "[[243]], [[243]]], dtype=np.uint8), \"name\": \"CMU-1\", \"mask_location\": [101, 101], }, ],", "\"name\": \"CMU-1\", \"mask_location\": [100, 101], 
}, { \"image\": np.array([[[243]], [[243]],", "import assert_array_equal from parameterized import parameterized from monai.apps.pathology.datasets import MaskedInferenceWSIDataset", "expected): dataset = MaskedInferenceWSIDataset(**input_parameters) self.compare_samples_expected(dataset, expected) def compare_samples_expected(self, dataset, expected):", "dataset = MaskedInferenceWSIDataset(**input_parameters) self.compare_samples_expected(dataset, expected) def compare_samples_expected(self, dataset, expected): for", "{\"image\": FILE_PATH, \"mask\": MASK2}, ], \"patch_size\": 1, \"image_reader_name\": \"cuCIM\", },", "import MaskedInferenceWSIDataset from monai.apps.utils import download_url from monai.utils import optional_import", "expected) def compare_samples_expected(self, dataset, expected): for i in range(len(dataset)): self.assertTupleEqual(dataset[i][0][\"image\"].shape,", "TEST_CASE_1 = [ { \"data\": [{\"image\": FILE_PATH, \"mask\": MASK2}], \"patch_size\":", "[243, 243]], [[243, 243], [243, 243]], [[243, 243], [243, 243]],", "\"mask\": MASK2}], \"patch_size\": 1, \"image_reader_name\": \"OpenSlide\", }, [ { \"image\":", "input_parameters, expected): dataset = MaskedInferenceWSIDataset(**input_parameters) self.compare_samples_expected(dataset, expected) @parameterized.expand( [ TEST_CASE_OPENSLIDE_0,", "test_read_patches_openslide(self, input_parameters, expected): dataset = MaskedInferenceWSIDataset(**input_parameters) self.compare_samples_expected(dataset, expected) def compare_samples_expected(self,", "[ { \"data\": [{\"image\": FILE_PATH, \"mask\": MASK2}], \"patch_size\": 1, \"image_reader_name\":", "}, [ { \"image\": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), \"name\": \"CMU-1\",", "100], }, ], ] TEST_CASE_OPENSLIDE_1 = [ { \"data\": [{\"image\":", "unittest from unittest import skipUnless import numpy as np from", "mask[100, 100] = 1 np.save(MASK1, mask) mask[100, 100:102] = 1", "compare_samples_expected(self, dataset, expected): for 
i in range(len(dataset)): self.assertTupleEqual(dataset[i][0][\"image\"].shape, expected[i][\"image\"].shape) self.assertIsNone(assert_array_equal(dataset[i][0][\"image\"],", "self.compare_samples_expected(dataset, expected) @parameterized.expand( [ TEST_CASE_OPENSLIDE_0, TEST_CASE_OPENSLIDE_1, ] ) @skipUnless(has_osl, \"Requires", "in range(len(dataset)): self.assertTupleEqual(dataset[i][0][\"image\"].shape, expected[i][\"image\"].shape) self.assertIsNone(assert_array_equal(dataset[i][0][\"image\"], expected[i][\"image\"])) self.assertEqual(dataset[i][0][\"name\"], expected[i][\"name\"]) self.assertListEqual(dataset[i][0][\"mask_location\"], expected[i][\"mask_location\"])", "@skipUnless(has_cim, \"Requires CuCIM\") @skip_if_quick def test_read_patches_cucim(self, input_parameters, expected): dataset =", "\"name\": \"CMU-1\", \"mask_location\": [100, 100], }, ], ] TEST_CASE_1 =", "expected) @parameterized.expand( [ TEST_CASE_OPENSLIDE_0, TEST_CASE_OPENSLIDE_1, ] ) @skipUnless(has_osl, \"Requires OpenSlide\")", "1 np.save(MASK1, mask) mask[100, 100:102] = 1 np.save(MASK2, mask) mask[100:102,", "[100, 100], }, ], ] TEST_CASE_OPENSLIDE_1 = [ { \"data\":", "[[243]]], dtype=np.uint8), \"name\": \"CMU-1\", \"mask_location\": [100, 101], }, { \"image\":", "np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), \"name\": \"CMU-1\", \"mask_location\": [101, 100], },", "import skipUnless import numpy as np from numpy.testing import assert_array_equal", "{\"image\": FILE_PATH, \"mask\": MASK1}, {\"image\": FILE_PATH, \"mask\": MASK2}, ], \"patch_size\":", "], \"patch_size\": 1, \"image_reader_name\": \"OpenSlide\", }, [ { \"image\": np.array([[[243]],", "MASK1}, {\"image\": FILE_PATH, \"mask\": MASK2}, ], \"patch_size\": 1, \"image_reader_name\": \"cuCIM\",", "[[243]], [[243]]], dtype=np.uint8), \"name\": \"CMU-1\", \"mask_location\": [100, 100], }, {", "os.path.basename(FILE_URL)) MASK1 = os.path.join(os.path.dirname(__file__), \"testing_data\", \"tissue_mask1.npy\") MASK2 
= os.path.join(os.path.dirname(__file__), \"testing_data\",", "100:102] = 1 np.save(MASK4, mask) TEST_CASE_0 = [ { \"data\":", "FILE_PATH, \"mask\": MASK4}], \"patch_size\": 1, \"image_reader_name\": \"cuCIM\", }, [ {", "[101, 101], }, ], ] TEST_CASE_3 = [ { \"data\":", "\"patch_size\": 2, \"image_reader_name\": \"cuCIM\", }, [ { \"image\": np.array( [", "FILE_PATH, \"5a3cfd4fd725c50578ddb80b517b759f\") @parameterized.expand( [ TEST_CASE_0, TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4, ]", "MASK1}, ], \"patch_size\": 2, \"image_reader_name\": \"cuCIM\", }, [ { \"image\":", "import numpy as np from numpy.testing import assert_array_equal from parameterized", "self.compare_samples_expected(dataset, expected) def compare_samples_expected(self, dataset, expected): for i in range(len(dataset)):", "import optional_import from tests.utils import skip_if_quick _, has_cim = optional_import(\"cucim\")", "np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), \"name\": \"CMU-1\", \"mask_location\": [101, 101], },", "], ] TEST_CASE_1 = [ { \"data\": [{\"image\": FILE_PATH, \"mask\":", "// 2)) mask[100, 100] = 1 np.save(MASK1, mask) mask[100, 100:102]", "self.assertTupleEqual(dataset[i][0][\"image\"].shape, expected[i][\"image\"].shape) self.assertIsNone(assert_array_equal(dataset[i][0][\"image\"], expected[i][\"image\"])) self.assertEqual(dataset[i][0][\"name\"], expected[i][\"name\"]) self.assertListEqual(dataset[i][0][\"mask_location\"], expected[i][\"mask_location\"]) if __name__", "MaskedInferenceWSIDataset(**input_parameters) self.compare_samples_expected(dataset, expected) def compare_samples_expected(self, dataset, expected): for i in", "@skipUnless(has_osl, \"Requires OpenSlide\") @skip_if_quick def test_read_patches_openslide(self, input_parameters, expected): dataset =", "[[243, 243], [243, 243]], [[243, 243], [243, 243]], ], dtype=np.uint8,", "{ \"image\": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), \"name\": \"CMU-1\", \"mask_location\": [101,", 
"101], }, ], ] TEST_CASE_3 = [ { \"data\": [", "FILE_PATH, \"mask\": MASK1}, {\"image\": FILE_PATH, \"mask\": MASK2}, ], \"patch_size\": 1,", "1 np.save(MASK4, mask) TEST_CASE_0 = [ { \"data\": [ {\"image\":", "os.path.join(os.path.dirname(__file__), \"testing_data\", \"tissue_mask1.npy\") MASK2 = os.path.join(os.path.dirname(__file__), \"testing_data\", \"tissue_mask2.npy\") MASK4 =", "FILE_PATH, \"mask\": MASK2}], \"patch_size\": 1, \"image_reader_name\": \"OpenSlide\", }, [ {", "[243, 243]], [[243, 243], [243, 243]], ], dtype=np.uint8, ), \"name\":", "\"CMU-1\", \"mask_location\": [100, 100], }, { \"image\": np.array([[[243]], [[243]], [[243]]],", "MASK1}, ], \"patch_size\": 1, \"image_reader_name\": \"OpenSlide\", }, [ { \"image\":", "= MaskedInferenceWSIDataset(**input_parameters) self.compare_samples_expected(dataset, expected) def compare_samples_expected(self, dataset, expected): for i", "[ {\"image\": FILE_PATH, \"mask\": MASK1}, {\"image\": FILE_PATH, \"mask\": MASK2}, ],", "= os.path.join(os.path.dirname(__file__), \"testing_data\", os.path.basename(FILE_URL)) MASK1 = os.path.join(os.path.dirname(__file__), \"testing_data\", \"tissue_mask1.npy\") MASK2", "100:102] = 1 np.save(MASK2, mask) mask[100:102, 100:102] = 1 np.save(MASK4,", "[100, 100], }, { \"image\": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), \"name\":", "243], [243, 243]], [[243, 243], [243, 243]], ], dtype=np.uint8, ),", "\"image\": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), \"name\": \"CMU-1\", \"mask_location\": [101, 100],", "TEST_CASE_2 = [ { \"data\": [{\"image\": FILE_PATH, \"mask\": MASK4}], \"patch_size\":", "{ \"data\": [ {\"image\": FILE_PATH, \"mask\": MASK1}, {\"image\": FILE_PATH, \"mask\":", "MASK2 = os.path.join(os.path.dirname(__file__), \"testing_data\", \"tissue_mask2.npy\") MASK4 = os.path.join(os.path.dirname(__file__), \"testing_data\", \"tissue_mask4.npy\")", "243], [243, 243]], ], dtype=np.uint8, ), \"name\": \"CMU-1\", \"mask_location\": [100,", 
"\"mask\": MASK1}, {\"image\": FILE_PATH, \"mask\": MASK2}, ], \"patch_size\": 1, \"image_reader_name\":", "MaskedInferenceWSIDataset(**input_parameters) self.compare_samples_expected(dataset, expected) @parameterized.expand( [ TEST_CASE_OPENSLIDE_0, TEST_CASE_OPENSLIDE_1, ] ) @skipUnless(has_osl,", "TEST_CASE_OPENSLIDE_1 = [ { \"data\": [{\"image\": FILE_PATH, \"mask\": MASK2}], \"patch_size\":", "}, { \"image\": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), \"name\": \"CMU-1\", \"mask_location\":", "[[243]], [[243]]], dtype=np.uint8), \"name\": \"CMU-1\", \"mask_location\": [100, 100], }, ],", "= os.path.join(os.path.dirname(__file__), \"testing_data\", \"tissue_mask2.npy\") MASK4 = os.path.join(os.path.dirname(__file__), \"testing_data\", \"tissue_mask4.npy\") HEIGHT", "], dtype=np.uint8, ), \"name\": \"CMU-1\", \"mask_location\": [100, 100], }, ]," ]
[ "for i in items] def get_cover(self) -> str: return self._cover_from_content(self.img_selector)", "'archive' def get_chapter_index(self) -> str: return '0' def get_main_content(self): return", "def get_chapters(self): return [b''] def get_files(self): items = self.document_fromstring(self.content, self.img_selector)", "return '0' def get_main_content(self): return self._get_content('{}/gallery/{}') def get_manga_name(self) -> str:", "# todo meta pass def chapter_for_json(self): return self.get_url() main =", "import Provider from .helpers.std import Std class DoujinsCom(Provider, Std): img_selector", "-> str: return self._get_name('/gallery/([^/]+)') def get_chapters(self): return [b''] def get_files(self):", "return self._cover_from_content(self.img_selector) def book_meta(self) -> dict: # todo meta pass", "-> dict: # todo meta pass def chapter_for_json(self): return self.get_url()", "Provider from .helpers.std import Std class DoujinsCom(Provider, Std): img_selector =", "get_chapter_index(self) -> str: return '0' def get_main_content(self): return self._get_content('{}/gallery/{}') def", "items = self.document_fromstring(self.content, self.img_selector) return [i.get('data-file').replace('&amp;', '&') for i in", "todo meta pass def chapter_for_json(self): return self.get_url() main = DoujinsCom", "return self._get_name('/gallery/([^/]+)') def get_chapters(self): return [b''] def get_files(self): items =", "def get_files(self): items = self.document_fromstring(self.content, self.img_selector) return [i.get('data-file').replace('&amp;', '&') for", "def get_manga_name(self) -> str: return self._get_name('/gallery/([^/]+)') def get_chapters(self): return [b'']", "get_files(self): items = self.document_fromstring(self.content, self.img_selector) return [i.get('data-file').replace('&amp;', '&') for i", "self.document_fromstring(self.content, self.img_selector) return [i.get('data-file').replace('&amp;', '&') for i in items] def", "self._get_name('/gallery/([^/]+)') 
def get_chapters(self): return [b''] def get_files(self): items = self.document_fromstring(self.content,", "= self.document_fromstring(self.content, self.img_selector) return [i.get('data-file').replace('&amp;', '&') for i in items]", "'#image-container img.doujin' def get_archive_name(self) -> str: return 'archive' def get_chapter_index(self)", "-> str: return self._cover_from_content(self.img_selector) def book_meta(self) -> dict: # todo", "[b''] def get_files(self): items = self.document_fromstring(self.content, self.img_selector) return [i.get('data-file').replace('&amp;', '&')", "import Std class DoujinsCom(Provider, Std): img_selector = '#image-container img.doujin' def", "return 'archive' def get_chapter_index(self) -> str: return '0' def get_main_content(self):", "manga_py.provider import Provider from .helpers.std import Std class DoujinsCom(Provider, Std):", "img_selector = '#image-container img.doujin' def get_archive_name(self) -> str: return 'archive'", "[i.get('data-file').replace('&amp;', '&') for i in items] def get_cover(self) -> str:", "i in items] def get_cover(self) -> str: return self._cover_from_content(self.img_selector) def", "get_archive_name(self) -> str: return 'archive' def get_chapter_index(self) -> str: return", "get_main_content(self): return self._get_content('{}/gallery/{}') def get_manga_name(self) -> str: return self._get_name('/gallery/([^/]+)') def", "in items] def get_cover(self) -> str: return self._cover_from_content(self.img_selector) def book_meta(self)", "items] def get_cover(self) -> str: return self._cover_from_content(self.img_selector) def book_meta(self) ->", "'0' def get_main_content(self): return self._get_content('{}/gallery/{}') def get_manga_name(self) -> str: return", "= '#image-container img.doujin' def get_archive_name(self) -> str: return 'archive' def", "str: return '0' def get_main_content(self): return self._get_content('{}/gallery/{}') def get_manga_name(self) ->", "Std): img_selector = '#image-container 
img.doujin' def get_archive_name(self) -> str: return", "get_manga_name(self) -> str: return self._get_name('/gallery/([^/]+)') def get_chapters(self): return [b''] def", "return [b''] def get_files(self): items = self.document_fromstring(self.content, self.img_selector) return [i.get('data-file').replace('&amp;',", "str: return 'archive' def get_chapter_index(self) -> str: return '0' def", "self.img_selector) return [i.get('data-file').replace('&amp;', '&') for i in items] def get_cover(self)", "-> str: return 'archive' def get_chapter_index(self) -> str: return '0'", "get_cover(self) -> str: return self._cover_from_content(self.img_selector) def book_meta(self) -> dict: #", "img.doujin' def get_archive_name(self) -> str: return 'archive' def get_chapter_index(self) ->", "<gh_stars>1-10 from manga_py.provider import Provider from .helpers.std import Std class", "def get_archive_name(self) -> str: return 'archive' def get_chapter_index(self) -> str:", "def get_chapter_index(self) -> str: return '0' def get_main_content(self): return self._get_content('{}/gallery/{}')", "'&') for i in items] def get_cover(self) -> str: return", "-> str: return '0' def get_main_content(self): return self._get_content('{}/gallery/{}') def get_manga_name(self)", "str: return self._cover_from_content(self.img_selector) def book_meta(self) -> dict: # todo meta", "str: return self._get_name('/gallery/([^/]+)') def get_chapters(self): return [b''] def get_files(self): items", "get_chapters(self): return [b''] def get_files(self): items = self.document_fromstring(self.content, self.img_selector) return", "return self._get_content('{}/gallery/{}') def get_manga_name(self) -> str: return self._get_name('/gallery/([^/]+)') def get_chapters(self):", "def get_main_content(self): return self._get_content('{}/gallery/{}') def get_manga_name(self) -> str: return self._get_name('/gallery/([^/]+)')", "self._cover_from_content(self.img_selector) def book_meta(self) -> dict: # todo meta pass def", "def 
book_meta(self) -> dict: # todo meta pass def chapter_for_json(self):", ".helpers.std import Std class DoujinsCom(Provider, Std): img_selector = '#image-container img.doujin'", "self._get_content('{}/gallery/{}') def get_manga_name(self) -> str: return self._get_name('/gallery/([^/]+)') def get_chapters(self): return", "book_meta(self) -> dict: # todo meta pass def chapter_for_json(self): return", "Std class DoujinsCom(Provider, Std): img_selector = '#image-container img.doujin' def get_archive_name(self)", "dict: # todo meta pass def chapter_for_json(self): return self.get_url() main", "def get_cover(self) -> str: return self._cover_from_content(self.img_selector) def book_meta(self) -> dict:", "class DoujinsCom(Provider, Std): img_selector = '#image-container img.doujin' def get_archive_name(self) ->", "return [i.get('data-file').replace('&amp;', '&') for i in items] def get_cover(self) ->", "DoujinsCom(Provider, Std): img_selector = '#image-container img.doujin' def get_archive_name(self) -> str:", "from manga_py.provider import Provider from .helpers.std import Std class DoujinsCom(Provider,", "from .helpers.std import Std class DoujinsCom(Provider, Std): img_selector = '#image-container" ]
[ "QModelIndex): editor.blockSignals(True) editor.setChecked(index.model().data(index)) self.enabled = editor.isChecked() editor.blockSignals(False) def setModelData(self, editor:", "editor: QCheckBox, model: QAbstractItemModel, index: QModelIndex): model.setData(index, editor.isChecked(), Qt.EditRole) @pyqtSlot()", "PyQt5.QtCore import QModelIndex, QAbstractItemModel, Qt, pyqtSlot from PyQt5.QtWidgets import QItemDelegate,", "QStyleOptionViewItem, index: QModelIndex): editor = QCheckBox(parent) editor.stateChanged.connect(self.stateChanged) return editor def", "QCheckBox, model: QAbstractItemModel, index: QModelIndex): model.setData(index, editor.isChecked(), Qt.EditRole) @pyqtSlot() def", "QCheckBox class CheckBoxDelegate(QItemDelegate): def __init__(self, parent=None): super().__init__(parent) self.enabled = True", "QWidget, QStyleOptionViewItem, QCheckBox class CheckBoxDelegate(QItemDelegate): def __init__(self, parent=None): super().__init__(parent) self.enabled", "QItemDelegate, QWidget, QStyleOptionViewItem, QCheckBox class CheckBoxDelegate(QItemDelegate): def __init__(self, parent=None): super().__init__(parent)", "<reponame>awesome-archive/urh<filename>src/urh/ui/delegates/CheckBoxDelegate.py from PyQt5.QtCore import QModelIndex, QAbstractItemModel, Qt, pyqtSlot from PyQt5.QtWidgets", "option: QStyleOptionViewItem, index: QModelIndex): editor = QCheckBox(parent) editor.stateChanged.connect(self.stateChanged) return editor", "index: QModelIndex): editor.blockSignals(True) editor.setChecked(index.model().data(index)) self.enabled = editor.isChecked() editor.blockSignals(False) def setModelData(self,", "model: QAbstractItemModel, index: QModelIndex): model.setData(index, editor.isChecked(), Qt.EditRole) @pyqtSlot() def stateChanged(self):", "self.enabled = editor.isChecked() editor.blockSignals(False) def setModelData(self, editor: QCheckBox, model: QAbstractItemModel,", "class CheckBoxDelegate(QItemDelegate): def __init__(self, parent=None): 
super().__init__(parent) self.enabled = True def", "editor def setEditorData(self, editor: QCheckBox, index: QModelIndex): editor.blockSignals(True) editor.setChecked(index.model().data(index)) self.enabled", "QWidget, option: QStyleOptionViewItem, index: QModelIndex): editor = QCheckBox(parent) editor.stateChanged.connect(self.stateChanged) return", "= QCheckBox(parent) editor.stateChanged.connect(self.stateChanged) return editor def setEditorData(self, editor: QCheckBox, index:", "setEditorData(self, editor: QCheckBox, index: QModelIndex): editor.blockSignals(True) editor.setChecked(index.model().data(index)) self.enabled = editor.isChecked()", "True def createEditor(self, parent: QWidget, option: QStyleOptionViewItem, index: QModelIndex): editor", "def setEditorData(self, editor: QCheckBox, index: QModelIndex): editor.blockSignals(True) editor.setChecked(index.model().data(index)) self.enabled =", "from PyQt5.QtCore import QModelIndex, QAbstractItemModel, Qt, pyqtSlot from PyQt5.QtWidgets import", "pyqtSlot from PyQt5.QtWidgets import QItemDelegate, QWidget, QStyleOptionViewItem, QCheckBox class CheckBoxDelegate(QItemDelegate):", "QStyleOptionViewItem, QCheckBox class CheckBoxDelegate(QItemDelegate): def __init__(self, parent=None): super().__init__(parent) self.enabled =", "PyQt5.QtWidgets import QItemDelegate, QWidget, QStyleOptionViewItem, QCheckBox class CheckBoxDelegate(QItemDelegate): def __init__(self,", "from PyQt5.QtWidgets import QItemDelegate, QWidget, QStyleOptionViewItem, QCheckBox class CheckBoxDelegate(QItemDelegate): def", "Qt, pyqtSlot from PyQt5.QtWidgets import QItemDelegate, QWidget, QStyleOptionViewItem, QCheckBox class", "index: QModelIndex): editor = QCheckBox(parent) editor.stateChanged.connect(self.stateChanged) return editor def setEditorData(self,", "editor.setChecked(index.model().data(index)) self.enabled = editor.isChecked() editor.blockSignals(False) def setModelData(self, editor: QCheckBox, model:", 
"CheckBoxDelegate(QItemDelegate): def __init__(self, parent=None): super().__init__(parent) self.enabled = True def createEditor(self,", "= True def createEditor(self, parent: QWidget, option: QStyleOptionViewItem, index: QModelIndex):", "QCheckBox, index: QModelIndex): editor.blockSignals(True) editor.setChecked(index.model().data(index)) self.enabled = editor.isChecked() editor.blockSignals(False) def", "super().__init__(parent) self.enabled = True def createEditor(self, parent: QWidget, option: QStyleOptionViewItem,", "QModelIndex): editor = QCheckBox(parent) editor.stateChanged.connect(self.stateChanged) return editor def setEditorData(self, editor:", "editor.stateChanged.connect(self.stateChanged) return editor def setEditorData(self, editor: QCheckBox, index: QModelIndex): editor.blockSignals(True)", "editor: QCheckBox, index: QModelIndex): editor.blockSignals(True) editor.setChecked(index.model().data(index)) self.enabled = editor.isChecked() editor.blockSignals(False)", "editor.blockSignals(False) def setModelData(self, editor: QCheckBox, model: QAbstractItemModel, index: QModelIndex): model.setData(index,", "def __init__(self, parent=None): super().__init__(parent) self.enabled = True def createEditor(self, parent:", "parent=None): super().__init__(parent) self.enabled = True def createEditor(self, parent: QWidget, option:", "parent: QWidget, option: QStyleOptionViewItem, index: QModelIndex): editor = QCheckBox(parent) editor.stateChanged.connect(self.stateChanged)", "= editor.isChecked() editor.blockSignals(False) def setModelData(self, editor: QCheckBox, model: QAbstractItemModel, index:", "QAbstractItemModel, Qt, pyqtSlot from PyQt5.QtWidgets import QItemDelegate, QWidget, QStyleOptionViewItem, QCheckBox", "createEditor(self, parent: QWidget, option: QStyleOptionViewItem, index: QModelIndex): editor = QCheckBox(parent)", "setModelData(self, editor: QCheckBox, model: QAbstractItemModel, index: QModelIndex): model.setData(index, editor.isChecked(), 
Qt.EditRole)", "QCheckBox(parent) editor.stateChanged.connect(self.stateChanged) return editor def setEditorData(self, editor: QCheckBox, index: QModelIndex):", "import QModelIndex, QAbstractItemModel, Qt, pyqtSlot from PyQt5.QtWidgets import QItemDelegate, QWidget,", "return editor def setEditorData(self, editor: QCheckBox, index: QModelIndex): editor.blockSignals(True) editor.setChecked(index.model().data(index))", "QAbstractItemModel, index: QModelIndex): model.setData(index, editor.isChecked(), Qt.EditRole) @pyqtSlot() def stateChanged(self): self.commitData.emit(self.sender())", "editor.isChecked() editor.blockSignals(False) def setModelData(self, editor: QCheckBox, model: QAbstractItemModel, index: QModelIndex):", "editor.blockSignals(True) editor.setChecked(index.model().data(index)) self.enabled = editor.isChecked() editor.blockSignals(False) def setModelData(self, editor: QCheckBox,", "def setModelData(self, editor: QCheckBox, model: QAbstractItemModel, index: QModelIndex): model.setData(index, editor.isChecked(),", "import QItemDelegate, QWidget, QStyleOptionViewItem, QCheckBox class CheckBoxDelegate(QItemDelegate): def __init__(self, parent=None):", "QModelIndex, QAbstractItemModel, Qt, pyqtSlot from PyQt5.QtWidgets import QItemDelegate, QWidget, QStyleOptionViewItem,", "self.enabled = True def createEditor(self, parent: QWidget, option: QStyleOptionViewItem, index:", "def createEditor(self, parent: QWidget, option: QStyleOptionViewItem, index: QModelIndex): editor =", "__init__(self, parent=None): super().__init__(parent) self.enabled = True def createEditor(self, parent: QWidget,", "editor = QCheckBox(parent) editor.stateChanged.connect(self.stateChanged) return editor def setEditorData(self, editor: QCheckBox," ]
[ "input_dim=10000, output_dim=128) net = tflearn.simple_rnn(net, neurons_num, dropout=keep_prob) net = tflearn.fully_connected(net,", "= 100 # Number of epoch #Data preprocessing/ Converting data", "Validation samples: 400 RNN with 128 units Optimizer: Adam Epoch:", "keep_prob = 0.8 Accuracy of Validation set: 95% ''' from", "Tensorflow Training samples: 1600 Validation samples: 400 RNN with 128", "output_dim=128) net = tflearn.simple_rnn(net, neurons_num, dropout=keep_prob) net = tflearn.fully_connected(net, 2,", "to vector for the X = pad_sequences(X, maxlen=5, value=0.) Y", "= 0.001 # Learning rate for mini-batch SGD batch_size =", "Learning rate for mini-batch SGD batch_size = 32 # Batch", "in the RNN layer keep_prob = 0.5 # Keep probability", "maxlen=5, value=0.) Y = to_categorical(Y, 2) #Build the network net", "tflearn from tflearn.data_utils import to_categorical, pad_sequences from data_denbigh import *", "Training samples: 1600 Validation samples: 400 RNN with 128 units", "with 128 units Optimizer: Adam Epoch: 100 Loss: Cross Entropy", "import division, print_function, absolute_import import tflearn from tflearn.data_utils import to_categorical,", "dropout=keep_prob) net = tflearn.fully_connected(net, 2, activation='softmax') net = tflearn.regression(net, optimizer='adam',", "= 0.8 Accuracy of Validation set: 95% ''' from __future__", "tflearn.data_utils import to_categorical, pad_sequences from data_denbigh import * X, Y", "print_function, absolute_import import tflearn from tflearn.data_utils import to_categorical, pad_sequences from", "the drop-out regularization learning_rate = 0.001 # Learning rate for", "n_epoch = 100 # Number of epoch #Data preprocessing/ Converting", "1600 Validation samples: 400 RNN with 128 units Optimizer: Adam", "SGD batch_size = 32 # Batch size n_epoch = 100", "X = pad_sequences(X, maxlen=5, value=0.) 
Y = to_categorical(Y, 2) #Build", "activation='softmax') net = tflearn.regression(net, optimizer='adam', learning_rate=learning_rate, loss='categorical_crossentropy') model = tflearn.DNN(net,", "Number of neurons in the RNN layer keep_prob = 0.5", "Framework: Tensorflow Training samples: 1600 Validation samples: 400 RNN with", "# -*- coding: utf-8 -*- ''' Framework: Tensorflow Training samples:", "#Hyperparams neurons_num = 128 # Number of neurons in the", "value=0.) Y = to_categorical(Y, 2) #Build the network net =", "net = tflearn.fully_connected(net, 2, activation='softmax') net = tflearn.regression(net, optimizer='adam', learning_rate=learning_rate,", "size n_epoch = 100 # Number of epoch #Data preprocessing/", "= pad_sequences(X, maxlen=5, value=0.) Y = to_categorical(Y, 2) #Build the", "the RNN layer keep_prob = 0.5 # Keep probability for", "= 0.5 # Keep probability for the drop-out regularization learning_rate", "mini-batch SGD batch_size = 32 # Batch size n_epoch =", "100 # Number of epoch #Data preprocessing/ Converting data to", "= tflearn.simple_rnn(net, neurons_num, dropout=keep_prob) net = tflearn.fully_connected(net, 2, activation='softmax') net", "2, activation='softmax') net = tflearn.regression(net, optimizer='adam', learning_rate=learning_rate, loss='categorical_crossentropy') model =", "0.5 # Keep probability for the drop-out regularization learning_rate =", "for regression Regularization: Drop-out, keep_prob = 0.8 Accuracy of Validation", "95% ''' from __future__ import division, print_function, absolute_import import tflearn", "5]) net = tflearn.embedding(net, input_dim=10000, output_dim=128) net = tflearn.simple_rnn(net, neurons_num,", "net = tflearn.input_data([None, 5]) net = tflearn.embedding(net, input_dim=10000, output_dim=128) net", "samples: 400 RNN with 128 units Optimizer: Adam Epoch: 100", "keep_prob = 0.5 # Keep probability for the drop-out regularization", "= 128 # Number of neurons in the RNN layer", "Drop-out, keep_prob = 0.8 
Accuracy of Validation set: 95% '''", "-*- coding: utf-8 -*- ''' Framework: Tensorflow Training samples: 1600", "getDenbighData() #Hyperparams neurons_num = 128 # Number of neurons in", "from data_denbigh import * X, Y = getDenbighData() #Hyperparams neurons_num", "Keep probability for the drop-out regularization learning_rate = 0.001 #", "net = tflearn.regression(net, optimizer='adam', learning_rate=learning_rate, loss='categorical_crossentropy') model = tflearn.DNN(net, tensorboard_verbose=0)", "# Number of neurons in the RNN layer keep_prob =", "utf-8 -*- ''' Framework: Tensorflow Training samples: 1600 Validation samples:", "= 32 # Batch size n_epoch = 100 # Number", "preprocessing/ Converting data to vector for the X = pad_sequences(X,", "network net = tflearn.input_data([None, 5]) net = tflearn.embedding(net, input_dim=10000, output_dim=128)", "Epoch: 100 Loss: Cross Entropy Activation function: Relu for network", "and Soft-max for regression Regularization: Drop-out, keep_prob = 0.8 Accuracy", "# Number of epoch #Data preprocessing/ Converting data to vector", "= getDenbighData() #Hyperparams neurons_num = 128 # Number of neurons", "Activation function: Relu for network and Soft-max for regression Regularization:", "learning_rate=learning_rate, loss='categorical_crossentropy') model = tflearn.DNN(net, tensorboard_verbose=0) model.fit(X, Y, validation_set=0.2, show_metric=True,", "from __future__ import division, print_function, absolute_import import tflearn from tflearn.data_utils", "the X = pad_sequences(X, maxlen=5, value=0.) 
Y = to_categorical(Y, 2)", "for mini-batch SGD batch_size = 32 # Batch size n_epoch", "#Data preprocessing/ Converting data to vector for the X =", "''' Framework: Tensorflow Training samples: 1600 Validation samples: 400 RNN", "neurons in the RNN layer keep_prob = 0.5 # Keep", "= tflearn.DNN(net, tensorboard_verbose=0) model.fit(X, Y, validation_set=0.2, show_metric=True, batch_size=batch_size, n_epoch=n_epoch) model.save('./model.tfl')", "Adam Epoch: 100 Loss: Cross Entropy Activation function: Relu for", "= tflearn.regression(net, optimizer='adam', learning_rate=learning_rate, loss='categorical_crossentropy') model = tflearn.DNN(net, tensorboard_verbose=0) model.fit(X,", "net = tflearn.embedding(net, input_dim=10000, output_dim=128) net = tflearn.simple_rnn(net, neurons_num, dropout=keep_prob)", "# Batch size n_epoch = 100 # Number of epoch", "of epoch #Data preprocessing/ Converting data to vector for the", "Entropy Activation function: Relu for network and Soft-max for regression", "units Optimizer: Adam Epoch: 100 Loss: Cross Entropy Activation function:", "probability for the drop-out regularization learning_rate = 0.001 # Learning", "pad_sequences from data_denbigh import * X, Y = getDenbighData() #Hyperparams", "128 units Optimizer: Adam Epoch: 100 Loss: Cross Entropy Activation", "for the drop-out regularization learning_rate = 0.001 # Learning rate", "regularization learning_rate = 0.001 # Learning rate for mini-batch SGD", "to_categorical(Y, 2) #Build the network net = tflearn.input_data([None, 5]) net", "regression Regularization: Drop-out, keep_prob = 0.8 Accuracy of Validation set:", "for the X = pad_sequences(X, maxlen=5, value=0.) 
Y = to_categorical(Y,", "neurons_num = 128 # Number of neurons in the RNN", "of neurons in the RNN layer keep_prob = 0.5 #", "RNN with 128 units Optimizer: Adam Epoch: 100 Loss: Cross", "Loss: Cross Entropy Activation function: Relu for network and Soft-max", "Relu for network and Soft-max for regression Regularization: Drop-out, keep_prob", "to_categorical, pad_sequences from data_denbigh import * X, Y = getDenbighData()", "from tflearn.data_utils import to_categorical, pad_sequences from data_denbigh import * X,", "optimizer='adam', learning_rate=learning_rate, loss='categorical_crossentropy') model = tflearn.DNN(net, tensorboard_verbose=0) model.fit(X, Y, validation_set=0.2,", "function: Relu for network and Soft-max for regression Regularization: Drop-out,", "coding: utf-8 -*- ''' Framework: Tensorflow Training samples: 1600 Validation", "set: 95% ''' from __future__ import division, print_function, absolute_import import", "400 RNN with 128 units Optimizer: Adam Epoch: 100 Loss:", "# Learning rate for mini-batch SGD batch_size = 32 #", "Soft-max for regression Regularization: Drop-out, keep_prob = 0.8 Accuracy of", "tflearn.embedding(net, input_dim=10000, output_dim=128) net = tflearn.simple_rnn(net, neurons_num, dropout=keep_prob) net =", "loss='categorical_crossentropy') model = tflearn.DNN(net, tensorboard_verbose=0) model.fit(X, Y, validation_set=0.2, show_metric=True, batch_size=batch_size,", "import * X, Y = getDenbighData() #Hyperparams neurons_num = 128", "the network net = tflearn.input_data([None, 5]) net = tflearn.embedding(net, input_dim=10000,", "drop-out regularization learning_rate = 0.001 # Learning rate for mini-batch", "net = tflearn.simple_rnn(net, neurons_num, dropout=keep_prob) net = tflearn.fully_connected(net, 2, activation='softmax')", "Cross Entropy Activation function: Relu for network and Soft-max for", "= to_categorical(Y, 2) #Build the network net = tflearn.input_data([None, 5])", "tflearn.regression(net, optimizer='adam', 
learning_rate=learning_rate, loss='categorical_crossentropy') model = tflearn.DNN(net, tensorboard_verbose=0) model.fit(X, Y,", "32 # Batch size n_epoch = 100 # Number of", "pad_sequences(X, maxlen=5, value=0.) Y = to_categorical(Y, 2) #Build the network", "import tflearn from tflearn.data_utils import to_categorical, pad_sequences from data_denbigh import", "100 Loss: Cross Entropy Activation function: Relu for network and", "__future__ import division, print_function, absolute_import import tflearn from tflearn.data_utils import", "tflearn.input_data([None, 5]) net = tflearn.embedding(net, input_dim=10000, output_dim=128) net = tflearn.simple_rnn(net,", "rate for mini-batch SGD batch_size = 32 # Batch size", "epoch #Data preprocessing/ Converting data to vector for the X", "0.8 Accuracy of Validation set: 95% ''' from __future__ import", "0.001 # Learning rate for mini-batch SGD batch_size = 32", "tflearn.simple_rnn(net, neurons_num, dropout=keep_prob) net = tflearn.fully_connected(net, 2, activation='softmax') net =", "Validation set: 95% ''' from __future__ import division, print_function, absolute_import", "data to vector for the X = pad_sequences(X, maxlen=5, value=0.)", "= tflearn.fully_connected(net, 2, activation='softmax') net = tflearn.regression(net, optimizer='adam', learning_rate=learning_rate, loss='categorical_crossentropy')", "for network and Soft-max for regression Regularization: Drop-out, keep_prob =", "-*- ''' Framework: Tensorflow Training samples: 1600 Validation samples: 400", "Optimizer: Adam Epoch: 100 Loss: Cross Entropy Activation function: Relu", "data_denbigh import * X, Y = getDenbighData() #Hyperparams neurons_num =", "network and Soft-max for regression Regularization: Drop-out, keep_prob = 0.8", "Accuracy of Validation set: 95% ''' from __future__ import division,", "absolute_import import tflearn from tflearn.data_utils import to_categorical, pad_sequences from data_denbigh", "Number of epoch #Data preprocessing/ Converting data 
to vector for", "2) #Build the network net = tflearn.input_data([None, 5]) net =", "neurons_num, dropout=keep_prob) net = tflearn.fully_connected(net, 2, activation='softmax') net = tflearn.regression(net,", "learning_rate = 0.001 # Learning rate for mini-batch SGD batch_size", "batch_size = 32 # Batch size n_epoch = 100 #", "* X, Y = getDenbighData() #Hyperparams neurons_num = 128 #", "= tflearn.input_data([None, 5]) net = tflearn.embedding(net, input_dim=10000, output_dim=128) net =", "tflearn.fully_connected(net, 2, activation='softmax') net = tflearn.regression(net, optimizer='adam', learning_rate=learning_rate, loss='categorical_crossentropy') model", "Y = to_categorical(Y, 2) #Build the network net = tflearn.input_data([None,", "division, print_function, absolute_import import tflearn from tflearn.data_utils import to_categorical, pad_sequences", "128 # Number of neurons in the RNN layer keep_prob", "samples: 1600 Validation samples: 400 RNN with 128 units Optimizer:", "Regularization: Drop-out, keep_prob = 0.8 Accuracy of Validation set: 95%", "RNN layer keep_prob = 0.5 # Keep probability for the", "X, Y = getDenbighData() #Hyperparams neurons_num = 128 # Number", "Batch size n_epoch = 100 # Number of epoch #Data", "layer keep_prob = 0.5 # Keep probability for the drop-out", "Y = getDenbighData() #Hyperparams neurons_num = 128 # Number of", "#Build the network net = tflearn.input_data([None, 5]) net = tflearn.embedding(net,", "import to_categorical, pad_sequences from data_denbigh import * X, Y =", "of Validation set: 95% ''' from __future__ import division, print_function,", "''' from __future__ import division, print_function, absolute_import import tflearn from", "Converting data to vector for the X = pad_sequences(X, maxlen=5,", "# Keep probability for the drop-out regularization learning_rate = 0.001", "model = tflearn.DNN(net, tensorboard_verbose=0) model.fit(X, Y, validation_set=0.2, show_metric=True, batch_size=batch_size, n_epoch=n_epoch)", "vector 
for the X = pad_sequences(X, maxlen=5, value=0.) Y =", "= tflearn.embedding(net, input_dim=10000, output_dim=128) net = tflearn.simple_rnn(net, neurons_num, dropout=keep_prob) net" ]
[ "x M x 2H q_tile = tf.transpose(q_tile, (1, 0, 3,", "as tf H = 2 N = 2 M =", "np.array([True, True, False]) # BS x M context_mask = np.tile(context_mask,", "test_concatenation(c2q, q2c) init = tf.global_variables_initializer() with tf.Session() as sess: sess.run(init)", "x M contexts = tf.expand_dims(contexts, -1) # BS x N", "np.array([1., 2., 3., 4.]) w_2 = np.array([5., 6., 7., 8.])", "x N x 2H x M contexts = tf.expand_dims(contexts, -1)", "padding, set logits to -large prob_dist = tf.nn.softmax(masked_logits, dim) return", "1)) output = tf.concat([c2q, q2c], axis=2) tf.assert_equal(tf.shape(output), [BS, N, 4*H])", "# BS x (NxM) x 2H tf.assert_equal(tf.shape(result), [BS, N*M, 2*H])", "BS x N x M # contexts = BS x", "[BS, 1, 1]) questions = tf.get_variable('questions', initializer=q) contexts = tf.get_variable('contexts',", "masked_softmax(S, mask, 2) # BS x N x M return", "= tf.expand_dims(question_mask, -1) # BS x M x 1 question_mask", "1 over given dimension. \"\"\" exp_mask = (1 - tf.cast(mask,", "(-1, M)) term3 = tf.matmul(tf.reshape(result, (BS * N * M,", "tf.expand_dims(w_sim_1, -1)) # BS x N term1 = tf.reshape(term1, (-1,", "same as logits, but with 1e30 subtracted (i.e. 
very large", "N)) term2 = tf.matmul(tf.reshape(questions, (BS * M, 2*H)), tf.expand_dims(w_sim_2, -1))", "# 2 * self.hidden_size q_tile = tf.tile(tf.expand_dims(questions, 0), [N, 1,", "11.]]]) # BS x M x 2H c = np.tile(c,", "N, 1)) return exp_array /sum_array def masked_softmax(logits, mask, dim): \"\"\"", "N x M # contexts = BS x N x", "2 * self.hidden_size w_sim_3 = tf.get_variable('w_sim_3', initializer=w_3) # 2 *", "1, 1]) q = np.tile(q, [BS, 1, 1]) questions =", "tf.transpose(question_mask, (0, 2, 1)) # BS x 1 x M", "x N x M return tf.matmul(alpha, questions) def test_build_q2c(S, S_mask,", "= arr - max_elements exp_array = np.exp(arr) print (exp_array) sum_array", "N * M, 2 * H)) # BS x (NxM)", "questions = tf.get_variable('questions', initializer=q) contexts = tf.get_variable('contexts', initializer=c) S =", "BS x N x 2H x 1 result = (contexts", "* tf.cast(S_mask, dtype=tf.float64), axis=2) # BS x N beta =", "array. We want to take softmax over dimension dim. mask:", "padding locations. Should sum to 1 over given dimension. \"\"\"", "# BS x N x 1 question_mask = tf.expand_dims(question_mask, -1)", "[BS, 1]) question_mask = np.tile(question_mask, [BS, 1]) context_mask = tf.get_variable('context_mask',", "# BS x N x M return sim_mask def test_build_c2q(S,", "test_concatenation(c2q, q2c): q2c = tf.tile(q2c, (1, N, 1)) output =", "dimension of logits. Inputs: logits: Numpy array. We want to", "N x 2H q = np.array([[[1., 2., 3., 0.], [5.,", "x 2H x M tf.assert_equal(tf.shape(result), [BS, N, 2 * H,", "N, M)) # BS x N x M S =", "logits. 
The result of taking softmax over masked_logits in given", "= tf.nn.softmax(masked_logits, dim) return masked_logits, prob_dist def test_build_similarity(contexts, questions): w_sim_1", "1)) return exp_array /sum_array def masked_softmax(logits, mask, dim): \"\"\" Takes", "+ tf.reshape(term2, (-1, 1, M)) return S def test_build_sim_mask(): context_mask", "1]) context_mask = tf.get_variable('context_mask', initializer=context_mask) question_mask = tf.get_variable('question_mask', initializer=question_mask) context_mask", "term1 = tf.reshape(term1, (-1, N)) term2 = tf.matmul(tf.reshape(questions, (BS *", "is the same as logits, but with 1e30 subtracted (i.e.", "successful!\") print (\"Context 2 Question attention\") m_r, beta_r, q2c_r =", "x 2H result = tf.reshape(result, (-1, N * M, 2", "to take softmax over dimension dim. mask: Numpy array of", "= tf.expand_dims(tf.nn.softmax(m), -1) # BS x N x 1 beta", "M]) result = tf.transpose(result, (0, 1, 3, 2)) # BS", "(-1, N)) term2 = tf.matmul(tf.reshape(questions, (BS * M, 2*H)), tf.expand_dims(w_sim_2,", "* H, M]) result = tf.transpose(result, (0, 1, 3, 2))", "+ term3 + tf.reshape(term2, (-1, 1, M)) return S def", "tf.assert_equal(tf.shape(result), [BS, N*M, 2*H]) # w_sim_1 = tf.tile(tf.expand_dims(w_sim_1, 0), [BS,", "(\"Context 2 Question attention\") m_r, beta_r, q2c_r = sess.run([m, beta,", "8.]]]) # BS x N x 2H q = np.array([[[1.,", "elsewhere print (exp_mask) masked_logits = tf.add(logits, exp_mask) # where there's", "6., 7., 4.], [8., 9. , 10., 11.]]]) # BS", "dtype=tf.int32)) # BS x N x M return sim_mask def", "We want to take softmax over dimension dim. mask: Numpy", "False]) # BS x M context_mask = np.tile(context_mask, [BS, 1])", ", 10., 11.]]]) # BS x M x 2H c", "if __name__== \"__main__\": w_1 = np.array([1., 2., 3., 4.]) w_2", "dim: int. 
dimension over which to take softmax Returns: masked_logits:", "return S def test_build_sim_mask(): context_mask = np.array([True, True]) # BS", "7., 8.]) w_3 = np.array([13., 12., 11., 10.]) c =", "np.reshape(np.max(arr, axis = 2), (BS, N, 1)) arr = arr", "print (exp_mask) masked_logits = tf.add(logits, exp_mask) # where there's padding,", "* M, 2* H)), tf.expand_dims(w_sim_3, -1)) term3 = tf.reshape(term3, (-1,", "= tf.matmul(beta, contexts) return m, beta, q2c def test_concatenation(c2q, q2c):", "BS x N x 2H q = np.array([[[1., 2., 3.,", "= np.tile(question_mask, [BS, 1]) context_mask = tf.get_variable('context_mask', initializer=context_mask) question_mask =", "= tf.tile(tf.expand_dims(w_sim_1, 0), [BS, 1]) # w_sim_2 = tf.tile(tf.expand_dims(w_sim_2, 0),", "_, alpha = masked_softmax(S, mask, 2) # BS x N", "BS x N x M x 2H result = tf.reshape(result,", "M, 2* H)), tf.expand_dims(w_sim_3, -1)) term3 = tf.reshape(term3, (-1, N,", "N beta = tf.expand_dims(tf.nn.softmax(m), -1) # BS x N x", "q_tile = tf.tile(tf.expand_dims(questions, 0), [N, 1, 1, 1]) # N", "= tf.matmul(tf.reshape(result, (BS * N * M, 2* H)), tf.expand_dims(w_sim_3,", "mask, contexts) output = test_concatenation(c2q, q2c) init = tf.global_variables_initializer() with", "is successful!\") print (\"Context 2 Question attention\") m_r, beta_r, q2c_r", "initializer=w_1) # 2 * H w_sim_2 = tf.get_variable('w_sim_2', initializer=w_2) #", "of taking softmax over masked_logits in given dimension. Should be", "q2c = test_build_q2c(S, mask, contexts) output = test_concatenation(c2q, q2c) init", "= tf.tile(tf.expand_dims(w_sim_3, 0), [BS, 1]) term1 = tf.matmul(tf.reshape(contexts, (BS *", "11., 10.]) c = np.array([[[1., 2., 3., 4.], [5., 6.,", "= np.tile(context_mask, [BS, 1]) question_mask = np.tile(question_mask, [BS, 1]) context_mask", "there's real data in logits, 0 where there's padding dim:", "4*H]) return output if __name__== \"__main__\": w_1 = np.array([1., 2.,", "subtracted (i.e. 
very large negative number) in the padding locations.", "term1 = tf.matmul(tf.reshape(contexts, (BS * N, 2*H)), tf.expand_dims(w_sim_1, -1)) #", "take softmax Returns: masked_logits: Numpy array same shape as logits.", "tf.cast(question_mask, dtype=tf.int32)) # BS x N x M return sim_mask", "= np.array([13., 12., 11., 10.]) c = np.array([[[1., 2., 3.,", "772, 1372], [548, 1828, 3140]]), [BS, 1, 1]) assert np.array_equal(actual_result,", "= tf.transpose(question_mask, (0, 2, 1)) # BS x 1 x", "# where there's padding, set logits to -large prob_dist =", "S = test_build_similarity(contexts, questions) mask = test_build_sim_mask() c2q = test_build_c2q(S,", "m_r, beta_r, q2c_r = sess.run([m, beta, q2c]) output_r = sess.run(output)", "logits, 0 where there's padding dim: int. dimension over which", "over given dimension of logits. Inputs: logits: Numpy array. We", "shape as logits. Has 1s where there's real data in", "x 1 x M sim_mask = tf.matmul(tf.cast(context_mask, dtype=tf.int32), tf.cast(question_mask, dtype=tf.int32))", "= tf.transpose(q_tile, (1, 0, 3, 2)) # BS x N", "tf.get_variable('questions', initializer=q) contexts = tf.get_variable('contexts', initializer=c) S = test_build_similarity(contexts, questions)", "2)) # BS x N x M x 2H result", "tf.assert_equal(tf.shape(result), [BS, N, 2 * H, M]) result = tf.transpose(result,", "H = 2 N = 2 M = 3 BS", "def test_concatenation(c2q, q2c): q2c = tf.tile(q2c, (1, N, 1)) output", "1]) q = np.tile(q, [BS, 1, 1]) questions = tf.get_variable('questions',", "to take softmax Returns: masked_logits: Numpy array same shape as", "(-1e30) # -large where there's padding, 0 elsewhere print (exp_mask)", "= tf.get_variable('w_sim_3', initializer=w_3) # 2 * self.hidden_size q_tile = tf.tile(tf.expand_dims(questions,", "\"\"\" exp_mask = (1 - tf.cast(mask, 'float64')) * (-1e30) #", "Takes masked softmax over given dimension of logits. Inputs: logits:", "shape as logits. 
This is the same as logits, but", "N x M return tf.matmul(alpha, questions) def test_build_q2c(S, S_mask, contexts):", "[N, 1, 1, 1]) # N x BS x M", "[8., 9. , 10., 11.]]]) # BS x M x", "x M term2 = tf.reshape(term2, (-1, M)) term3 = tf.matmul(tf.reshape(result,", "alpha = masked_softmax(S, mask, 2) # BS x N x", "(BS, N, 1)) arr = arr - max_elements exp_array =", "= masked_softmax(S, mask, 2) # BS x N x M", "test_build_similarity(contexts, questions) mask = test_build_sim_mask() c2q = test_build_c2q(S, mask, questions)", "H)), tf.expand_dims(w_sim_3, -1)) term3 = tf.reshape(term3, (-1, N, M)) #", "[BS, N, 4*H]) return output if __name__== \"__main__\": w_1 =", "initializer=q) contexts = tf.get_variable('contexts', initializer=c) S = test_build_similarity(contexts, questions) mask", "0 where there's padding dim: int. dimension over which to", "# w_sim_1 = tf.tile(tf.expand_dims(w_sim_1, 0), [BS, 1]) # w_sim_2 =", "test_build_similarity(contexts, questions): w_sim_1 = tf.get_variable('w_sim_1', initializer=w_1) # 2 * H", "7., 8.]]]) # BS x N x 2H q =", "print (\"Context 2 Question attention\") m_r, beta_r, q2c_r = sess.run([m,", "0), [N, 1, 1, 1]) # N x BS x", "softmax over masked_logits in given dimension. Should be 0 in", "tf.reshape(term3, (-1, N, M)) # BS x N x M", "return sim_mask def test_build_c2q(S, S_mask, questions): _, alpha = masked_softmax(S,", "locations. prob_dist: Numpy array same shape as logits. 
The result", "tf.cast(S_mask, dtype=tf.float64), axis=2) # BS x N beta = tf.expand_dims(tf.nn.softmax(m),", "tf.global_variables_initializer() with tf.Session() as sess: sess.run(init) S_result, mask_result, c2q_r =", "tf.transpose(result, (0, 1, 3, 2)) # BS x N x", "N x 1 beta = tf.transpose(beta, (0, 2, 1)) q2c", "x N x 2H x M tf.assert_equal(tf.shape(result), [BS, N, 2", "tf.assert_equal(tf.shape(output), [BS, N, 4*H]) return output if __name__== \"__main__\": w_1", "c2q = test_build_c2q(S, mask, questions) m, beta, q2c = test_build_q2c(S,", "[BS, 1]) # w_sim_3 = tf.tile(tf.expand_dims(w_sim_3, 0), [BS, 1]) term1", "as np import tensorflow as tf H = 2 N", "= (1 - tf.cast(mask, 'float64')) * (-1e30) # -large where", "= tf.concat([c2q, q2c], axis=2) tf.assert_equal(tf.shape(output), [BS, N, 4*H]) return output", "1e30 subtracted (i.e. very large negative number) in the padding", "in the padding locations. prob_dist: Numpy array same shape as", "0 elsewhere print (exp_mask) masked_logits = tf.add(logits, exp_mask) # where", "(-1, N, M)) # BS x N x M S", "m, beta, q2c = test_build_q2c(S, mask, contexts) output = test_concatenation(c2q,", "tf.get_variable('w_sim_2', initializer=w_2) # 2 * self.hidden_size w_sim_3 = tf.get_variable('w_sim_3', initializer=w_3)", "# BS x N x M x 2H result =", "0.], [5., 6., 7., 4.], [8., 9. , 10., 11.]]])", "= 2 M = 3 BS = 10 def my_softmax(arr):", "= np.array([5., 6., 7., 8.]) w_3 = np.array([13., 12., 11.,", "x 1 question_mask = tf.expand_dims(question_mask, -1) # BS x M", "term3 = tf.reshape(term3, (-1, N, M)) # BS x N", "BS x 1 x M sim_mask = tf.matmul(tf.cast(context_mask, dtype=tf.int32), tf.cast(question_mask,", "x M return tf.matmul(alpha, questions) def test_build_q2c(S, S_mask, contexts): #", "x 1 beta = tf.transpose(beta, (0, 2, 1)) q2c =", "same shape as logits. 
Has 1s where there's real data", "where there's padding, set logits to -large prob_dist = tf.nn.softmax(masked_logits,", "- max_elements exp_array = np.exp(arr) print (exp_array) sum_array = np.reshape(np.sum(exp_array,", "tf.expand_dims(contexts, -1) # BS x N x 2H x 1", "return exp_array /sum_array def masked_softmax(logits, mask, dim): \"\"\" Takes masked", "1 x M sim_mask = tf.matmul(tf.cast(context_mask, dtype=tf.int32), tf.cast(question_mask, dtype=tf.int32)) #", "* (-1e30) # -large where there's padding, 0 elsewhere print", "# BS x N x M return tf.matmul(alpha, questions) def", "= tf.get_variable('w_sim_1', initializer=w_1) # 2 * H w_sim_2 = tf.get_variable('w_sim_2',", "(1, 0, 3, 2)) # BS x N x 2H", "-large prob_dist = tf.nn.softmax(masked_logits, dim) return masked_logits, prob_dist def test_build_similarity(contexts,", "BS x N x M return tf.matmul(alpha, questions) def test_build_q2c(S,", "x (NxM) x 2H tf.assert_equal(tf.shape(result), [BS, N*M, 2*H]) # w_sim_1", "mask_result, c2q_r = sess.run([S, mask, c2q]) actual_result = np.tile(np.array([[228, 772,", "(0, 2, 1)) # BS x 1 x M sim_mask", "given dimension. \"\"\" exp_mask = (1 - tf.cast(mask, 'float64')) *", "not equal' print (\"Building similarity matrix is successful!\") print (\"Context", "BS x N x 2H x M contexts = tf.expand_dims(contexts,", "q = np.array([[[1., 2., 3., 0.], [5., 6., 7., 4.],", "1, 1]) questions = tf.get_variable('questions', initializer=q) contexts = tf.get_variable('contexts', initializer=c)", "given dimension of logits. Inputs: logits: Numpy array. We want", "1, 3, 2)) # BS x N x M x", "context_mask = np.tile(context_mask, [BS, 1]) question_mask = np.tile(question_mask, [BS, 1])", "context_mask = np.array([True, True]) # BS x N question_mask =", "= test_concatenation(c2q, q2c) init = tf.global_variables_initializer() with tf.Session() as sess:", "S_result), 'Arrays are not equal' print (\"Building similarity matrix is", "as logits. 
This is the same as logits, but with", "mask, dim): \"\"\" Takes masked softmax over given dimension of", "def test_build_sim_mask(): context_mask = np.array([True, True]) # BS x N", "2), (BS, N, 1)) arr = arr - max_elements exp_array", "= (contexts * q_tile) # BS x N x 2H", "= tf.get_variable('w_sim_2', initializer=w_2) # 2 * self.hidden_size w_sim_3 = tf.get_variable('w_sim_3',", "S = BS x N x M # contexts =", "of same shape as logits. Has 1s where there's real", "def test_build_similarity(contexts, questions): w_sim_1 = tf.get_variable('w_sim_1', initializer=w_1) # 2 *", "2H x M contexts = tf.expand_dims(contexts, -1) # BS x", "M x 2H c = np.tile(c, [BS, 1, 1]) q", "= tf.get_variable('question_mask', initializer=question_mask) context_mask = tf.expand_dims(context_mask, -1) # BS x", "sess.run([S, mask, c2q]) actual_result = np.tile(np.array([[228, 772, 1372], [548, 1828,", "attention\") m_r, beta_r, q2c_r = sess.run([m, beta, q2c]) output_r =", "[BS, N, 2 * H, M]) result = tf.transpose(result, (0,", "2., 3., 4.]) w_2 = np.array([5., 6., 7., 8.]) w_3", "# w_sim_2 = tf.tile(tf.expand_dims(w_sim_2, 0), [BS, 1]) # w_sim_3 =", "# BS x M x 2H c = np.tile(c, [BS,", "* M, 2 * H)) # BS x (NxM) x", "N x M S = tf.reshape(term1,(-1, N, 1)) + term3", "BS x M x 2H c = np.tile(c, [BS, 1,", "2* H)), tf.expand_dims(w_sim_3, -1)) term3 = tf.reshape(term3, (-1, N, M))", "x M return sim_mask def test_build_c2q(S, S_mask, questions): _, alpha", "padding locations. prob_dist: Numpy array same shape as logits. 
The", "= np.reshape(np.sum(exp_array, axis=2), (BS, N, 1)) return exp_array /sum_array def", "2H c = np.tile(c, [BS, 1, 1]) q = np.tile(q,", "10 def my_softmax(arr): max_elements = np.reshape(np.max(arr, axis = 2), (BS,", "2 Question attention\") m_r, beta_r, q2c_r = sess.run([m, beta, q2c])", "mask, questions) m, beta, q2c = test_build_q2c(S, mask, contexts) output", "contexts = tf.expand_dims(contexts, -1) # BS x N x 2H", "exp_array = np.exp(arr) print (exp_array) sum_array = np.reshape(np.sum(exp_array, axis=2), (BS,", "result = (contexts * q_tile) # BS x N x", "x M x 1 question_mask = tf.transpose(question_mask, (0, 2, 1))", "2) # BS x N x M return tf.matmul(alpha, questions)", "question_mask = tf.expand_dims(question_mask, -1) # BS x M x 1", "x M S = tf.reshape(term1,(-1, N, 1)) + term3 +", "* N * M, 2* H)), tf.expand_dims(w_sim_3, -1)) term3 =", "tf.get_variable('question_mask', initializer=question_mask) context_mask = tf.expand_dims(context_mask, -1) # BS x N", "M # contexts = BS x N x 2H m", "as sess: sess.run(init) S_result, mask_result, c2q_r = sess.run([S, mask, c2q])", "tf.reshape(term2, (-1, M)) term3 = tf.matmul(tf.reshape(result, (BS * N *", "x M sim_mask = tf.matmul(tf.cast(context_mask, dtype=tf.int32), tf.cast(question_mask, dtype=tf.int32)) # BS", "= tf.global_variables_initializer() with tf.Session() as sess: sess.run(init) S_result, mask_result, c2q_r", "__name__== \"__main__\": w_1 = np.array([1., 2., 3., 4.]) w_2 =", "x N x 1 question_mask = tf.expand_dims(question_mask, -1) # BS", "w_sim_2 = tf.tile(tf.expand_dims(w_sim_2, 0), [BS, 1]) # w_sim_3 = tf.tile(tf.expand_dims(w_sim_3,", "in padding locations. Should sum to 1 over given dimension.", "given dimension. Should be 0 in padding locations. 
Should sum", "x BS x M x 2H q_tile = tf.transpose(q_tile, (1,", "dtype=tf.int32), tf.cast(question_mask, dtype=tf.int32)) # BS x N x M return", "contexts) output = test_concatenation(c2q, q2c) init = tf.global_variables_initializer() with tf.Session()", "3., 4.]) w_2 = np.array([5., 6., 7., 8.]) w_3 =", "questions) mask = test_build_sim_mask() c2q = test_build_c2q(S, mask, questions) m,", "there's padding, set logits to -large prob_dist = tf.nn.softmax(masked_logits, dim)", "2 N = 2 M = 3 BS = 10", "softmax Returns: masked_logits: Numpy array same shape as logits. This", "N question_mask = np.array([True, True, False]) # BS x M", "N x 2H m = tf.reduce_max(S * tf.cast(S_mask, dtype=tf.float64), axis=2)", "= tf.get_variable('questions', initializer=q) contexts = tf.get_variable('contexts', initializer=c) S = test_build_similarity(contexts,", "term2 = tf.reshape(term2, (-1, M)) term3 = tf.matmul(tf.reshape(result, (BS *", "1372], [548, 1828, 3140]]), [BS, 1, 1]) assert np.array_equal(actual_result, S_result),", "want to take softmax over dimension dim. mask: Numpy array", "* H w_sim_2 = tf.get_variable('w_sim_2', initializer=w_2) # 2 * self.hidden_size", "tf.reshape(term1,(-1, N, 1)) + term3 + tf.reshape(term2, (-1, 1, M))", "# BS x N x M S = tf.reshape(term1,(-1, N,", "= tf.expand_dims(context_mask, -1) # BS x N x 1 question_mask", "test_build_sim_mask(): context_mask = np.array([True, True]) # BS x N question_mask", "over given dimension. 
\"\"\" exp_mask = (1 - tf.cast(mask, 'float64'))", "# contexts = BS x N x 2H m =", "S_mask, contexts): # S = BS x N x M", "1s where there's real data in logits, 0 where there's", "= 2 N = 2 M = 3 BS =", "w_sim_3 = tf.tile(tf.expand_dims(w_sim_3, 0), [BS, 1]) term1 = tf.matmul(tf.reshape(contexts, (BS", "def masked_softmax(logits, mask, dim): \"\"\" Takes masked softmax over given", "np.tile(question_mask, [BS, 1]) context_mask = tf.get_variable('context_mask', initializer=context_mask) question_mask = tf.get_variable('question_mask',", "* M, 2*H)), tf.expand_dims(w_sim_2, -1)) # BS x M term2", "'float64')) * (-1e30) # -large where there's padding, 0 elsewhere", "as logits, but with 1e30 subtracted (i.e. very large negative", "logits. Inputs: logits: Numpy array. We want to take softmax", "result = tf.reshape(result, (-1, N * M, 2 * H))", "initializer=w_2) # 2 * self.hidden_size w_sim_3 = tf.get_variable('w_sim_3', initializer=w_3) #", "axis=2) tf.assert_equal(tf.shape(output), [BS, N, 4*H]) return output if __name__== \"__main__\":", "The result of taking softmax over masked_logits in given dimension.", "taking softmax over masked_logits in given dimension. Should be 0", "w_sim_1 = tf.get_variable('w_sim_1', initializer=w_1) # 2 * H w_sim_2 =", "-1) # BS x M x 1 question_mask = tf.transpose(question_mask,", "w_1 = np.array([1., 2., 3., 4.]) w_2 = np.array([5., 6.,", "N term1 = tf.reshape(term1, (-1, N)) term2 = tf.matmul(tf.reshape(questions, (BS", "axis = 2), (BS, N, 1)) arr = arr -", "output = tf.concat([c2q, q2c], axis=2) tf.assert_equal(tf.shape(output), [BS, N, 4*H]) return", "of logits. Inputs: logits: Numpy array. 
We want to take", "M sim_mask = tf.matmul(tf.cast(context_mask, dtype=tf.int32), tf.cast(question_mask, dtype=tf.int32)) # BS x", "3140]]), [BS, 1, 1]) assert np.array_equal(actual_result, S_result), 'Arrays are not", "= 3 BS = 10 def my_softmax(arr): max_elements = np.reshape(np.max(arr,", "1]) assert np.array_equal(actual_result, S_result), 'Arrays are not equal' print (\"Building", "(NxM) x 2H tf.assert_equal(tf.shape(result), [BS, N*M, 2*H]) # w_sim_1 =", "8.]) w_3 = np.array([13., 12., 11., 10.]) c = np.array([[[1.,", "question_mask = np.tile(question_mask, [BS, 1]) context_mask = tf.get_variable('context_mask', initializer=context_mask) question_mask", "tf.reduce_max(S * tf.cast(S_mask, dtype=tf.float64), axis=2) # BS x N beta", "in given dimension. Should be 0 in padding locations. Should", "x N x M S = tf.reshape(term1,(-1, N, 1)) +", "N x 2H x M tf.assert_equal(tf.shape(result), [BS, N, 2 *", "= tf.matmul(tf.reshape(contexts, (BS * N, 2*H)), tf.expand_dims(w_sim_1, -1)) # BS", "tf.concat([c2q, q2c], axis=2) tf.assert_equal(tf.shape(output), [BS, N, 4*H]) return output if", "\"__main__\": w_1 = np.array([1., 2., 3., 4.]) w_2 = np.array([5.,", "c2q_r = sess.run([S, mask, c2q]) actual_result = np.tile(np.array([[228, 772, 1372],", "c2q]) actual_result = np.tile(np.array([[228, 772, 1372], [548, 1828, 3140]]), [BS,", "array of same shape as logits. 
Has 1s where there's", "self.hidden_size q_tile = tf.tile(tf.expand_dims(questions, 0), [N, 1, 1, 1]) #", "contexts): # S = BS x N x M #", "1)) # BS x 1 x M sim_mask = tf.matmul(tf.cast(context_mask,", "np.tile(context_mask, [BS, 1]) question_mask = np.tile(question_mask, [BS, 1]) context_mask =", "6., 7., 8.]]]) # BS x N x 2H q", "x N x M x 2H result = tf.reshape(result, (-1,", "Has 1s where there's real data in logits, 0 where", "tf.tile(tf.expand_dims(w_sim_2, 0), [BS, 1]) # w_sim_3 = tf.tile(tf.expand_dims(w_sim_3, 0), [BS,", "self.hidden_size w_sim_3 = tf.get_variable('w_sim_3', initializer=w_3) # 2 * self.hidden_size q_tile", "question_mask = tf.get_variable('question_mask', initializer=question_mask) context_mask = tf.expand_dims(context_mask, -1) # BS", "Should sum to 1 over given dimension. \"\"\" exp_mask =", "sim_mask = tf.matmul(tf.cast(context_mask, dtype=tf.int32), tf.cast(question_mask, dtype=tf.int32)) # BS x N", "x 2H m = tf.reduce_max(S * tf.cast(S_mask, dtype=tf.float64), axis=2) #", "x N x 2H q = np.array([[[1., 2., 3., 0.],", "= np.tile(np.array([[228, 772, 1372], [548, 1828, 3140]]), [BS, 1, 1])", "def test_build_c2q(S, S_mask, questions): _, alpha = masked_softmax(S, mask, 2)", "Question attention\") m_r, beta_r, q2c_r = sess.run([m, beta, q2c]) output_r", "2 * H w_sim_2 = tf.get_variable('w_sim_2', initializer=w_2) # 2 *", "logits, but with 1e30 subtracted (i.e. 
very large negative number)", "padding, 0 elsewhere print (exp_mask) masked_logits = tf.add(logits, exp_mask) #", "2H tf.assert_equal(tf.shape(result), [BS, N*M, 2*H]) # w_sim_1 = tf.tile(tf.expand_dims(w_sim_1, 0),", "= np.tile(q, [BS, 1, 1]) questions = tf.get_variable('questions', initializer=q) contexts", "1]) # w_sim_2 = tf.tile(tf.expand_dims(w_sim_2, 0), [BS, 1]) # w_sim_3", "(BS * M, 2*H)), tf.expand_dims(w_sim_2, -1)) # BS x M", "arr = arr - max_elements exp_array = np.exp(arr) print (exp_array)", "beta, q2c = test_build_q2c(S, mask, contexts) output = test_concatenation(c2q, q2c)", "q2c) init = tf.global_variables_initializer() with tf.Session() as sess: sess.run(init) S_result,", "= tf.get_variable('context_mask', initializer=context_mask) question_mask = tf.get_variable('question_mask', initializer=question_mask) context_mask = tf.expand_dims(context_mask,", "3, 2)) # BS x N x M x 2H", "(BS * N, 2*H)), tf.expand_dims(w_sim_1, -1)) # BS x N", "2, 1)) # BS x 1 x M sim_mask =", "BS x N beta = tf.expand_dims(tf.nn.softmax(m), -1) # BS x", "context_mask = tf.get_variable('context_mask', initializer=context_mask) question_mask = tf.get_variable('question_mask', initializer=question_mask) context_mask =", "to -large prob_dist = tf.nn.softmax(masked_logits, dim) return masked_logits, prob_dist def", "[5., 6., 7., 8.]]]) # BS x N x 2H", "np.tile(c, [BS, 1, 1]) q = np.tile(q, [BS, 1, 1])", "mask, c2q]) actual_result = np.tile(np.array([[228, 772, 1372], [548, 1828, 3140]]),", "1)) arr = arr - max_elements exp_array = np.exp(arr) print", "m, beta, q2c def test_concatenation(c2q, q2c): q2c = tf.tile(q2c, (1,", "x N x M return sim_mask def test_build_c2q(S, S_mask, questions):", "tf.cast(mask, 'float64')) * (-1e30) # -large where there's padding, 0", "np.exp(arr) print (exp_array) sum_array = np.reshape(np.sum(exp_array, axis=2), (BS, N, 1))", "array same shape as logits. 
The result of taking softmax", "1]) term1 = tf.matmul(tf.reshape(contexts, (BS * N, 2*H)), tf.expand_dims(w_sim_1, -1))", "4.]) w_2 = np.array([5., 6., 7., 8.]) w_3 = np.array([13.,", "where there's real data in logits, 0 where there's padding", "Numpy array same shape as logits. The result of taking", "where there's padding, 0 elsewhere print (exp_mask) masked_logits = tf.add(logits,", "tf.matmul(tf.cast(context_mask, dtype=tf.int32), tf.cast(question_mask, dtype=tf.int32)) # BS x N x M", "as logits. The result of taking softmax over masked_logits in", "sess: sess.run(init) S_result, mask_result, c2q_r = sess.run([S, mask, c2q]) actual_result", "term3 + tf.reshape(term2, (-1, 1, M)) return S def test_build_sim_mask():", "[BS, 1, 1]) assert np.array_equal(actual_result, S_result), 'Arrays are not equal'", "H, M]) result = tf.transpose(result, (0, 1, 3, 2)) #", "N x M x 2H result = tf.reshape(result, (-1, N", "contexts) return m, beta, q2c def test_concatenation(c2q, q2c): q2c =", "tf.get_variable('w_sim_1', initializer=w_1) # 2 * H w_sim_2 = tf.get_variable('w_sim_2', initializer=w_2)", "3 BS = 10 def my_softmax(arr): max_elements = np.reshape(np.max(arr, axis", "= tf.transpose(result, (0, 1, 3, 2)) # BS x N", "= np.array([True, True, False]) # BS x M context_mask =", "sum to 1 over given dimension. 
\"\"\" exp_mask = (1", "x 2H x M contexts = tf.expand_dims(contexts, -1) # BS", "my_softmax(arr): max_elements = np.reshape(np.max(arr, axis = 2), (BS, N, 1))", "2H result = tf.reshape(result, (-1, N * M, 2 *", "tf.matmul(tf.reshape(questions, (BS * M, 2*H)), tf.expand_dims(w_sim_2, -1)) # BS x", "q2c = tf.tile(q2c, (1, N, 1)) output = tf.concat([c2q, q2c],", "w_3 = np.array([13., 12., 11., 10.]) c = np.array([[[1., 2.,", "# BS x N beta = tf.expand_dims(tf.nn.softmax(m), -1) # BS", "1, 1]) assert np.array_equal(actual_result, S_result), 'Arrays are not equal' print", "q2c = tf.matmul(beta, contexts) return m, beta, q2c def test_concatenation(c2q,", "2, 1)) q2c = tf.matmul(beta, contexts) return m, beta, q2c", "x M # contexts = BS x N x 2H", "Numpy array same shape as logits. This is the same", "np.array([[[1., 2., 3., 0.], [5., 6., 7., 4.], [8., 9.", "1, 1, 1]) # N x BS x M x", "M return sim_mask def test_build_c2q(S, S_mask, questions): _, alpha =", "S_mask, questions): _, alpha = masked_softmax(S, mask, 2) # BS", "1, 1]) # N x BS x M x 2H", "tf.matmul(alpha, questions) def test_build_q2c(S, S_mask, contexts): # S = BS", "= tf.get_variable('contexts', initializer=c) S = test_build_similarity(contexts, questions) mask = test_build_sim_mask()", "assert np.array_equal(actual_result, S_result), 'Arrays are not equal' print (\"Building similarity", "\"\"\" Takes masked softmax over given dimension of logits. Inputs:", "Numpy array of same shape as logits. 
Has 1s where", "BS x M x 2H q_tile = tf.transpose(q_tile, (1, 0,", "x N x M # contexts = BS x N", "-large where there's padding, 0 elsewhere print (exp_mask) masked_logits =", "actual_result = np.tile(np.array([[228, 772, 1372], [548, 1828, 3140]]), [BS, 1,", "= 2), (BS, N, 1)) arr = arr - max_elements", "tf.reshape(term1, (-1, N)) term2 = tf.matmul(tf.reshape(questions, (BS * M, 2*H)),", "tf.tile(q2c, (1, N, 1)) output = tf.concat([c2q, q2c], axis=2) tf.assert_equal(tf.shape(output),", "masked_logits, prob_dist def test_build_similarity(contexts, questions): w_sim_1 = tf.get_variable('w_sim_1', initializer=w_1) #", "output if __name__== \"__main__\": w_1 = np.array([1., 2., 3., 4.])", "tf.tile(tf.expand_dims(w_sim_3, 0), [BS, 1]) term1 = tf.matmul(tf.reshape(contexts, (BS * N,", "return m, beta, q2c def test_concatenation(c2q, q2c): q2c = tf.tile(q2c,", "initializer=question_mask) context_mask = tf.expand_dims(context_mask, -1) # BS x N x", "import numpy as np import tensorflow as tf H =", "= np.array([1., 2., 3., 4.]) w_2 = np.array([5., 6., 7.,", "x N question_mask = np.array([True, True, False]) # BS x", "logits. This is the same as logits, but with 1e30", "q_tile) # BS x N x 2H x M tf.assert_equal(tf.shape(result),", "1 question_mask = tf.expand_dims(question_mask, -1) # BS x M x", "np.array([[[1., 2., 3., 4.], [5., 6., 7., 8.]]]) # BS", "test_build_q2c(S, mask, contexts) output = test_concatenation(c2q, q2c) init = tf.global_variables_initializer()", "BS x N x 2H x M tf.assert_equal(tf.shape(result), [BS, N,", "initializer=c) S = test_build_similarity(contexts, questions) mask = test_build_sim_mask() c2q =", "= tf.reshape(term1, (-1, N)) term2 = tf.matmul(tf.reshape(questions, (BS * M,", "question_mask = tf.transpose(question_mask, (0, 2, 1)) # BS x 1", "= tf.transpose(beta, (0, 2, 1)) q2c = tf.matmul(beta, contexts) return", "over which to take softmax Returns: masked_logits: Numpy array same", "Should be 0 in padding locations. 
Should sum to 1", "result = tf.transpose(result, (0, 1, 3, 2)) # BS x", "x 2H q = np.array([[[1., 2., 3., 0.], [5., 6.,", "np.reshape(np.sum(exp_array, axis=2), (BS, N, 1)) return exp_array /sum_array def masked_softmax(logits,", "tf.matmul(tf.reshape(result, (BS * N * M, 2* H)), tf.expand_dims(w_sim_3, -1))", "be 0 in padding locations. Should sum to 1 over", "initializer=w_3) # 2 * self.hidden_size q_tile = tf.tile(tf.expand_dims(questions, 0), [N,", "* q_tile) # BS x N x 2H x M", "0), [BS, 1]) # w_sim_2 = tf.tile(tf.expand_dims(w_sim_2, 0), [BS, 1])", "-1)) # BS x M term2 = tf.reshape(term2, (-1, M))", "return tf.matmul(alpha, questions) def test_build_q2c(S, S_mask, contexts): # S =", "tf.transpose(q_tile, (1, 0, 3, 2)) # BS x N x", "2H m = tf.reduce_max(S * tf.cast(S_mask, dtype=tf.float64), axis=2) # BS", "test_build_sim_mask() c2q = test_build_c2q(S, mask, questions) m, beta, q2c =", "numpy as np import tensorflow as tf H = 2", "masked_softmax(logits, mask, dim): \"\"\" Takes masked softmax over given dimension", "x N x 1 beta = tf.transpose(beta, (0, 2, 1))", "9. , 10., 11.]]]) # BS x M x 2H", "= np.array([[[1., 2., 3., 0.], [5., 6., 7., 4.], [8.,", "(0, 1, 3, 2)) # BS x N x M", "q2c): q2c = tf.tile(q2c, (1, N, 1)) output = tf.concat([c2q,", "dimension. Should be 0 in padding locations. Should sum to", "Numpy array. 
We want to take softmax over dimension dim.", "x 1 result = (contexts * q_tile) # BS x", "# BS x N x 2H x M tf.assert_equal(tf.shape(result), [BS,", "tf.reshape(result, (-1, N * M, 2 * H)) # BS", "N x M return sim_mask def test_build_c2q(S, S_mask, questions): _,", "contexts = tf.get_variable('contexts', initializer=c) S = test_build_similarity(contexts, questions) mask =", "/sum_array def masked_softmax(logits, mask, dim): \"\"\" Takes masked softmax over", "(1 - tf.cast(mask, 'float64')) * (-1e30) # -large where there's", "= np.array([True, True]) # BS x N question_mask = np.array([True,", "= 10 def my_softmax(arr): max_elements = np.reshape(np.max(arr, axis = 2),", "return output if __name__== \"__main__\": w_1 = np.array([1., 2., 3.,", "there's padding dim: int. dimension over which to take softmax", "M x 2H q_tile = tf.transpose(q_tile, (1, 0, 3, 2))", "x M context_mask = np.tile(context_mask, [BS, 1]) question_mask = np.tile(question_mask,", "= sess.run([S, mask, c2q]) actual_result = np.tile(np.array([[228, 772, 1372], [548,", "x 2H c = np.tile(c, [BS, 1, 1]) q =", "beta, q2c def test_concatenation(c2q, q2c): q2c = tf.tile(q2c, (1, N,", "tf.add(logits, exp_mask) # where there's padding, set logits to -large", "M)) # BS x N x M S = tf.reshape(term1,(-1,", "term2 = tf.matmul(tf.reshape(questions, (BS * M, 2*H)), tf.expand_dims(w_sim_2, -1)) #", "sim_mask def test_build_c2q(S, S_mask, questions): _, alpha = masked_softmax(S, mask,", "to 1 over given dimension. \"\"\" exp_mask = (1 -", "tf.tile(tf.expand_dims(questions, 0), [N, 1, 1, 1]) # N x BS", "but with 1e30 subtracted (i.e. 
very large negative number) in", "tf.expand_dims(context_mask, -1) # BS x N x 1 question_mask =", "np.array_equal(actual_result, S_result), 'Arrays are not equal' print (\"Building similarity matrix", "tf.transpose(beta, (0, 2, 1)) q2c = tf.matmul(beta, contexts) return m,", "1]) # N x BS x M x 2H q_tile", "S def test_build_sim_mask(): context_mask = np.array([True, True]) # BS x", "2 M = 3 BS = 10 def my_softmax(arr): max_elements", "= tf.add(logits, exp_mask) # where there's padding, set logits to", "tf.nn.softmax(masked_logits, dim) return masked_logits, prob_dist def test_build_similarity(contexts, questions): w_sim_1 =", "m = tf.reduce_max(S * tf.cast(S_mask, dtype=tf.float64), axis=2) # BS x", "q2c], axis=2) tf.assert_equal(tf.shape(output), [BS, N, 4*H]) return output if __name__==", "BS x N x 1 question_mask = tf.expand_dims(question_mask, -1) #", "locations. Should sum to 1 over given dimension. \"\"\" exp_mask", "# BS x N question_mask = np.array([True, True, False]) #", "-1) # BS x N x 1 question_mask = tf.expand_dims(question_mask,", "test_build_c2q(S, mask, questions) m, beta, q2c = test_build_q2c(S, mask, contexts)", "(i.e. very large negative number) in the padding locations. 
prob_dist:", "2*H)), tf.expand_dims(w_sim_1, -1)) # BS x N term1 = tf.reshape(term1,", "tf.reshape(term2, (-1, 1, M)) return S def test_build_sim_mask(): context_mask =", "c = np.tile(c, [BS, 1, 1]) q = np.tile(q, [BS,", "BS x M x 1 question_mask = tf.transpose(question_mask, (0, 2,", "x M x 2H result = tf.reshape(result, (-1, N *", "2 * H)) # BS x (NxM) x 2H tf.assert_equal(tf.shape(result),", "M x 2H result = tf.reshape(result, (-1, N * M,", "masked_logits = tf.add(logits, exp_mask) # where there's padding, set logits", "H w_sim_2 = tf.get_variable('w_sim_2', initializer=w_2) # 2 * self.hidden_size w_sim_3", "(exp_array) sum_array = np.reshape(np.sum(exp_array, axis=2), (BS, N, 1)) return exp_array", "there's padding, 0 elsewhere print (exp_mask) masked_logits = tf.add(logits, exp_mask)", "N x 2H x 1 result = (contexts * q_tile)", "logits. Has 1s where there's real data in logits, 0", "1 beta = tf.transpose(beta, (0, 2, 1)) q2c = tf.matmul(beta,", "12., 11., 10.]) c = np.array([[[1., 2., 3., 4.], [5.,", "BS x N x M S = tf.reshape(term1,(-1, N, 1))", "2 * self.hidden_size q_tile = tf.tile(tf.expand_dims(questions, 0), [N, 1, 1,", "4.], [8., 9. , 10., 11.]]]) # BS x M", "M = 3 BS = 10 def my_softmax(arr): max_elements =", "-1) # BS x N x 1 beta = tf.transpose(beta,", "M)) term3 = tf.matmul(tf.reshape(result, (BS * N * M, 2*", "tf.matmul(tf.reshape(contexts, (BS * N, 2*H)), tf.expand_dims(w_sim_1, -1)) # BS x", "1]) questions = tf.get_variable('questions', initializer=q) contexts = tf.get_variable('contexts', initializer=c) S", "dtype=tf.float64), axis=2) # BS x N beta = tf.expand_dims(tf.nn.softmax(m), -1)", "axis=2), (BS, N, 1)) return exp_array /sum_array def masked_softmax(logits, mask,", "arr - max_elements exp_array = np.exp(arr) print (exp_array) sum_array =", "(BS * N * M, 2* H)), tf.expand_dims(w_sim_3, -1)) term3", "where there's padding dim: int. 
dimension over which to take", "True]) # BS x N question_mask = np.array([True, True, False])", "exp_mask = (1 - tf.cast(mask, 'float64')) * (-1e30) # -large", "same shape as logits. The result of taking softmax over", "def test_build_q2c(S, S_mask, contexts): # S = BS x N", "print (\"Building similarity matrix is successful!\") print (\"Context 2 Question", "2H x 1 result = (contexts * q_tile) # BS", "2H q = np.array([[[1., 2., 3., 0.], [5., 6., 7.,", "M term2 = tf.reshape(term2, (-1, M)) term3 = tf.matmul(tf.reshape(result, (BS", "axis=2) # BS x N beta = tf.expand_dims(tf.nn.softmax(m), -1) #", "1)) q2c = tf.matmul(beta, contexts) return m, beta, q2c def", "7., 4.], [8., 9. , 10., 11.]]]) # BS x", "* self.hidden_size w_sim_3 = tf.get_variable('w_sim_3', initializer=w_3) # 2 * self.hidden_size", "-1)) term3 = tf.reshape(term3, (-1, N, M)) # BS x", "# BS x N term1 = tf.reshape(term1, (-1, N)) term2", "question_mask = np.array([True, True, False]) # BS x M context_mask", "tf H = 2 N = 2 M = 3", "= tf.reshape(term3, (-1, N, M)) # BS x N x", "10.]) c = np.array([[[1., 2., 3., 4.], [5., 6., 7.,", "test_build_c2q(S, S_mask, questions): _, alpha = masked_softmax(S, mask, 2) #", "with tf.Session() as sess: sess.run(init) S_result, mask_result, c2q_r = sess.run([S,", "masked_logits in given dimension. 
Should be 0 in padding locations.", "x 2H q_tile = tf.transpose(q_tile, (1, 0, 3, 2)) #", "M context_mask = np.tile(context_mask, [BS, 1]) question_mask = np.tile(question_mask, [BS,", "x 1 question_mask = tf.transpose(question_mask, (0, 2, 1)) # BS", "= tf.tile(tf.expand_dims(w_sim_2, 0), [BS, 1]) # w_sim_3 = tf.tile(tf.expand_dims(w_sim_3, 0),", "= test_build_similarity(contexts, questions) mask = test_build_sim_mask() c2q = test_build_c2q(S, mask,", "[BS, 1]) # w_sim_2 = tf.tile(tf.expand_dims(w_sim_2, 0), [BS, 1]) #", "(contexts * q_tile) # BS x N x 2H x", "exp_mask) # where there's padding, set logits to -large prob_dist", "[BS, 1]) context_mask = tf.get_variable('context_mask', initializer=context_mask) question_mask = tf.get_variable('question_mask', initializer=question_mask)", "padding dim: int. dimension over which to take softmax Returns:", "masked_logits: Numpy array same shape as logits. This is the", "same shape as logits. This is the same as logits,", "# BS x M term2 = tf.reshape(term2, (-1, M)) term3", "q = np.tile(q, [BS, 1, 1]) questions = tf.get_variable('questions', initializer=q)", "the same as logits, but with 1e30 subtracted (i.e. very", "# BS x N x 2H q = np.array([[[1., 2.,", "w_2 = np.array([5., 6., 7., 8.]) w_3 = np.array([13., 12.,", "= tf.reshape(result, (-1, N * M, 2 * H)) #", "tf.Session() as sess: sess.run(init) S_result, mask_result, c2q_r = sess.run([S, mask,", "-1) # BS x N x 2H x 1 result", "q_tile = tf.transpose(q_tile, (1, 0, 3, 2)) # BS x", "# BS x M x 1 question_mask = tf.transpose(question_mask, (0,", "over masked_logits in given dimension. 
Should be 0 in padding", "(exp_mask) masked_logits = tf.add(logits, exp_mask) # where there's padding, set", "logits to -large prob_dist = tf.nn.softmax(masked_logits, dim) return masked_logits, prob_dist", "M S = tf.reshape(term1,(-1, N, 1)) + term3 + tf.reshape(term2,", "2)) # BS x N x 2H x M contexts", "x N x 2H m = tf.reduce_max(S * tf.cast(S_mask, dtype=tf.float64),", "4.], [5., 6., 7., 8.]]]) # BS x N x", "1]) # w_sim_3 = tf.tile(tf.expand_dims(w_sim_3, 0), [BS, 1]) term1 =", "= np.tile(c, [BS, 1, 1]) q = np.tile(q, [BS, 1,", "tf.get_variable('w_sim_3', initializer=w_3) # 2 * self.hidden_size q_tile = tf.tile(tf.expand_dims(questions, 0),", "= tf.reduce_max(S * tf.cast(S_mask, dtype=tf.float64), axis=2) # BS x N", "dimension. \"\"\" exp_mask = (1 - tf.cast(mask, 'float64')) * (-1e30)", "BS x N x M return sim_mask def test_build_c2q(S, S_mask,", "prob_dist: Numpy array same shape as logits. The result of", "data in logits, 0 where there's padding dim: int. dimension", "1828, 3140]]), [BS, 1, 1]) assert np.array_equal(actual_result, S_result), 'Arrays are", "as logits. Has 1s where there's real data in logits,", "BS = 10 def my_softmax(arr): max_elements = np.reshape(np.max(arr, axis =", "dim. mask: Numpy array of same shape as logits. Has", "(1, N, 1)) output = tf.concat([c2q, q2c], axis=2) tf.assert_equal(tf.shape(output), [BS,", "questions): _, alpha = masked_softmax(S, mask, 2) # BS x", "q2c def test_concatenation(c2q, q2c): q2c = tf.tile(q2c, (1, N, 1))", "N, 2*H)), tf.expand_dims(w_sim_1, -1)) # BS x N term1 =", "* N, 2*H)), tf.expand_dims(w_sim_1, -1)) # BS x N term1", "M x 1 question_mask = tf.transpose(question_mask, (0, 2, 1)) #", "BS x N x 2H m = tf.reduce_max(S * tf.cast(S_mask,", "in logits, 0 where there's padding dim: int. 
dimension over", "2H q_tile = tf.transpose(q_tile, (1, 0, 3, 2)) # BS", "# 2 * H w_sim_2 = tf.get_variable('w_sim_2', initializer=w_2) # 2", "1]) question_mask = np.tile(question_mask, [BS, 1]) context_mask = tf.get_variable('context_mask', initializer=context_mask)", "6., 7., 8.]) w_3 = np.array([13., 12., 11., 10.]) c", "= BS x N x 2H m = tf.reduce_max(S *", "result of taking softmax over masked_logits in given dimension. Should", "2*H)), tf.expand_dims(w_sim_2, -1)) # BS x M term2 = tf.reshape(term2,", "[5., 6., 7., 4.], [8., 9. , 10., 11.]]]) #", "int. dimension over which to take softmax Returns: masked_logits: Numpy", "(0, 2, 1)) q2c = tf.matmul(beta, contexts) return m, beta,", "sess.run(init) S_result, mask_result, c2q_r = sess.run([S, mask, c2q]) actual_result =", "S = tf.reshape(term1,(-1, N, 1)) + term3 + tf.reshape(term2, (-1,", "0, 3, 2)) # BS x N x 2H x", "[548, 1828, 3140]]), [BS, 1, 1]) assert np.array_equal(actual_result, S_result), 'Arrays", "2H x M tf.assert_equal(tf.shape(result), [BS, N, 2 * H, M])", "Inputs: logits: Numpy array. We want to take softmax over", "tf.get_variable('context_mask', initializer=context_mask) question_mask = tf.get_variable('question_mask', initializer=question_mask) context_mask = tf.expand_dims(context_mask, -1)", "10., 11.]]]) # BS x M x 2H c =", "2 * H, M]) result = tf.transpose(result, (0, 1, 3,", "# BS x N x 2H x 1 result =", "questions) def test_build_q2c(S, S_mask, contexts): # S = BS x", "prob_dist def test_build_similarity(contexts, questions): w_sim_1 = tf.get_variable('w_sim_1', initializer=w_1) # 2", "softmax over dimension dim. 
mask: Numpy array of same shape", "init = tf.global_variables_initializer() with tf.Session() as sess: sess.run(init) S_result, mask_result,", "= tf.tile(tf.expand_dims(questions, 0), [N, 1, 1, 1]) # N x", "output = test_concatenation(c2q, q2c) init = tf.global_variables_initializer() with tf.Session() as", "real data in logits, 0 where there's padding dim: int.", "BS x M context_mask = np.tile(context_mask, [BS, 1]) question_mask =", "M tf.assert_equal(tf.shape(result), [BS, N, 2 * H, M]) result =", "* H)) # BS x (NxM) x 2H tf.assert_equal(tf.shape(result), [BS,", "term3 = tf.matmul(tf.reshape(result, (BS * N * M, 2* H)),", "BS x (NxM) x 2H tf.assert_equal(tf.shape(result), [BS, N*M, 2*H]) #", "N*M, 2*H]) # w_sim_1 = tf.tile(tf.expand_dims(w_sim_1, 0), [BS, 1]) #", "masked softmax over given dimension of logits. Inputs: logits: Numpy", "x N x 2H x 1 result = (contexts *", "0), [BS, 1]) term1 = tf.matmul(tf.reshape(contexts, (BS * N, 2*H)),", "x N term1 = tf.reshape(term1, (-1, N)) term2 = tf.matmul(tf.reshape(questions,", "max_elements = np.reshape(np.max(arr, axis = 2), (BS, N, 1)) arr", "BS x N question_mask = np.array([True, True, False]) # BS", "set logits to -large prob_dist = tf.nn.softmax(masked_logits, dim) return masked_logits,", "# BS x N x 2H x M contexts =", "[BS, 1]) term1 = tf.matmul(tf.reshape(contexts, (BS * N, 2*H)), tf.expand_dims(w_sim_1,", "= tf.reshape(term1,(-1, N, 1)) + term3 + tf.reshape(term2, (-1, 1,", "N, 1)) arr = arr - max_elements exp_array = np.exp(arr)", "BS x N x 1 beta = tf.transpose(beta, (0, 2,", "3., 0.], [5., 6., 7., 4.], [8., 9. , 10.,", "max_elements exp_array = np.exp(arr) print (exp_array) sum_array = np.reshape(np.sum(exp_array, axis=2),", "np.array([13., 12., 11., 10.]) c = np.array([[[1., 2., 3., 4.],", "# S = BS x N x M # contexts", "over dimension dim. mask: Numpy array of same shape as", "x M x 2H c = np.tile(c, [BS, 1, 1])", "softmax over given dimension of logits. 
Inputs: logits: Numpy array.", "M return tf.matmul(alpha, questions) def test_build_q2c(S, S_mask, contexts): # S", "N, 4*H]) return output if __name__== \"__main__\": w_1 = np.array([1.,", "test_build_q2c(S, S_mask, contexts): # S = BS x N x", "print (exp_array) sum_array = np.reshape(np.sum(exp_array, axis=2), (BS, N, 1)) return", "N, 1)) + term3 + tf.reshape(term2, (-1, 1, M)) return", "1, M)) return S def test_build_sim_mask(): context_mask = np.array([True, True])", "M contexts = tf.expand_dims(contexts, -1) # BS x N x", "tf.matmul(beta, contexts) return m, beta, q2c def test_concatenation(c2q, q2c): q2c", "# w_sim_3 = tf.tile(tf.expand_dims(w_sim_3, 0), [BS, 1]) term1 = tf.matmul(tf.reshape(contexts,", "(-1, 1, M)) return S def test_build_sim_mask(): context_mask = np.array([True,", "M)) return S def test_build_sim_mask(): context_mask = np.array([True, True]) #", "mask: Numpy array of same shape as logits. Has 1s", "exp_array /sum_array def masked_softmax(logits, mask, dim): \"\"\" Takes masked softmax", "# BS x M context_mask = np.tile(context_mask, [BS, 1]) question_mask", "number) in the padding locations. prob_dist: Numpy array same shape", "2*H]) # w_sim_1 = tf.tile(tf.expand_dims(w_sim_1, 0), [BS, 1]) # w_sim_2", "H)) # BS x (NxM) x 2H tf.assert_equal(tf.shape(result), [BS, N*M,", "N, 1)) output = tf.concat([c2q, q2c], axis=2) tf.assert_equal(tf.shape(output), [BS, N,", "= tf.expand_dims(contexts, -1) # BS x N x 2H x", "= tf.reshape(term2, (-1, M)) term3 = tf.matmul(tf.reshape(result, (BS * N", "= BS x N x M # contexts = BS", "2., 3., 4.], [5., 6., 7., 8.]]]) # BS x", "array same shape as logits. This is the same as", "very large negative number) in the padding locations. 
prob_dist: Numpy", "x N beta = tf.expand_dims(tf.nn.softmax(m), -1) # BS x N", "are not equal' print (\"Building similarity matrix is successful!\") print", "= tf.matmul(tf.reshape(questions, (BS * M, 2*H)), tf.expand_dims(w_sim_2, -1)) # BS", "1 question_mask = tf.transpose(question_mask, (0, 2, 1)) # BS x", "- tf.cast(mask, 'float64')) * (-1e30) # -large where there's padding,", "S_result, mask_result, c2q_r = sess.run([S, mask, c2q]) actual_result = np.tile(np.array([[228,", "questions) m, beta, q2c = test_build_q2c(S, mask, contexts) output =", "tf.get_variable('contexts', initializer=c) S = test_build_similarity(contexts, questions) mask = test_build_sim_mask() c2q", "3, 2)) # BS x N x 2H x M", "= test_build_sim_mask() c2q = test_build_c2q(S, mask, questions) m, beta, q2c", "BS x N term1 = tf.reshape(term1, (-1, N)) term2 =", "N x 1 question_mask = tf.expand_dims(question_mask, -1) # BS x", "= tf.tile(q2c, (1, N, 1)) output = tf.concat([c2q, q2c], axis=2)", "the padding locations. prob_dist: Numpy array same shape as logits.", "N, 2 * H, M]) result = tf.transpose(result, (0, 1,", "M, 2 * H)) # BS x (NxM) x 2H", "True, False]) # BS x M context_mask = np.tile(context_mask, [BS,", "= np.reshape(np.max(arr, axis = 2), (BS, N, 1)) arr =", "-1)) # BS x N term1 = tf.reshape(term1, (-1, N))", "(-1, N * M, 2 * H)) # BS x", "N = 2 M = 3 BS = 10 def", "context_mask = tf.expand_dims(context_mask, -1) # BS x N x 1", "matrix is successful!\") print (\"Context 2 Question attention\") m_r, beta_r,", "prob_dist = tf.nn.softmax(masked_logits, dim) return masked_logits, prob_dist def test_build_similarity(contexts, questions):", "= np.exp(arr) print (exp_array) sum_array = np.reshape(np.sum(exp_array, axis=2), (BS, N,", "dimension over which to take softmax Returns: masked_logits: Numpy array", "# BS x 1 x M sim_mask = tf.matmul(tf.cast(context_mask, dtype=tf.int32),", "tf.expand_dims(question_mask, -1) # BS x M x 1 question_mask =", "dim) return masked_logits, prob_dist def 
test_build_similarity(contexts, questions): w_sim_1 = tf.get_variable('w_sim_1',", "tf.expand_dims(tf.nn.softmax(m), -1) # BS x N x 1 beta =", "return masked_logits, prob_dist def test_build_similarity(contexts, questions): w_sim_1 = tf.get_variable('w_sim_1', initializer=w_1)", "w_sim_3 = tf.get_variable('w_sim_3', initializer=w_3) # 2 * self.hidden_size q_tile =", "shape as logits. The result of taking softmax over masked_logits", "c = np.array([[[1., 2., 3., 4.], [5., 6., 7., 8.]]])", "tf.expand_dims(w_sim_2, -1)) # BS x M term2 = tf.reshape(term2, (-1,", "This is the same as logits, but with 1e30 subtracted", "(\"Building similarity matrix is successful!\") print (\"Context 2 Question attention\")", "x 2H tf.assert_equal(tf.shape(result), [BS, N*M, 2*H]) # w_sim_1 = tf.tile(tf.expand_dims(w_sim_1,", "np.array([5., 6., 7., 8.]) w_3 = np.array([13., 12., 11., 10.])", "np.tile(np.array([[228, 772, 1372], [548, 1828, 3140]]), [BS, 1, 1]) assert", "sum_array = np.reshape(np.sum(exp_array, axis=2), (BS, N, 1)) return exp_array /sum_array", "1 result = (contexts * q_tile) # BS x N", "dimension dim. mask: Numpy array of same shape as logits.", "= test_build_c2q(S, mask, questions) m, beta, q2c = test_build_q2c(S, mask,", "= np.array([[[1., 2., 3., 4.], [5., 6., 7., 8.]]]) #", "np import tensorflow as tf H = 2 N =", "w_sim_2 = tf.get_variable('w_sim_2', initializer=w_2) # 2 * self.hidden_size w_sim_3 =", "= tf.matmul(tf.cast(context_mask, dtype=tf.int32), tf.cast(question_mask, dtype=tf.int32)) # BS x N x", "equal' print (\"Building similarity matrix is successful!\") print (\"Context 2", "logits: Numpy array. We want to take softmax over dimension", "large negative number) in the padding locations. 
prob_dist: Numpy array", "0), [BS, 1]) # w_sim_3 = tf.tile(tf.expand_dims(w_sim_3, 0), [BS, 1])", "similarity matrix is successful!\") print (\"Context 2 Question attention\") m_r,", "initializer=context_mask) question_mask = tf.get_variable('question_mask', initializer=question_mask) context_mask = tf.expand_dims(context_mask, -1) #", "tf.expand_dims(w_sim_3, -1)) term3 = tf.reshape(term3, (-1, N, M)) # BS", "beta = tf.expand_dims(tf.nn.softmax(m), -1) # BS x N x 1", "BS x M term2 = tf.reshape(term2, (-1, M)) term3 =", "mask = test_build_sim_mask() c2q = test_build_c2q(S, mask, questions) m, beta,", "= test_build_q2c(S, mask, contexts) output = test_concatenation(c2q, q2c) init =", "# 2 * self.hidden_size w_sim_3 = tf.get_variable('w_sim_3', initializer=w_3) # 2", "tf.tile(tf.expand_dims(w_sim_1, 0), [BS, 1]) # w_sim_2 = tf.tile(tf.expand_dims(w_sim_2, 0), [BS,", "dim): \"\"\" Takes masked softmax over given dimension of logits.", "[BS, N*M, 2*H]) # w_sim_1 = tf.tile(tf.expand_dims(w_sim_1, 0), [BS, 1])", "Returns: masked_logits: Numpy array same shape as logits. This is", "* self.hidden_size q_tile = tf.tile(tf.expand_dims(questions, 0), [N, 1, 1, 1])", "'Arrays are not equal' print (\"Building similarity matrix is successful!\")", "3., 4.], [5., 6., 7., 8.]]]) # BS x N", "tensorflow as tf H = 2 N = 2 M", "which to take softmax Returns: masked_logits: Numpy array same shape", "0 in padding locations. Should sum to 1 over given", "# BS x N x 1 beta = tf.transpose(beta, (0,", "np.array([True, True]) # BS x N question_mask = np.array([True, True,", "N * M, 2* H)), tf.expand_dims(w_sim_3, -1)) term3 = tf.reshape(term3,", "[BS, 1, 1]) q = np.tile(q, [BS, 1, 1]) questions", "np.tile(q, [BS, 1, 1]) questions = tf.get_variable('questions', initializer=q) contexts =", "beta = tf.transpose(beta, (0, 2, 1)) q2c = tf.matmul(beta, contexts)", "N x BS x M x 2H q_tile = tf.transpose(q_tile,", "with 1e30 subtracted (i.e. 
very large negative number) in the", "take softmax over dimension dim. mask: Numpy array of same", "x 2H x 1 result = (contexts * q_tile) #", "negative number) in the padding locations. prob_dist: Numpy array same", "(BS, N, 1)) return exp_array /sum_array def masked_softmax(logits, mask, dim):", "import tensorflow as tf H = 2 N = 2", "mask, 2) # BS x N x M return tf.matmul(alpha,", "1)) + term3 + tf.reshape(term2, (-1, 1, M)) return S", "contexts = BS x N x 2H m = tf.reduce_max(S", "2., 3., 0.], [5., 6., 7., 4.], [8., 9. ,", "w_sim_1 = tf.tile(tf.expand_dims(w_sim_1, 0), [BS, 1]) # w_sim_2 = tf.tile(tf.expand_dims(w_sim_2,", "# N x BS x M x 2H q_tile =", "M, 2*H)), tf.expand_dims(w_sim_2, -1)) # BS x M term2 =", "N x 2H x M contexts = tf.expand_dims(contexts, -1) #", "def my_softmax(arr): max_elements = np.reshape(np.max(arr, axis = 2), (BS, N,", "questions): w_sim_1 = tf.get_variable('w_sim_1', initializer=w_1) # 2 * H w_sim_2", "x M tf.assert_equal(tf.shape(result), [BS, N, 2 * H, M]) result", "# -large where there's padding, 0 elsewhere print (exp_mask) masked_logits" ]
[ "@pytest.mark.parametrize(\"stddev\", [-1, 0, 'a']) def test_smooth_gaussian_bad(simulated_spectra, stddev): \"\"\" Test MexicanHat1DKernel", "np import pytest from astropy import convolution from scipy.signal import", "pytest.raises(ValueError): box_smooth(spec1, width) @pytest.mark.parametrize(\"stddev\", [1, 2.3]) def test_smooth_gaussian_good(simulated_spectra, stddev): \"\"\"", "= convolution.convolve(flux_original, custom_kernel) # Calculate the custom smoothed spec1_smoothed =", "spec1 = simulated_spectra.s1_um_mJy_e1 # Test bad input parameters with pytest.raises(ValueError):", "median filter, but less so for convolution based smoothing if", "1. Compare the smoothed flux from the astropy machinery vs", "flux_original.value) # Check the input and output units assert spec1.wavelength.unit", "two things to compare for each set of smoothing: 1.", "a number greater than 0. \"\"\" # Create the spectrum", "flux_smoothed_astropy = convolution.convolve(flux_original, gaussian_kernel) # Test gaussian smoothing spec1_smoothed =", "with correct parmaeters. \"\"\" # Create the original spectrum spec1", "we want to compare to flux_smoothed_astropy = medfilt(flux_original, width) #", "the kernel = 1). In this second case the rtol", "weird asymmetric-ness) numpy_kernel = np.array([0.5, 1, 2, 0.5, 0.2]) numpy_kernel", "the smoothed to the original. assert np.allclose(sum(flux_smooth1), sum(flux_original), rtol=rtol) def", "spec1 = simulated_spectra.s1_um_mJy_e1 # Test bad input paramters with pytest.raises(ValueError):", "smoothed spec1_smoothed = convolution_smooth(spec1, custom_kernel) compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value) @pytest.mark.parametrize(\"width\", [1,", "make a difference for median filter, but less so for", "little more difficult as smoothing will make a difference for", "custom_kernel) # Calculate the custom smoothed spec1_smoothed = convolution_smooth(spec1, custom_kernel)", "specutils. 
This is done by comparing flux_smooth1 and flux_smooth2. 2.", "want to compare to flux_smoothed_astropy = medfilt(flux_original, width) # Test", "need to be a number greater than 0. \"\"\" #", "difference for median filter, but less so for convolution based", "the flux_smoothed which is what we want to compare to", "spectrum spec1 = simulated_spectra.s1_um_mJy_e1 # Test bad input parameters with", "0. \"\"\" # Create the spectrum spec1 = simulated_spectra.s1_um_mJy_e1 #", "flux_original.value, rtol=0.15) # Check the input and output units assert", "is normalized (area under the kernel = 1). In this", "used judiciously. \"\"\" # Compare, element by element, the two", "under the kernel = 1). In this second case the", "compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value, rtol=0.02) # Check the input and output", "spectral flux of the smoothed to the original. assert np.allclose(sum(flux_smooth1),", "each set of smoothing: 1. Compare the smoothed flux from", "Test CustomKernel smoothing with correct parmaeters. \"\"\" # Create the", "\"\"\" Test Box1DKernel smoothing with incorrect parmaeters. Width values need", "to compare to flux_smoothed_astropy = medfilt(flux_original, width) # Test median", "a custom kernel (some weird asymmetric-ness) numpy_kernel = np.array([0.5, 1,", "[-1, 0, 'a']) def test_smooth_gaussian_bad(simulated_spectra, stddev): \"\"\" Test MexicanHat1DKernel smoothing", "flux_smoothed which is what we want to compare to flux_smoothed_astropy", "import pytest from astropy import convolution from scipy.signal import medfilt", "things to compare for each set of smoothing: 1. 
Compare", "Test bad input paramters with pytest.raises(ValueError): gaussian_smooth(spec1, stddev) @pytest.mark.parametrize(\"stddev\", [1,", "# Test median smoothing spec1_smoothed = median_smooth(spec1, width) compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy,", "\"\"\" There are two things to compare for each set", "[1, 3, 9]) def test_smooth_median_good(simulated_spectra, width): \"\"\" Test Median smoothing", "Create the spectrum spec1 = simulated_spectra.s1_um_mJy_e1 flux_original = spec1.flux #", "scipy.signal import medfilt import astropy.units as u from ..spectra.spectrum1d import", "2. Next we want to compare the smoothed flux to", "from ..spectra.spectrum1d import Spectrum1D from ..tests.spectral_examples import simulated_spectra from ..manipulation.smoothing", "flux_original = spec1.flux # Calculate the smoothed flux using Astropy", "smoothed flux to the original flux. This is a little", "width): \"\"\" Test Median smoothing with correct parmaeters. Width values", "= convolution_smooth(spec1, custom_kernel) compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value) @pytest.mark.parametrize(\"width\", [1, 2.3]) def", "the spectrum spec1 = simulated_spectra.s1_um_mJy_e1 # Test bad input parameters", "than 0. \"\"\" # Create the spectrum spec1 = simulated_spectra.s1_um_mJy_e1", "the input and output units assert spec1.wavelength.unit == spec1_smoothed.wavelength.unit assert", "using Astropy gaussian_kernel = convolution.Gaussian1DKernel(stddev) flux_smoothed_astropy = convolution.convolve(flux_original, gaussian_kernel) #", "smoothed spec1_smoothed = box_smooth(spec1, width) compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value) # Check", "Test MexicanHat1DKernel smoothing with incorrect parmaeters. Standard deviation values need", "\"\"\" Test Trapezoid1DKernel smoothing with incorrect parmaeters. 
Standard deviation values", "spectrum spec1 = simulated_spectra.s1_um_mJy_e1 flux_original = spec1.flux # Calculate the", "bad input paramters with pytest.raises(ValueError): gaussian_smooth(spec1, stddev) @pytest.mark.parametrize(\"stddev\", [1, 2.3])", "assert spec1.flux.unit == spec1_smoothed.flux.unit @pytest.mark.parametrize(\"stddev\", [-1, 0, 'a']) def test_smooth_gaussian_bad(simulated_spectra,", "Calculate the custom smoothed spec1_smoothed = convolution_smooth(spec1, custom_kernel) compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy,", "what we want to compare to flux_smoothed_astropy = medfilt(flux_original, width)", "rtol=0.15) # Check the input and output units assert spec1.wavelength.unit", "compare_flux(flux_smooth1, flux_smooth2, flux_original, rtol=0.01): \"\"\" There are two things to", "Compare, element by element, the two smoothed fluxes. assert np.allclose(flux_smooth1,", "smoothing with incorrect parmaeters. Standard deviation values need to be", "spectrum spec1 = simulated_spectra.s1_um_mJy_e1 flux_original = spec1.flux # Create the", "[-1, 0, 'a']) def test_smooth_trapezoid_bad(simulated_spectra, stddev): \"\"\" Test Trapezoid1DKernel smoothing", "Create the spectrum spec1 = simulated_spectra.s1_um_mJy_e1 # Test bad input", "flux to the original flux. This is a little more", "2.3]) def test_smooth_trapezoid_good(simulated_spectra, stddev): \"\"\" Test Trapezoid1DKernel smoothing with correct", "smoothed to the original. assert np.allclose(sum(flux_smooth1), sum(flux_original), rtol=rtol) def test_smooth_custom_kernel(simulated_spectra):", "correct parmaeters. 
\"\"\" # Create the original spectrum spec1 =", "Calculate the box smoothed spec1_smoothed = box_smooth(spec1, width) compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy,", "import simulated_spectra from ..manipulation.smoothing import (convolution_smooth, box_smooth, gaussian_smooth, trapezoid_smooth, median_smooth)", "spec1_smoothed.wavelength.unit assert spec1.flux.unit == spec1_smoothed.flux.unit @pytest.mark.parametrize(\"stddev\", [-1, 0, 'a']) def", "= convolution.Gaussian1DKernel(stddev) flux_smoothed_astropy = convolution.convolve(flux_original, gaussian_kernel) # Test gaussian smoothing", "smoothing with correct parmaeters. Standard deviation values need to be", "rtol=0.01): \"\"\" There are two things to compare for each", "assert spec1.wavelength.unit == spec1_smoothed.wavelength.unit assert spec1.flux.unit == spec1_smoothed.flux.unit @pytest.mark.parametrize(\"width\", [-1,", "# Compare, element by element, the two smoothed fluxes. assert", "Test Trapezoid1DKernel smoothing with correct parmaeters. Standard deviation values need", "= numpy_kernel / np.sum(numpy_kernel) custom_kernel = convolution.CustomKernel(numpy_kernel) flux_smoothed_astropy = convolution.convolve(flux_original,", "\"\"\" Test Gaussian1DKernel smoothing with correct parmaeters. 
Standard deviation values", "this second case the rtol (relative tolerance) is used judiciously.", "convolution based smoothing if the kernel is normalized (area under", "Test median smoothing spec1_smoothed = median_smooth(spec1, width) compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value,", "width) compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value, rtol=0.15) # Check the input and", "a difference for median filter, but less so for convolution", "0, 'a']) def test_smooth_median_bad(simulated_spectra, width): \"\"\" Test Median smoothing with", "Test trapezoid smoothing spec1_smoothed = trapezoid_smooth(spec1, stddev) compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value)", "stddev): \"\"\" Test Trapezoid1DKernel smoothing with incorrect parmaeters. Standard deviation", "np.array([0.5, 1, 2, 0.5, 0.2]) numpy_kernel = numpy_kernel / np.sum(numpy_kernel)", "we want to compare to trapezoid_kernel = convolution.Trapezoid1DKernel(stddev) flux_smoothed_astropy =", "trapezoid_smooth, median_smooth) def compare_flux(flux_smooth1, flux_smooth2, flux_original, rtol=0.01): \"\"\" There are", "import (convolution_smooth, box_smooth, gaussian_smooth, trapezoid_smooth, median_smooth) def compare_flux(flux_smooth1, flux_smooth2, flux_original,", "to compare to trapezoid_kernel = convolution.Trapezoid1DKernel(stddev) flux_smoothed_astropy = convolution.convolve(flux_original, trapezoid_kernel)", "smoothing will make a difference for median filter, but less", "the original spectrum spec1 = simulated_spectra.s1_um_mJy_e1 flux_original = spec1.flux #", "Standard deviation values need to be a number greater than", "the spectrum spec1 = simulated_spectra.s1_um_mJy_e1 # Test bad parameters with", "from ..manipulation.smoothing import (convolution_smooth, box_smooth, gaussian_smooth, trapezoid_smooth, median_smooth) def compare_flux(flux_smooth1,", "total spectral flux of the smoothed to the 
original. assert", "= np.array([0.5, 1, 2, 0.5, 0.2]) numpy_kernel = numpy_kernel /", "case the rtol (relative tolerance) is used judiciously. \"\"\" #", "for convolution based smoothing if the kernel is normalized (area", "greater than 0. \"\"\" # Create the spectrum spec1 =", "CustomKernel smoothing with correct parmaeters. \"\"\" # Create the original", "flux_smoothed_astropy, flux_original.value, rtol=0.02) # Check the input and output units", "astropy machinery vs the smoothed flux from specutils. This is", "== spec1_smoothed.flux.unit @pytest.mark.parametrize(\"stddev\", [-1, 0, 'a']) def test_smooth_gaussian_bad(simulated_spectra, stddev): \"\"\"", "spec1.flux # Create the flux_smoothed which is what we want", "# Calculate the smoothed flux using Astropy gaussian_kernel = convolution.Gaussian1DKernel(stddev)", "(relative tolerance) is used judiciously. \"\"\" # Compare, element by", "= spec1.flux # Create the flux_smoothed which is what we", "values need to be a number greater than 0. \"\"\"", "'a']) def test_smooth_gaussian_bad(simulated_spectra, stddev): \"\"\" Test MexicanHat1DKernel smoothing with incorrect", "will make a difference for median filter, but less so", "# Create the flux_smoothed which is what we want to", "the astropy machinery vs the smoothed flux from specutils. This", "Test bad input parameters with pytest.raises(ValueError): box_smooth(spec1, width) @pytest.mark.parametrize(\"stddev\", [1,", "which is what we want to compare to flux_smoothed_astropy =", "@pytest.mark.parametrize(\"width\", [-1, 0, 'a']) def test_smooth_box_bad(simulated_spectra, width): \"\"\" Test Box1DKernel", "is used judiciously. \"\"\" # Compare, element by element, the", "stddev): \"\"\" Test Trapezoid1DKernel smoothing with correct parmaeters. 
Standard deviation", "convolution.convolve(flux_original, trapezoid_kernel) # Test trapezoid smoothing spec1_smoothed = trapezoid_smooth(spec1, stddev)", "spec1 = simulated_spectra.s1_um_mJy_e1 flux_original = spec1.flux # Create the flux_smoothed", "\"\"\" Test Median smoothing with correct parmaeters. Width values need", "from the astropy machinery vs the smoothed flux from specutils.", "with correct parmaeters. Width values need to be a number", "astropy import convolution from scipy.signal import medfilt import astropy.units as", "In this second case the rtol (relative tolerance) is used", "parameters with pytest.raises(ValueError): trapezoid_smooth(spec1, stddev) @pytest.mark.parametrize(\"width\", [1, 3, 9]) def", "number greater than 0. \"\"\" # Create the spectrum spec1", "def test_smooth_trapezoid_bad(simulated_spectra, stddev): \"\"\" Test Trapezoid1DKernel smoothing with incorrect parmaeters.", "spec1.flux.unit == spec1_smoothed.flux.unit @pytest.mark.parametrize(\"width\", [-1, 0, 'a']) def test_smooth_box_bad(simulated_spectra, width):", "element by element, the two smoothed fluxes. assert np.allclose(flux_smooth1, flux_smooth2)", "= simulated_spectra.s1_um_mJy_e1 flux_original = spec1.flux # Create a custom kernel", "0. \"\"\" # Create the spectrum spec1 = simulated_spectra.s1_um_mJy_e1 flux_original", "# Create the original spectrum spec1 = simulated_spectra.s1_um_mJy_e1 flux_original =", "== spec1_smoothed.flux.unit @pytest.mark.parametrize(\"width\", [-1, 0, 'a']) def test_smooth_box_bad(simulated_spectra, width): \"\"\"", "judiciously. \"\"\" # Compare, element by element, the two smoothed", "Test Box1DKernel smoothing with correct parmaeters. 
Width values need to", "spec1_smoothed = trapezoid_smooth(spec1, stddev) compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value) # Check the", "convolution.convolve(flux_original, gaussian_kernel) # Test gaussian smoothing spec1_smoothed = gaussian_smooth(spec1, stddev)", "/ np.sum(numpy_kernel) custom_kernel = convolution.CustomKernel(numpy_kernel) flux_smoothed_astropy = convolution.convolve(flux_original, custom_kernel) #", "Astropy gaussian_kernel = convolution.Gaussian1DKernel(stddev) flux_smoothed_astropy = convolution.convolve(flux_original, gaussian_kernel) # Test", "compare to flux_smoothed_astropy = medfilt(flux_original, width) # Test median smoothing", "original spectrum spec1 = simulated_spectra.s1_um_mJy_e1 flux_original = spec1.flux # Calculate", "spec1.flux # Calculate the smoothed flux using Astropy box_kernel =", "== spec1_smoothed.wavelength.unit assert spec1.flux.unit == spec1_smoothed.flux.unit @pytest.mark.parametrize(\"stddev\", [-1, 0, 'a'])", "smoothing spec1_smoothed = gaussian_smooth(spec1, stddev) compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value, rtol=0.02) #", "2.3]) def test_smooth_gaussian_good(simulated_spectra, stddev): \"\"\" Test Gaussian1DKernel smoothing with correct", "gaussian_kernel = convolution.Gaussian1DKernel(stddev) flux_smoothed_astropy = convolution.convolve(flux_original, gaussian_kernel) # Test gaussian", "median_smooth) def compare_flux(flux_smooth1, flux_smooth2, flux_original, rtol=0.01): \"\"\" There are two", "incorrect parmaeters. Standard deviation values need to be a number", "done by comparing flux_smooth1 and flux_smooth2. 2. 
Next we want", "kernel (some weird asymmetric-ness) numpy_kernel = np.array([0.5, 1, 2, 0.5,", "width) @pytest.mark.parametrize(\"stddev\", [1, 2.3]) def test_smooth_gaussian_good(simulated_spectra, stddev): \"\"\" Test Gaussian1DKernel", "spec1.flux.unit == spec1_smoothed.flux.unit @pytest.mark.parametrize(\"stddev\", [-1, 0, 'a']) def test_smooth_trapezoid_bad(simulated_spectra, stddev):", "= simulated_spectra.s1_um_mJy_e1 flux_original = spec1.flux # Calculate the smoothed flux", "test_smooth_box_good(simulated_spectra, width): \"\"\" Test Box1DKernel smoothing with correct parmaeters. Width", "by element, the two smoothed fluxes. assert np.allclose(flux_smooth1, flux_smooth2) #", "spectrum spec1 = simulated_spectra.s1_um_mJy_e1 flux_original = spec1.flux # Create a", "= convolution.convolve(flux_original, gaussian_kernel) # Test gaussian smoothing spec1_smoothed = gaussian_smooth(spec1,", "# Test bad input parameters with pytest.raises(ValueError): box_smooth(spec1, width) @pytest.mark.parametrize(\"stddev\",", "np.sum(numpy_kernel) custom_kernel = convolution.CustomKernel(numpy_kernel) flux_smoothed_astropy = convolution.convolve(flux_original, custom_kernel) # Calculate", "input and output units assert spec1.wavelength.unit == spec1_smoothed.wavelength.unit assert spec1.flux.unit", "= box_smooth(spec1, width) compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value) # Check the input", "of the smoothed to the original. assert np.allclose(sum(flux_smooth1), sum(flux_original), rtol=rtol)", "(convolution_smooth, box_smooth, gaussian_smooth, trapezoid_smooth, median_smooth) def compare_flux(flux_smooth1, flux_smooth2, flux_original, rtol=0.01):", "box_smooth(spec1, width) @pytest.mark.parametrize(\"stddev\", [1, 2.3]) def test_smooth_gaussian_good(simulated_spectra, stddev): \"\"\" Test", "the two smoothed fluxes. 
assert np.allclose(flux_smooth1, flux_smooth2) # Compare the", "2.3]) def test_smooth_box_good(simulated_spectra, width): \"\"\" Test Box1DKernel smoothing with correct", "assert spec1.wavelength.unit == spec1_smoothed.wavelength.unit assert spec1.flux.unit == spec1_smoothed.flux.unit @pytest.mark.parametrize(\"stddev\", [-1,", "as smoothing will make a difference for median filter, but", "smoothing if the kernel is normalized (area under the kernel", "the smoothed flux using Astropy box_kernel = convolution.Box1DKernel(width) flux_smoothed_astropy =", "0.2]) numpy_kernel = numpy_kernel / np.sum(numpy_kernel) custom_kernel = convolution.CustomKernel(numpy_kernel) flux_smoothed_astropy", "want to compare to trapezoid_kernel = convolution.Trapezoid1DKernel(stddev) flux_smoothed_astropy = convolution.convolve(flux_original,", "from astropy import convolution from scipy.signal import medfilt import astropy.units", "the original flux. This is a little more difficult as", "flux of the smoothed to the original. assert np.allclose(sum(flux_smooth1), sum(flux_original),", "the box smoothed spec1_smoothed = box_smooth(spec1, width) compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value)", "box_smooth(spec1, width) compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value) # Check the input and", "Trapezoid1DKernel smoothing with incorrect parmaeters. Standard deviation values need to", "smoothing spec1_smoothed = median_smooth(spec1, width) compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value, rtol=0.15) #", "Compare the smoothed flux from the astropy machinery vs the", "median smoothing spec1_smoothed = median_smooth(spec1, width) compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value, rtol=0.15)", "Gaussian1DKernel smoothing with correct parmaeters. 
Standard deviation values need to", "\"\"\" # Create the spectrum spec1 = simulated_spectra.s1_um_mJy_e1 flux_original =", "@pytest.mark.parametrize(\"width\", [1, 2.3]) def test_smooth_box_good(simulated_spectra, width): \"\"\" Test Box1DKernel smoothing", "3, 9]) def test_smooth_median_good(simulated_spectra, width): \"\"\" Test Median smoothing with", "2, 0.5, 0.2]) numpy_kernel = numpy_kernel / np.sum(numpy_kernel) custom_kernel =", "numpy as np import pytest from astropy import convolution from", "we want to compare the smoothed flux to the original", "flux_smoothed_astropy = convolution.convolve(flux_original, custom_kernel) # Calculate the custom smoothed spec1_smoothed", "Calculate the smoothed flux using Astropy box_kernel = convolution.Box1DKernel(width) flux_smoothed_astropy", "test_smooth_custom_kernel(simulated_spectra): \"\"\" Test CustomKernel smoothing with correct parmaeters. \"\"\" #", "custom_kernel) compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value) @pytest.mark.parametrize(\"width\", [1, 2.3]) def test_smooth_box_good(simulated_spectra, width):", "@pytest.mark.parametrize(\"width\", [1, 3, 9]) def test_smooth_median_good(simulated_spectra, width): \"\"\" Test Median", "smoothed flux from specutils. This is done by comparing flux_smooth1", "spec1.flux # Calculate the smoothed flux using Astropy gaussian_kernel =", "Astropy box_kernel = convolution.Box1DKernel(width) flux_smoothed_astropy = convolution.convolve(flux_original, box_kernel) # Calculate", "median_smooth(spec1, width) compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value, rtol=0.15) # Check the input", "kernel is normalized (area under the kernel = 1). 
In", "bad input parameters with pytest.raises(ValueError): box_smooth(spec1, width) @pytest.mark.parametrize(\"stddev\", [1, 2.3])", "with pytest.raises(ValueError): box_smooth(spec1, width) @pytest.mark.parametrize(\"stddev\", [1, 2.3]) def test_smooth_gaussian_good(simulated_spectra, stddev):", "def test_smooth_gaussian_good(simulated_spectra, stddev): \"\"\" Test Gaussian1DKernel smoothing with correct parmaeters.", "(area under the kernel = 1). In this second case", "= simulated_spectra.s1_um_mJy_e1 flux_original = spec1.flux # Create the flux_smoothed which", "flux_original = spec1.flux # Create a custom kernel (some weird", "want to compare the smoothed flux to the original flux.", "[-1, 0, 'a']) def test_smooth_median_bad(simulated_spectra, width): \"\"\" Test Median smoothing", "spec1_smoothed = convolution_smooth(spec1, custom_kernel) compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value) @pytest.mark.parametrize(\"width\", [1, 2.3])", "spec1_smoothed.flux.unit @pytest.mark.parametrize(\"stddev\", [-1, 0, 'a']) def test_smooth_gaussian_bad(simulated_spectra, stddev): \"\"\" Test", "smoothed fluxes. 
assert np.allclose(flux_smooth1, flux_smooth2) # Compare the total spectral", "pytest.raises(ValueError): gaussian_smooth(spec1, stddev) @pytest.mark.parametrize(\"stddev\", [1, 2.3]) def test_smooth_trapezoid_good(simulated_spectra, stddev): \"\"\"", "as np import pytest from astropy import convolution from scipy.signal", "== spec1_smoothed.flux.unit @pytest.mark.parametrize(\"stddev\", [-1, 0, 'a']) def test_smooth_trapezoid_bad(simulated_spectra, stddev): \"\"\"", "spec1_smoothed = median_smooth(spec1, width) compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value, rtol=0.15) # Check", "9]) def test_smooth_median_good(simulated_spectra, width): \"\"\" Test Median smoothing with correct", "Create the original spectrum spec1 = simulated_spectra.s1_um_mJy_e1 flux_original = spec1.flux", "Width values need to be a number greater than 0.", "= convolution.convolve(flux_original, box_kernel) # Calculate the box smoothed spec1_smoothed =", "def test_smooth_custom_kernel(simulated_spectra): \"\"\" Test CustomKernel smoothing with correct parmaeters. \"\"\"", "assert spec1.flux.unit == spec1_smoothed.flux.unit @pytest.mark.parametrize(\"width\", [-1, 0, 'a']) def test_smooth_median_bad(simulated_spectra,", "@pytest.mark.parametrize(\"stddev\", [1, 2.3]) def test_smooth_gaussian_good(simulated_spectra, stddev): \"\"\" Test Gaussian1DKernel smoothing", "import convolution from scipy.signal import medfilt import astropy.units as u", "\"\"\" Test Median smoothing with incorrect parmaeters. 
Width values need", "spec1_smoothed = gaussian_smooth(spec1, stddev) compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value, rtol=0.02) # Check", "trapezoid_smooth(spec1, stddev) compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value) # Check the input and", "box_kernel = convolution.Box1DKernel(width) flux_smoothed_astropy = convolution.convolve(flux_original, box_kernel) # Calculate the", "is what we want to compare to flux_smoothed_astropy = medfilt(flux_original,", "convolution_smooth(spec1, custom_kernel) compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value) @pytest.mark.parametrize(\"width\", [1, 2.3]) def test_smooth_box_good(simulated_spectra,", "spec1_smoothed.flux.unit @pytest.mark.parametrize(\"width\", [-1, 0, 'a']) def test_smooth_median_bad(simulated_spectra, width): \"\"\" Test", "which is what we want to compare to trapezoid_kernel =", "a number greater than 0. \"\"\" # Create the original", "numpy_kernel = np.array([0.5, 1, 2, 0.5, 0.2]) numpy_kernel = numpy_kernel", "output units assert spec1.wavelength.unit == spec1_smoothed.wavelength.unit assert spec1.flux.unit == spec1_smoothed.flux.unit", "assert np.allclose(sum(flux_smooth1), sum(flux_original), rtol=rtol) def test_smooth_custom_kernel(simulated_spectra): \"\"\" Test CustomKernel smoothing", "with pytest.raises(ValueError): trapezoid_smooth(spec1, stddev) @pytest.mark.parametrize(\"width\", [1, 3, 9]) def test_smooth_median_good(simulated_spectra,", "fluxes. assert np.allclose(flux_smooth1, flux_smooth2) # Compare the total spectral flux", "Trapezoid1DKernel smoothing with correct parmaeters. 
Standard deviation values need to", "the spectrum spec1 = simulated_spectra.s1_um_mJy_e1 flux_original = spec1.flux # Create", "spec1 = simulated_spectra.s1_um_mJy_e1 # Test bad parameters with pytest.raises(ValueError): trapezoid_smooth(spec1,", "spec1.flux.unit == spec1_smoothed.flux.unit @pytest.mark.parametrize(\"width\", [-1, 0, 'a']) def test_smooth_median_bad(simulated_spectra, width):", "\"\"\" Test Trapezoid1DKernel smoothing with correct parmaeters. Standard deviation values", "with incorrect parmaeters. Standard deviation values need to be a", "rtol (relative tolerance) is used judiciously. \"\"\" # Compare, element", "= convolution.Box1DKernel(width) flux_smoothed_astropy = convolution.convolve(flux_original, box_kernel) # Calculate the box", "the custom smoothed spec1_smoothed = convolution_smooth(spec1, custom_kernel) compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value)", "flux_smoothed_astropy, flux_original.value, rtol=0.15) # Check the input and output units", "the original. assert np.allclose(sum(flux_smooth1), sum(flux_original), rtol=rtol) def test_smooth_custom_kernel(simulated_spectra): \"\"\" Test", "def test_smooth_box_good(simulated_spectra, width): \"\"\" Test Box1DKernel smoothing with correct parmaeters.", "custom smoothed spec1_smoothed = convolution_smooth(spec1, custom_kernel) compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value) @pytest.mark.parametrize(\"width\",", "original spectrum spec1 = simulated_spectra.s1_um_mJy_e1 flux_original = spec1.flux # Create", "'a']) def test_smooth_box_bad(simulated_spectra, width): \"\"\" Test Box1DKernel smoothing with incorrect", "Median smoothing with correct parmaeters. Width values need to be", "width): \"\"\" Test Box1DKernel smoothing with incorrect parmaeters. 
Width values", "Next we want to compare the smoothed flux to the", "gaussian_smooth(spec1, stddev) compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value, rtol=0.02) # Check the input", "sum(flux_original), rtol=rtol) def test_smooth_custom_kernel(simulated_spectra): \"\"\" Test CustomKernel smoothing with correct", "width) compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value) # Check the input and output", "test_smooth_gaussian_good(simulated_spectra, stddev): \"\"\" Test Gaussian1DKernel smoothing with correct parmaeters. Standard", "compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value, rtol=0.15) # Check the input and output", "stddev): \"\"\" Test Gaussian1DKernel smoothing with correct parmaeters. Standard deviation", "spec1 = simulated_spectra.s1_um_mJy_e1 flux_original = spec1.flux # Calculate the smoothed", "so for convolution based smoothing if the kernel is normalized", "stddev): \"\"\" Test MexicanHat1DKernel smoothing with incorrect parmaeters. Standard deviation", "Create the flux_smoothed which is what we want to compare", "flux from the astropy machinery vs the smoothed flux from", "import astropy.units as u from ..spectra.spectrum1d import Spectrum1D from ..tests.spectral_examples", "= trapezoid_smooth(spec1, stddev) compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value) # Check the input", "spec1_smoothed = box_smooth(spec1, width) compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value) # Check the", "comparing flux_smooth1 and flux_smooth2. 2. Next we want to compare", "= simulated_spectra.s1_um_mJy_e1 # Test bad parameters with pytest.raises(ValueError): median_smooth(spec1, width)", "vs the smoothed flux from specutils. 
This is done by", "parameters with pytest.raises(ValueError): box_smooth(spec1, width) @pytest.mark.parametrize(\"stddev\", [1, 2.3]) def test_smooth_gaussian_good(simulated_spectra,", "the smoothed flux from the astropy machinery vs the smoothed", "box smoothed spec1_smoothed = box_smooth(spec1, width) compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value) #", "convolution.convolve(flux_original, custom_kernel) # Calculate the custom smoothed spec1_smoothed = convolution_smooth(spec1,", "convolution.Box1DKernel(width) flux_smoothed_astropy = convolution.convolve(flux_original, box_kernel) # Calculate the box smoothed", "flux_smoothed_astropy = convolution.convolve(flux_original, box_kernel) # Calculate the box smoothed spec1_smoothed", "flux_smoothed_astropy, flux_original.value) # Check the input and output units assert", "simulated_spectra.s1_um_mJy_e1 # Test bad parameters with pytest.raises(ValueError): trapezoid_smooth(spec1, stddev) @pytest.mark.parametrize(\"width\",", "def compare_flux(flux_smooth1, flux_smooth2, flux_original, rtol=0.01): \"\"\" There are two things", "pytest.raises(ValueError): trapezoid_smooth(spec1, stddev) @pytest.mark.parametrize(\"width\", [1, 3, 9]) def test_smooth_median_good(simulated_spectra, width):", "but less so for convolution based smoothing if the kernel", "= spec1.flux # Create a custom kernel (some weird asymmetric-ness)", "the smoothed flux using Astropy gaussian_kernel = convolution.Gaussian1DKernel(stddev) flux_smoothed_astropy =", "simulated_spectra.s1_um_mJy_e1 # Test bad input paramters with pytest.raises(ValueError): gaussian_smooth(spec1, stddev)", "custom kernel (some weird asymmetric-ness) numpy_kernel = np.array([0.5, 1, 2,", "be a number greater than 0. 
\"\"\" # Create the", "astropy.units as u from ..spectra.spectrum1d import Spectrum1D from ..tests.spectral_examples import", "stddev) @pytest.mark.parametrize(\"width\", [1, 3, 9]) def test_smooth_median_good(simulated_spectra, width): \"\"\" Test", "less so for convolution based smoothing if the kernel is", "spec1_smoothed.wavelength.unit assert spec1.flux.unit == spec1_smoothed.flux.unit @pytest.mark.parametrize(\"width\", [-1, 0, 'a']) def", "np.allclose(flux_smooth1, flux_smooth2) # Compare the total spectral flux of the", "def test_smooth_trapezoid_good(simulated_spectra, stddev): \"\"\" Test Trapezoid1DKernel smoothing with correct parmaeters.", "test_smooth_median_good(simulated_spectra, width): \"\"\" Test Median smoothing with correct parmaeters. Width", "@pytest.mark.parametrize(\"width\", [-1, 0, 'a']) def test_smooth_median_bad(simulated_spectra, width): \"\"\" Test Median", "is a little more difficult as smoothing will make a", "Box1DKernel smoothing with correct parmaeters. Width values need to be", "to be a number greater than 0. \"\"\" # Create", "Spectrum1D from ..tests.spectral_examples import simulated_spectra from ..manipulation.smoothing import (convolution_smooth, box_smooth,", "difficult as smoothing will make a difference for median filter,", "width) # Test median smoothing spec1_smoothed = median_smooth(spec1, width) compare_flux(spec1_smoothed.flux.value,", "# Test gaussian smoothing spec1_smoothed = gaussian_smooth(spec1, stddev) compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy,", "Test Median smoothing with incorrect parmaeters. Width values need to", "# Test trapezoid smoothing spec1_smoothed = trapezoid_smooth(spec1, stddev) compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy,", "Test Median smoothing with correct parmaeters. Width values need to", "flux. 
This is a little more difficult as smoothing will", "= spec1.flux # Calculate the smoothed flux using Astropy gaussian_kernel", "smoothing with correct parmaeters. \"\"\" # Create the original spectrum", "stddev) @pytest.mark.parametrize(\"stddev\", [1, 2.3]) def test_smooth_trapezoid_good(simulated_spectra, stddev): \"\"\" Test Trapezoid1DKernel", "flux_smooth1 and flux_smooth2. 2. Next we want to compare the", "units assert spec1.wavelength.unit == spec1_smoothed.wavelength.unit assert spec1.flux.unit == spec1_smoothed.flux.unit @pytest.mark.parametrize(\"width\",", "# Calculate the custom smoothed spec1_smoothed = convolution_smooth(spec1, custom_kernel) compare_flux(spec1_smoothed.flux.value,", "\"\"\" # Compare, element by element, the two smoothed fluxes.", "of smoothing: 1. Compare the smoothed flux from the astropy", "second case the rtol (relative tolerance) is used judiciously. \"\"\"", "# Create a custom kernel (some weird asymmetric-ness) numpy_kernel =", "to the original flux. This is a little more difficult", "\"\"\" Test Box1DKernel smoothing with correct parmaeters. Width values need", "than 0. \"\"\" # Create the original spectrum spec1 =", "from ..tests.spectral_examples import simulated_spectra from ..manipulation.smoothing import (convolution_smooth, box_smooth, gaussian_smooth,", "convolution from scipy.signal import medfilt import astropy.units as u from", "simulated_spectra.s1_um_mJy_e1 # Test bad input parameters with pytest.raises(ValueError): box_smooth(spec1, width)", "smoothing: 1. Compare the smoothed flux from the astropy machinery", "number greater than 0. 
\"\"\" # Create the original spectrum", "spec1.wavelength.unit == spec1_smoothed.wavelength.unit assert spec1.flux.unit == spec1_smoothed.flux.unit @pytest.mark.parametrize(\"stddev\", [-1, 0,", "rtol=0.02) # Check the input and output units assert spec1.wavelength.unit", "input paramters with pytest.raises(ValueError): gaussian_smooth(spec1, stddev) @pytest.mark.parametrize(\"stddev\", [1, 2.3]) def", "flux_smoothed_astropy, flux_original.value) @pytest.mark.parametrize(\"width\", [1, 2.3]) def test_smooth_box_good(simulated_spectra, width): \"\"\" Test", "..tests.spectral_examples import simulated_spectra from ..manipulation.smoothing import (convolution_smooth, box_smooth, gaussian_smooth, trapezoid_smooth,", "units assert spec1.wavelength.unit == spec1_smoothed.wavelength.unit assert spec1.flux.unit == spec1_smoothed.flux.unit @pytest.mark.parametrize(\"stddev\",", "the smoothed flux to the original flux. This is a", "0.5, 0.2]) numpy_kernel = numpy_kernel / np.sum(numpy_kernel) custom_kernel = convolution.CustomKernel(numpy_kernel)", "to compare the smoothed flux to the original flux. This", "smoothed flux using Astropy gaussian_kernel = convolution.Gaussian1DKernel(stddev) flux_smoothed_astropy = convolution.convolve(flux_original,", "== spec1_smoothed.wavelength.unit assert spec1.flux.unit == spec1_smoothed.flux.unit @pytest.mark.parametrize(\"width\", [-1, 0, 'a'])", "convolution.convolve(flux_original, box_kernel) # Calculate the box smoothed spec1_smoothed = box_smooth(spec1,", "flux_original.value, rtol=0.02) # Check the input and output units assert", "element, the two smoothed fluxes. 
assert np.allclose(flux_smooth1, flux_smooth2) # Compare", "spectrum spec1 = simulated_spectra.s1_um_mJy_e1 # Test bad input paramters with", "assert np.allclose(flux_smooth1, flux_smooth2) # Compare the total spectral flux of", "# Check the input and output units assert spec1.wavelength.unit ==", "trapezoid smoothing spec1_smoothed = trapezoid_smooth(spec1, stddev) compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value) #", "original. assert np.allclose(sum(flux_smooth1), sum(flux_original), rtol=rtol) def test_smooth_custom_kernel(simulated_spectra): \"\"\" Test CustomKernel", "test_smooth_trapezoid_good(simulated_spectra, stddev): \"\"\" Test Trapezoid1DKernel smoothing with correct parmaeters. Standard", "Median smoothing with incorrect parmaeters. Width values need to be", "gaussian_smooth(spec1, stddev) @pytest.mark.parametrize(\"stddev\", [1, 2.3]) def test_smooth_trapezoid_good(simulated_spectra, stddev): \"\"\" Test", "with pytest.raises(ValueError): gaussian_smooth(spec1, stddev) @pytest.mark.parametrize(\"stddev\", [1, 2.3]) def test_smooth_trapezoid_good(simulated_spectra, stddev):", "0, 'a']) def test_smooth_box_bad(simulated_spectra, width): \"\"\" Test Box1DKernel smoothing with", "spec1 = simulated_spectra.s1_um_mJy_e1 # Test bad parameters with pytest.raises(ValueError): median_smooth(spec1,", "the smoothed flux from specutils. This is done by comparing", "This is done by comparing flux_smooth1 and flux_smooth2. 2. Next", "import Spectrum1D from ..tests.spectral_examples import simulated_spectra from ..manipulation.smoothing import (convolution_smooth,", "[1, 2.3]) def test_smooth_gaussian_good(simulated_spectra, stddev): \"\"\" Test Gaussian1DKernel smoothing with", "stddev) compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value, rtol=0.02) # Check the input and", "machinery vs the smoothed flux from specutils. 
This is done", "Create a custom kernel (some weird asymmetric-ness) numpy_kernel = np.array([0.5,", "spec1.flux # Create a custom kernel (some weird asymmetric-ness) numpy_kernel", "stddev) compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value) # Check the input and output", "parmaeters. Standard deviation values need to be a number greater", "= convolution.Trapezoid1DKernel(stddev) flux_smoothed_astropy = convolution.convolve(flux_original, trapezoid_kernel) # Test trapezoid smoothing", "is what we want to compare to trapezoid_kernel = convolution.Trapezoid1DKernel(stddev)", "1). In this second case the rtol (relative tolerance) is", "assert spec1.flux.unit == spec1_smoothed.flux.unit @pytest.mark.parametrize(\"width\", [-1, 0, 'a']) def test_smooth_box_bad(simulated_spectra,", "simulated_spectra.s1_um_mJy_e1 flux_original = spec1.flux # Calculate the smoothed flux using", "bad parameters with pytest.raises(ValueError): trapezoid_smooth(spec1, stddev) @pytest.mark.parametrize(\"width\", [1, 3, 9])", "normalized (area under the kernel = 1). In this second", "convolution.Trapezoid1DKernel(stddev) flux_smoothed_astropy = convolution.convolve(flux_original, trapezoid_kernel) # Test trapezoid smoothing spec1_smoothed", "width): \"\"\" Test Median smoothing with incorrect parmaeters. Width values", "medfilt import astropy.units as u from ..spectra.spectrum1d import Spectrum1D from", "simulated_spectra from ..manipulation.smoothing import (convolution_smooth, box_smooth, gaussian_smooth, trapezoid_smooth, median_smooth) def", "0, 'a']) def test_smooth_gaussian_bad(simulated_spectra, stddev): \"\"\" Test MexicanHat1DKernel smoothing with", "for each set of smoothing: 1. Compare the smoothed flux", "smoothing with incorrect parmaeters. 
Width values need to be a", "input parameters with pytest.raises(ValueError): box_smooth(spec1, width) @pytest.mark.parametrize(\"stddev\", [1, 2.3]) def", "flux_smoothed which is what we want to compare to trapezoid_kernel", "Check the input and output units assert spec1.wavelength.unit == spec1_smoothed.wavelength.unit", "@pytest.mark.parametrize(\"stddev\", [1, 2.3]) def test_smooth_trapezoid_good(simulated_spectra, stddev): \"\"\" Test Trapezoid1DKernel smoothing", "as u from ..spectra.spectrum1d import Spectrum1D from ..tests.spectral_examples import simulated_spectra", "Test gaussian smoothing spec1_smoothed = gaussian_smooth(spec1, stddev) compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value,", "test_smooth_trapezoid_bad(simulated_spectra, stddev): \"\"\" Test Trapezoid1DKernel smoothing with incorrect parmaeters. Standard", "two smoothed fluxes. assert np.allclose(flux_smooth1, flux_smooth2) # Compare the total", "\"\"\" Test CustomKernel smoothing with correct parmaeters. \"\"\" # Create", "Compare the total spectral flux of the smoothed to the", "compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value) @pytest.mark.parametrize(\"width\", [1, 2.3]) def test_smooth_box_good(simulated_spectra, width): \"\"\"", "from specutils. This is done by comparing flux_smooth1 and flux_smooth2.", "deviation values need to be a number greater than 0.", "Calculate the smoothed flux using Astropy gaussian_kernel = convolution.Gaussian1DKernel(stddev) flux_smoothed_astropy", "are two things to compare for each set of smoothing:", "based smoothing if the kernel is normalized (area under the", "# Create the spectrum spec1 = simulated_spectra.s1_um_mJy_e1 flux_original = spec1.flux", "numpy_kernel = numpy_kernel / np.sum(numpy_kernel) custom_kernel = convolution.CustomKernel(numpy_kernel) flux_smoothed_astropy =", "width): \"\"\" Test Box1DKernel smoothing with correct parmaeters. 
Width values", "to compare for each set of smoothing: 1. Compare the", "to trapezoid_kernel = convolution.Trapezoid1DKernel(stddev) flux_smoothed_astropy = convolution.convolve(flux_original, trapezoid_kernel) # Test", "= convolution.CustomKernel(numpy_kernel) flux_smoothed_astropy = convolution.convolve(flux_original, custom_kernel) # Calculate the custom", "spec1.flux.unit == spec1_smoothed.flux.unit @pytest.mark.parametrize(\"stddev\", [-1, 0, 'a']) def test_smooth_gaussian_bad(simulated_spectra, stddev):", "more difficult as smoothing will make a difference for median", "= medfilt(flux_original, width) # Test median smoothing spec1_smoothed = median_smooth(spec1,", "flux using Astropy box_kernel = convolution.Box1DKernel(width) flux_smoothed_astropy = convolution.convolve(flux_original, box_kernel)", "convolution.CustomKernel(numpy_kernel) flux_smoothed_astropy = convolution.convolve(flux_original, custom_kernel) # Calculate the custom smoothed", "tolerance) is used judiciously. \"\"\" # Compare, element by element,", "smoothing spec1_smoothed = trapezoid_smooth(spec1, stddev) compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value) # Check", "gaussian smoothing spec1_smoothed = gaussian_smooth(spec1, stddev) compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value, rtol=0.02)", "= gaussian_smooth(spec1, stddev) compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value, rtol=0.02) # Check the", "trapezoid_kernel = convolution.Trapezoid1DKernel(stddev) flux_smoothed_astropy = convolution.convolve(flux_original, trapezoid_kernel) # Test trapezoid", "smoothed flux from the astropy machinery vs the smoothed flux", "# Calculate the box smoothed spec1_smoothed = box_smooth(spec1, width) compare_flux(spec1_smoothed.flux.value,", "trapezoid_kernel) # Test trapezoid smoothing spec1_smoothed = trapezoid_smooth(spec1, stddev) compare_flux(spec1_smoothed.flux.value,", "spec1_smoothed.flux.unit 
@pytest.mark.parametrize(\"stddev\", [-1, 0, 'a']) def test_smooth_trapezoid_bad(simulated_spectra, stddev): \"\"\" Test", "for median filter, but less so for convolution based smoothing", "= simulated_spectra.s1_um_mJy_e1 # Test bad input paramters with pytest.raises(ValueError): gaussian_smooth(spec1,", "= 1). In this second case the rtol (relative tolerance)", "np.allclose(sum(flux_smooth1), sum(flux_original), rtol=rtol) def test_smooth_custom_kernel(simulated_spectra): \"\"\" Test CustomKernel smoothing with", "and flux_smooth2. 2. Next we want to compare the smoothed", "\"\"\" # Create the original spectrum spec1 = simulated_spectra.s1_um_mJy_e1 flux_original", "def test_smooth_median_good(simulated_spectra, width): \"\"\" Test Median smoothing with correct parmaeters.", "= convolution.convolve(flux_original, trapezoid_kernel) # Test trapezoid smoothing spec1_smoothed = trapezoid_smooth(spec1,", "smoothed flux using Astropy box_kernel = convolution.Box1DKernel(width) flux_smoothed_astropy = convolution.convolve(flux_original,", "box_kernel) # Calculate the box smoothed spec1_smoothed = box_smooth(spec1, width)", "medfilt(flux_original, width) # Test median smoothing spec1_smoothed = median_smooth(spec1, width)", "smoothing with correct parmaeters. Width values need to be a", "custom_kernel = convolution.CustomKernel(numpy_kernel) flux_smoothed_astropy = convolution.convolve(flux_original, custom_kernel) # Calculate the", "pytest from astropy import convolution from scipy.signal import medfilt import", "'a']) def test_smooth_median_bad(simulated_spectra, width): \"\"\" Test Median smoothing with incorrect", "Test Trapezoid1DKernel smoothing with incorrect parmaeters. Standard deviation values need", "flux_original.value) @pytest.mark.parametrize(\"width\", [1, 2.3]) def test_smooth_box_good(simulated_spectra, width): \"\"\" Test Box1DKernel", "is done by comparing flux_smooth1 and flux_smooth2. 2. Next we", "correct parmaeters. 
Width values need to be a number greater", "parmaeters. \"\"\" # Create the original spectrum spec1 = simulated_spectra.s1_um_mJy_e1", "def test_smooth_box_bad(simulated_spectra, width): \"\"\" Test Box1DKernel smoothing with incorrect parmaeters.", "spec1.wavelength.unit == spec1_smoothed.wavelength.unit assert spec1.flux.unit == spec1_smoothed.flux.unit @pytest.mark.parametrize(\"width\", [-1, 0,", "rtol=rtol) def test_smooth_custom_kernel(simulated_spectra): \"\"\" Test CustomKernel smoothing with correct parmaeters.", "correct parmaeters. Standard deviation values need to be a number", "the kernel is normalized (area under the kernel = 1).", "= simulated_spectra.s1_um_mJy_e1 # Test bad input parameters with pytest.raises(ValueError): box_smooth(spec1,", "# Create the spectrum spec1 = simulated_spectra.s1_um_mJy_e1 # Test bad", "flux_original, rtol=0.01): \"\"\" There are two things to compare for", "spec1 = simulated_spectra.s1_um_mJy_e1 flux_original = spec1.flux # Create a custom", "kernel = 1). In this second case the rtol (relative", "Box1DKernel smoothing with incorrect parmaeters. Width values need to be", "# Test bad input paramters with pytest.raises(ValueError): gaussian_smooth(spec1, stddev) @pytest.mark.parametrize(\"stddev\",", "a little more difficult as smoothing will make a difference", "Test bad parameters with pytest.raises(ValueError): trapezoid_smooth(spec1, stddev) @pytest.mark.parametrize(\"width\", [1, 3,", "numpy_kernel / np.sum(numpy_kernel) custom_kernel = convolution.CustomKernel(numpy_kernel) flux_smoothed_astropy = convolution.convolve(flux_original, custom_kernel)", "This is a little more difficult as smoothing will make", "by comparing flux_smooth1 and flux_smooth2. 2. 
Next we want to", "filter, but less so for convolution based smoothing if the", "..manipulation.smoothing import (convolution_smooth, box_smooth, gaussian_smooth, trapezoid_smooth, median_smooth) def compare_flux(flux_smooth1, flux_smooth2,", "the rtol (relative tolerance) is used judiciously. \"\"\" # Compare,", "\"\"\" # Create the spectrum spec1 = simulated_spectra.s1_um_mJy_e1 # Test", "greater than 0. \"\"\" # Create the original spectrum spec1", "flux_smooth2) # Compare the total spectral flux of the smoothed", "# Calculate the smoothed flux using Astropy box_kernel = convolution.Box1DKernel(width)", "paramters with pytest.raises(ValueError): gaussian_smooth(spec1, stddev) @pytest.mark.parametrize(\"stddev\", [1, 2.3]) def test_smooth_trapezoid_good(simulated_spectra,", "..spectra.spectrum1d import Spectrum1D from ..tests.spectral_examples import simulated_spectra from ..manipulation.smoothing import", "u from ..spectra.spectrum1d import Spectrum1D from ..tests.spectral_examples import simulated_spectra from", "with correct parmaeters. Standard deviation values need to be a", "convolution.Gaussian1DKernel(stddev) flux_smoothed_astropy = convolution.convolve(flux_original, gaussian_kernel) # Test gaussian smoothing spec1_smoothed", "compare the smoothed flux to the original flux. This is", "flux_smoothed_astropy = convolution.convolve(flux_original, trapezoid_kernel) # Test trapezoid smoothing spec1_smoothed =", "trapezoid_smooth(spec1, stddev) @pytest.mark.parametrize(\"width\", [1, 3, 9]) def test_smooth_median_good(simulated_spectra, width): \"\"\"", "set of smoothing: 1. Compare the smoothed flux from the", "test_smooth_median_bad(simulated_spectra, width): \"\"\" Test Median smoothing with incorrect parmaeters. 
Width", "= simulated_spectra.s1_um_mJy_e1 # Test bad parameters with pytest.raises(ValueError): trapezoid_smooth(spec1, stddev)", "gaussian_smooth, trapezoid_smooth, median_smooth) def compare_flux(flux_smooth1, flux_smooth2, flux_original, rtol=0.01): \"\"\" There", "from scipy.signal import medfilt import astropy.units as u from ..spectra.spectrum1d", "Create the spectrum spec1 = simulated_spectra.s1_um_mJy_e1 # Test bad parameters", "= spec1.flux # Calculate the smoothed flux using Astropy box_kernel", "0, 'a']) def test_smooth_trapezoid_bad(simulated_spectra, stddev): \"\"\" Test Trapezoid1DKernel smoothing with", "compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value) # Check the input and output units", "gaussian_kernel) # Test gaussian smoothing spec1_smoothed = gaussian_smooth(spec1, stddev) compare_flux(spec1_smoothed.flux.value,", "test_smooth_gaussian_bad(simulated_spectra, stddev): \"\"\" Test MexicanHat1DKernel smoothing with incorrect parmaeters. Standard", "using Astropy box_kernel = convolution.Box1DKernel(width) flux_smoothed_astropy = convolution.convolve(flux_original, box_kernel) #", "to flux_smoothed_astropy = medfilt(flux_original, width) # Test median smoothing spec1_smoothed", "[-1, 0, 'a']) def test_smooth_box_bad(simulated_spectra, width): \"\"\" Test Box1DKernel smoothing", "== spec1_smoothed.flux.unit @pytest.mark.parametrize(\"width\", [-1, 0, 'a']) def test_smooth_median_bad(simulated_spectra, width): \"\"\"", "if the kernel is normalized (area under the kernel =", "MexicanHat1DKernel smoothing with incorrect parmaeters. Standard deviation values need to", "test_smooth_box_bad(simulated_spectra, width): \"\"\" Test Box1DKernel smoothing with incorrect parmaeters. 
Width", "[1, 2.3]) def test_smooth_box_good(simulated_spectra, width): \"\"\" Test Box1DKernel smoothing with", "spectrum spec1 = simulated_spectra.s1_um_mJy_e1 # Test bad parameters with pytest.raises(ValueError):", "what we want to compare to trapezoid_kernel = convolution.Trapezoid1DKernel(stddev) flux_smoothed_astropy", "'a']) def test_smooth_trapezoid_bad(simulated_spectra, stddev): \"\"\" Test Trapezoid1DKernel smoothing with incorrect", "Test Box1DKernel smoothing with incorrect parmaeters. Width values need to", "flux_original = spec1.flux # Create the flux_smoothed which is what", "(some weird asymmetric-ness) numpy_kernel = np.array([0.5, 1, 2, 0.5, 0.2])", "= median_smooth(spec1, width) compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value, rtol=0.15) # Check the", "[1, 2.3]) def test_smooth_trapezoid_good(simulated_spectra, stddev): \"\"\" Test Trapezoid1DKernel smoothing with", "There are two things to compare for each set of", "original flux. This is a little more difficult as smoothing", "with incorrect parmaeters. Width values need to be a number", "simulated_spectra.s1_um_mJy_e1 flux_original = spec1.flux # Create the flux_smoothed which is", "# Test bad parameters with pytest.raises(ValueError): trapezoid_smooth(spec1, stddev) @pytest.mark.parametrize(\"width\", [1,", "assert spec1.flux.unit == spec1_smoothed.flux.unit @pytest.mark.parametrize(\"stddev\", [-1, 0, 'a']) def test_smooth_trapezoid_bad(simulated_spectra,", "compare for each set of smoothing: 1. Compare the smoothed", "flux_smoothed_astropy = medfilt(flux_original, width) # Test median smoothing spec1_smoothed =", "parmaeters. Width values need to be a number greater than", "0. 
\"\"\" # Create the original spectrum spec1 = simulated_spectra.s1_um_mJy_e1", "@pytest.mark.parametrize(\"stddev\", [-1, 0, 'a']) def test_smooth_trapezoid_bad(simulated_spectra, stddev): \"\"\" Test Trapezoid1DKernel", "\"\"\" Test MexicanHat1DKernel smoothing with incorrect parmaeters. Standard deviation values", "flux from specutils. This is done by comparing flux_smooth1 and", "# Compare the total spectral flux of the smoothed to", "Test Gaussian1DKernel smoothing with correct parmaeters. Standard deviation values need", "compare to trapezoid_kernel = convolution.Trapezoid1DKernel(stddev) flux_smoothed_astropy = convolution.convolve(flux_original, trapezoid_kernel) #", "asymmetric-ness) numpy_kernel = np.array([0.5, 1, 2, 0.5, 0.2]) numpy_kernel =", "def test_smooth_gaussian_bad(simulated_spectra, stddev): \"\"\" Test MexicanHat1DKernel smoothing with incorrect parmaeters.", "flux using Astropy gaussian_kernel = convolution.Gaussian1DKernel(stddev) flux_smoothed_astropy = convolution.convolve(flux_original, gaussian_kernel)", "flux_smooth2, flux_original, rtol=0.01): \"\"\" There are two things to compare", "the total spectral flux of the smoothed to the original.", "the spectrum spec1 = simulated_spectra.s1_um_mJy_e1 flux_original = spec1.flux # Calculate", "spec1_smoothed.flux.unit @pytest.mark.parametrize(\"width\", [-1, 0, 'a']) def test_smooth_box_bad(simulated_spectra, width): \"\"\" Test", "import numpy as np import pytest from astropy import convolution", "box_smooth, gaussian_smooth, trapezoid_smooth, median_smooth) def compare_flux(flux_smooth1, flux_smooth2, flux_original, rtol=0.01): \"\"\"", "and output units assert spec1.wavelength.unit == spec1_smoothed.wavelength.unit assert spec1.flux.unit ==", "the spectrum spec1 = simulated_spectra.s1_um_mJy_e1 # Test bad input paramters", "1, 2, 0.5, 0.2]) numpy_kernel = numpy_kernel / np.sum(numpy_kernel) custom_kernel", "import medfilt import astropy.units as u from ..spectra.spectrum1d import 
Spectrum1D", "to the original. assert np.allclose(sum(flux_smooth1), sum(flux_original), rtol=rtol) def test_smooth_custom_kernel(simulated_spectra): \"\"\"", "flux_smooth2. 2. Next we want to compare the smoothed flux", "def test_smooth_median_bad(simulated_spectra, width): \"\"\" Test Median smoothing with incorrect parmaeters.", "simulated_spectra.s1_um_mJy_e1 flux_original = spec1.flux # Create a custom kernel (some", "incorrect parmaeters. Width values need to be a number greater" ]
[ "set of points using either linear, polynomial or cubic spline", "function \"\"\" if method == \"linear\": intfunc = interp1d(x_sup, y_sup,", "x-coordinates of the function y_sup (list): y-coordinates of the function", "points using either linear, polynomial or cubic spline for the", "lagrange(x_sup, y_sup) return intfunc elif method == \"cspline\": intfunc =", "method == \"cspline\": intfunc = CubicSpline(x_sup, y_sup, bc_type=\"natural\") return intfunc", "mathematical functions out of support points\"\"\" from scipy.interpolate import interp1d,", "of the function method (string): name of the interpolation method", "y_sup) return intfunc elif method == \"cspline\": intfunc = CubicSpline(x_sup,", "of the function y_sup (list): y-coordinates of the function method", "a given set of points using either linear, polynomial or", "Returns: intfunc: interpolated function \"\"\" if method == \"linear\": intfunc", "used Returns: intfunc: interpolated function \"\"\" if method == \"linear\":", "be used Returns: intfunc: interpolated function \"\"\" if method ==", "points\"\"\" from scipy.interpolate import interp1d, lagrange, CubicSpline def interpolator(x_sup, y_sup,", "method (string): name of the interpolation method to be used", "<reponame>buulikduong/1d_sgl_solver<gh_stars>0 \"\"\"Module interpolating mathematical functions out of support points\"\"\" from", "intfunc = interp1d(x_sup, y_sup, kind=\"linear\") return intfunc elif method ==", "intfunc elif method == \"polynomial\": intfunc = lagrange(x_sup, y_sup) return", "\"polynomial\": intfunc = lagrange(x_sup, y_sup) return intfunc elif method ==", "or cubic spline for the interpolation. 
Args: x_sup (list): x-coordinates", "\"linear\": intfunc = interp1d(x_sup, y_sup, kind=\"linear\") return intfunc elif method", "interp1d, lagrange, CubicSpline def interpolator(x_sup, y_sup, method): \"\"\"Interpolates a mathematical", "mathematical function from a given set of points using either", "y_sup (list): y-coordinates of the function method (string): name of", "(string): name of the interpolation method to be used Returns:", "method): \"\"\"Interpolates a mathematical function from a given set of", "interpolated function \"\"\" if method == \"linear\": intfunc = interp1d(x_sup,", "support points\"\"\" from scipy.interpolate import interp1d, lagrange, CubicSpline def interpolator(x_sup,", "y-coordinates of the function method (string): name of the interpolation", "x_sup (list): x-coordinates of the function y_sup (list): y-coordinates of", "using either linear, polynomial or cubic spline for the interpolation.", "\"\"\" if method == \"linear\": intfunc = interp1d(x_sup, y_sup, kind=\"linear\")", "interpolation. Args: x_sup (list): x-coordinates of the function y_sup (list):", "== \"polynomial\": intfunc = lagrange(x_sup, y_sup) return intfunc elif method", "y_sup, kind=\"linear\") return intfunc elif method == \"polynomial\": intfunc =", "function method (string): name of the interpolation method to be", "y_sup, method): \"\"\"Interpolates a mathematical function from a given set", "lagrange, CubicSpline def interpolator(x_sup, y_sup, method): \"\"\"Interpolates a mathematical function", "intfunc = lagrange(x_sup, y_sup) return intfunc elif method == \"cspline\":", "the function method (string): name of the interpolation method to", "kind=\"linear\") return intfunc elif method == \"polynomial\": intfunc = lagrange(x_sup,", "return intfunc elif method == \"cspline\": intfunc = CubicSpline(x_sup, y_sup,", "either linear, polynomial or cubic spline for the interpolation. 
Args:", "Args: x_sup (list): x-coordinates of the function y_sup (list): y-coordinates", "(list): x-coordinates of the function y_sup (list): y-coordinates of the", "CubicSpline def interpolator(x_sup, y_sup, method): \"\"\"Interpolates a mathematical function from", "of points using either linear, polynomial or cubic spline for", "function from a given set of points using either linear,", "of support points\"\"\" from scipy.interpolate import interp1d, lagrange, CubicSpline def", "the interpolation method to be used Returns: intfunc: interpolated function", "function y_sup (list): y-coordinates of the function method (string): name", "a mathematical function from a given set of points using", "method to be used Returns: intfunc: interpolated function \"\"\" if", "\"\"\"Module interpolating mathematical functions out of support points\"\"\" from scipy.interpolate", "polynomial or cubic spline for the interpolation. Args: x_sup (list):", "import interp1d, lagrange, CubicSpline def interpolator(x_sup, y_sup, method): \"\"\"Interpolates a", "from a given set of points using either linear, polynomial", "the function y_sup (list): y-coordinates of the function method (string):", "return intfunc elif method == \"polynomial\": intfunc = lagrange(x_sup, y_sup)", "= interp1d(x_sup, y_sup, kind=\"linear\") return intfunc elif method == \"polynomial\":", "name of the interpolation method to be used Returns: intfunc:", "functions out of support points\"\"\" from scipy.interpolate import interp1d, lagrange,", "spline for the interpolation. Args: x_sup (list): x-coordinates of the", "interpolation method to be used Returns: intfunc: interpolated function \"\"\"", "method == \"polynomial\": intfunc = lagrange(x_sup, y_sup) return intfunc elif", "= lagrange(x_sup, y_sup) return intfunc elif method == \"cspline\": intfunc", "interpolator(x_sup, y_sup, method): \"\"\"Interpolates a mathematical function from a given", "linear, polynomial or cubic spline for the interpolation. 
Args: x_sup", "elif method == \"cspline\": intfunc = CubicSpline(x_sup, y_sup, bc_type=\"natural\") return", "scipy.interpolate import interp1d, lagrange, CubicSpline def interpolator(x_sup, y_sup, method): \"\"\"Interpolates", "cubic spline for the interpolation. Args: x_sup (list): x-coordinates of", "intfunc: interpolated function \"\"\" if method == \"linear\": intfunc =", "from scipy.interpolate import interp1d, lagrange, CubicSpline def interpolator(x_sup, y_sup, method):", "elif method == \"polynomial\": intfunc = lagrange(x_sup, y_sup) return intfunc", "\"cspline\": intfunc = CubicSpline(x_sup, y_sup, bc_type=\"natural\") return intfunc return None", "the interpolation. Args: x_sup (list): x-coordinates of the function y_sup", "intfunc elif method == \"cspline\": intfunc = CubicSpline(x_sup, y_sup, bc_type=\"natural\")", "out of support points\"\"\" from scipy.interpolate import interp1d, lagrange, CubicSpline", "given set of points using either linear, polynomial or cubic", "interp1d(x_sup, y_sup, kind=\"linear\") return intfunc elif method == \"polynomial\": intfunc", "interpolating mathematical functions out of support points\"\"\" from scipy.interpolate import", "if method == \"linear\": intfunc = interp1d(x_sup, y_sup, kind=\"linear\") return", "of the interpolation method to be used Returns: intfunc: interpolated", "== \"cspline\": intfunc = CubicSpline(x_sup, y_sup, bc_type=\"natural\") return intfunc return", "(list): y-coordinates of the function method (string): name of the", "def interpolator(x_sup, y_sup, method): \"\"\"Interpolates a mathematical function from a", "method == \"linear\": intfunc = interp1d(x_sup, y_sup, kind=\"linear\") return intfunc", "\"\"\"Interpolates a mathematical function from a given set of points", "to be used Returns: intfunc: interpolated function \"\"\" if method", "== \"linear\": intfunc = interp1d(x_sup, y_sup, kind=\"linear\") return intfunc elif", "for the interpolation. 
Args: x_sup (list): x-coordinates of the function" ]
[ "def execute(): frappe.delete_doc_if_exists(\"DocType\", \"Web View\") frappe.delete_doc_if_exists(\"DocType\", \"Web View Component\") frappe.delete_doc_if_exists(\"DocType\",", "<filename>frappe/patches/v13_0/remove_web_view.py import frappe def execute(): frappe.delete_doc_if_exists(\"DocType\", \"Web View\") frappe.delete_doc_if_exists(\"DocType\", \"Web", "import frappe def execute(): frappe.delete_doc_if_exists(\"DocType\", \"Web View\") frappe.delete_doc_if_exists(\"DocType\", \"Web View", "frappe def execute(): frappe.delete_doc_if_exists(\"DocType\", \"Web View\") frappe.delete_doc_if_exists(\"DocType\", \"Web View Component\")", "frappe.delete_doc_if_exists(\"DocType\", \"Web View\") frappe.delete_doc_if_exists(\"DocType\", \"Web View Component\") frappe.delete_doc_if_exists(\"DocType\", \"CSS Class\")", "execute(): frappe.delete_doc_if_exists(\"DocType\", \"Web View\") frappe.delete_doc_if_exists(\"DocType\", \"Web View Component\") frappe.delete_doc_if_exists(\"DocType\", \"CSS" ]
[ "= float(home['fta']) - float(away['fta']) pt_dif = float(home['pts']) - float(away['pts']) if", "lambda x: x*slope + intercept fit_y = [f(min(X)), f(max(X))] plt.xlabel('Free", "10: continue except: continue X.append(ft_dif) y.append(pt_dif) c = 0 for", "Throw Attempts') plt.ylabel('Point Differential') plt.title('FTA vs Point Differential') print(correlation(X, y))", "vs Point Differential') print(correlation(X, y)) plt.plot([min(X), max(X)], fit_y, color =", "x: x*slope + intercept fit_y = [f(min(X)), f(max(X))] plt.xlabel('Free Throw", "from util.stats import * with open('data/game_stats.json', 'r') as f: df", "'r') as f: df = json.load(f) X, y = [],", "scipy.stats import linregress from util.stats import * with open('data/game_stats.json', 'r')", "Point Differential') print(correlation(X, y)) plt.plot([min(X), max(X)], fit_y, color = 'red')", "linregress from util.stats import * with open('data/game_stats.json', 'r') as f:", "1 print(c / len(X)) slope, intercept, r, p, std =", "+ intercept fit_y = [f(min(X)), f(max(X))] plt.xlabel('Free Throw Attempts') plt.ylabel('Point", "len(X)) slope, intercept, r, p, std = linregress(X, y) f", "y.append(pt_dif) c = 0 for f, p in zip(X, y):", "[f(min(X)), f(max(X))] plt.xlabel('Free Throw Attempts') plt.ylabel('Point Differential') plt.title('FTA vs Point", "f(max(X))] plt.xlabel('Free Throw Attempts') plt.ylabel('Point Differential') plt.title('FTA vs Point Differential')", "p in zip(X, y): if f * p > 0:", "x*slope + intercept fit_y = [f(min(X)), f(max(X))] plt.xlabel('Free Throw Attempts')", "json import matplotlib.pyplot as plt from pprint import pprint import", "abs(pt_dif) > 10: continue except: continue X.append(ft_dif) y.append(pt_dif) c =", "'240': continue try: ft_dif = float(home['fta']) - float(away['fta']) pt_dif =", "plt from pprint import pprint import numpy as np from", "> 10: continue except: continue X.append(ft_dif) y.append(pt_dif) c = 0", "print(c / len(X)) slope, intercept, r, p, std = 
linregress(X,", "import pprint import numpy as np from scipy.stats import linregress", "for f, p in zip(X, y): if f * p", "import matplotlib.pyplot as plt from pprint import pprint import numpy", "as plt from pprint import pprint import numpy as np", "= 0 for f, p in zip(X, y): if f", "linregress(X, y) f = lambda x: x*slope + intercept fit_y", "with open('data/game_stats.json', 'r') as f: df = json.load(f) X, y", "X.append(ft_dif) y.append(pt_dif) c = 0 for f, p in zip(X,", "p > 0: c += 1 print(c / len(X)) slope,", "float(home['fta']) - float(away['fta']) pt_dif = float(home['pts']) - float(away['pts']) if abs(pt_dif)", "std = linregress(X, y) f = lambda x: x*slope +", "- float(away['pts']) if abs(pt_dif) > 10: continue except: continue X.append(ft_dif)", "stats['home'], stats['away'] if home['mp'] != away['mp'] != '240': continue try:", "> 0: c += 1 print(c / len(X)) slope, intercept,", "home['mp'] != away['mp'] != '240': continue try: ft_dif = float(home['fta'])", "matplotlib.pyplot as plt from pprint import pprint import numpy as", "open('data/game_stats.json', 'r') as f: df = json.load(f) X, y =", "away['mp'] != '240': continue try: ft_dif = float(home['fta']) - float(away['fta'])", "c += 1 print(c / len(X)) slope, intercept, r, p,", "= stats['home'], stats['away'] if home['mp'] != away['mp'] != '240': continue", "plt.xlabel('Free Throw Attempts') plt.ylabel('Point Differential') plt.title('FTA vs Point Differential') print(correlation(X,", "= linregress(X, y) f = lambda x: x*slope + intercept", "Differential') plt.title('FTA vs Point Differential') print(correlation(X, y)) plt.plot([min(X), max(X)], fit_y,", "stats in df.items(): home, away = stats['home'], stats['away'] if home['mp']", "float(home['pts']) - float(away['pts']) if abs(pt_dif) > 10: continue except: continue", "p, std = linregress(X, y) f = lambda x: x*slope", "* with open('data/game_stats.json', 'r') as f: df = json.load(f) X,", "slope, intercept, r, p, std = linregress(X, y) f =", "- 
float(away['fta']) pt_dif = float(home['pts']) - float(away['pts']) if abs(pt_dif) >", "f: df = json.load(f) X, y = [], [] for", "float(away['fta']) pt_dif = float(home['pts']) - float(away['pts']) if abs(pt_dif) > 10:", "import linregress from util.stats import * with open('data/game_stats.json', 'r') as", "from scipy.stats import linregress from util.stats import * with open('data/game_stats.json',", "util.stats import * with open('data/game_stats.json', 'r') as f: df =", "pprint import pprint import numpy as np from scipy.stats import", "Differential') print(correlation(X, y)) plt.plot([min(X), max(X)], fit_y, color = 'red') plt.scatter(X,", "y): if f * p > 0: c += 1", "fit_y = [f(min(X)), f(max(X))] plt.xlabel('Free Throw Attempts') plt.ylabel('Point Differential') plt.title('FTA", "= json.load(f) X, y = [], [] for match, stats", "pt_dif = float(home['pts']) - float(away['pts']) if abs(pt_dif) > 10: continue", "match, stats in df.items(): home, away = stats['home'], stats['away'] if", "away = stats['home'], stats['away'] if home['mp'] != away['mp'] != '240':", "import numpy as np from scipy.stats import linregress from util.stats", "pprint import numpy as np from scipy.stats import linregress from", "json.load(f) X, y = [], [] for match, stats in", "y)) plt.plot([min(X), max(X)], fit_y, color = 'red') plt.scatter(X, y) plt.show()", "in df.items(): home, away = stats['home'], stats['away'] if home['mp'] !=", "continue try: ft_dif = float(home['fta']) - float(away['fta']) pt_dif = float(home['pts'])", "numpy as np from scipy.stats import linregress from util.stats import", "from pprint import pprint import numpy as np from scipy.stats", "zip(X, y): if f * p > 0: c +=", "np from scipy.stats import linregress from util.stats import * with", "Attempts') plt.ylabel('Point Differential') plt.title('FTA vs Point Differential') print(correlation(X, y)) plt.plot([min(X),", "= [], [] for match, stats in df.items(): home, away", "0 for f, p in zip(X, y): if f *", "X, y = 
[], [] for match, stats in df.items():", "* p > 0: c += 1 print(c / len(X))", "= [f(min(X)), f(max(X))] plt.xlabel('Free Throw Attempts') plt.ylabel('Point Differential') plt.title('FTA vs", "y) f = lambda x: x*slope + intercept fit_y =", "for match, stats in df.items(): home, away = stats['home'], stats['away']", "f, p in zip(X, y): if f * p >", "try: ft_dif = float(home['fta']) - float(away['fta']) pt_dif = float(home['pts']) -", "plt.title('FTA vs Point Differential') print(correlation(X, y)) plt.plot([min(X), max(X)], fit_y, color", "df = json.load(f) X, y = [], [] for match,", "c = 0 for f, p in zip(X, y): if", "home, away = stats['home'], stats['away'] if home['mp'] != away['mp'] !=", "continue X.append(ft_dif) y.append(pt_dif) c = 0 for f, p in", "/ len(X)) slope, intercept, r, p, std = linregress(X, y)", "y = [], [] for match, stats in df.items(): home,", "except: continue X.append(ft_dif) y.append(pt_dif) c = 0 for f, p", "f = lambda x: x*slope + intercept fit_y = [f(min(X)),", "continue except: continue X.append(ft_dif) y.append(pt_dif) c = 0 for f,", "r, p, std = linregress(X, y) f = lambda x:", "!= '240': continue try: ft_dif = float(home['fta']) - float(away['fta']) pt_dif", "plt.ylabel('Point Differential') plt.title('FTA vs Point Differential') print(correlation(X, y)) plt.plot([min(X), max(X)],", "import * with open('data/game_stats.json', 'r') as f: df = json.load(f)", "intercept, r, p, std = linregress(X, y) f = lambda", "if abs(pt_dif) > 10: continue except: continue X.append(ft_dif) y.append(pt_dif) c", "as f: df = json.load(f) X, y = [], []", "import json import matplotlib.pyplot as plt from pprint import pprint", "stats['away'] if home['mp'] != away['mp'] != '240': continue try: ft_dif", "if home['mp'] != away['mp'] != '240': continue try: ft_dif =", "as np from scipy.stats import linregress from util.stats import *", "!= away['mp'] != '240': continue try: ft_dif = float(home['fta']) -", "= float(home['pts']) - float(away['pts']) if 
abs(pt_dif) > 10: continue except:", "= lambda x: x*slope + intercept fit_y = [f(min(X)), f(max(X))]", "[], [] for match, stats in df.items(): home, away =", "intercept fit_y = [f(min(X)), f(max(X))] plt.xlabel('Free Throw Attempts') plt.ylabel('Point Differential')", "df.items(): home, away = stats['home'], stats['away'] if home['mp'] != away['mp']", "0: c += 1 print(c / len(X)) slope, intercept, r,", "if f * p > 0: c += 1 print(c", "[] for match, stats in df.items(): home, away = stats['home'],", "print(correlation(X, y)) plt.plot([min(X), max(X)], fit_y, color = 'red') plt.scatter(X, y)", "+= 1 print(c / len(X)) slope, intercept, r, p, std", "ft_dif = float(home['fta']) - float(away['fta']) pt_dif = float(home['pts']) - float(away['pts'])", "in zip(X, y): if f * p > 0: c", "f * p > 0: c += 1 print(c /", "float(away['pts']) if abs(pt_dif) > 10: continue except: continue X.append(ft_dif) y.append(pt_dif)" ]
[ "horizontal_flip=True, vertical_flip=True, fill_mode='nearest') imgen_train = imgen.flow(X_img_tr, ID_train, batch_size=32, seed=7) print('Generating", "= y_tr[i] ID_num_dic[ID_train[i]] = X_num_tr[i, :] print('Loading test data ...')", "label to categorical/one-hot ID_train, y_tr, y_val = to_categorical(ID_train), to_categorical(y_tr), to_categorical((y_val))", "test def resize_img(img, max_dim=96): max_axis = np.argmax(img.size) scale = max_dim", "i in range(len(ID_train)): ID_y_dic[ID_train[i]] = y_tr[i] ID_num_dic[ID_train[i]] = X_num_tr[i, :]", "all_y[i] feature = {'image': _bytes_feature(image.tostring()), 'num': _bytes_feature(num.tostring()), 'label': _bytes_feature(label.tostring())} example", "{} for i in range(len(ID_train)): ID_y_dic[ID_train[i]] = y_tr[i] ID_num_dic[ID_train[i]] =", "i, id in enumerate(ids): img = load_img('../images/{}.jpg'.format(id), grayscale=True) img =", "= ID_y_dic[all_ID[i]] all_y = to_categorical(all_y).astype(np.bool) print('Data shapes:') print('Image:', all_images.shape) print('Label:',", "seed=7) print('Generating augmented images') all_images = [] all_ID = []", "h1 = (max_dim - h) >> 1 h2 = h1", "ID_num_dic[all_ID[i]] all_y[i] = ID_y_dic[all_ID[i]] all_y = to_categorical(all_y).astype(np.bool) print('Data shapes:') print('Image:',", "Prepare ID-to-label and ID-to-numerical dictionary ID_y_dic, ID_num_dic = {}, {}", "for i in range(len(ID_train)): ID_y_dic[ID_train[i]] = y_tr[i] ID_num_dic[ID_train[i]] = X_num_tr[i,", "else: h1, h2, w1, w2 = 0, h, 0, w", "StratifiedShuffleSplit(n_splits=1, train_size=split, test_size=1 - split, random_state=random_state) train_idx, val_idx = next(sss.split(X_num_train,", "...') for i in range(len(all_images)): if i % 891 ==", "...') (ID_train, X_num_tr, X_img_tr, y_tr), (ID_val, X_num_val, X_img_val, y_val) =", "tf.train.Feature(int64_list=tf.train.Int64List(value=[value])) def _float32_feature(value): return tf.train.Feature(float_list=tf.train.FloatList(value=value)) def 
write_val_data(): val_data_path = '../tfrecords/val_data_1.tfrecords'", "X_img_tr, y_tr), (ID_val, X_num_val, X_img_val, y_val) def load_test_data(): ID, X_num_test", "= tf.python_io.TFRecordCompressionType.GZIP # train_writer = tf.python_io.TFRecordWriter(train_data_path, options=tf.python_io.TFRecordOptions(compression)) train_writer = tf.python_io.TFRecordWriter(train_data_path)", "X_img_test = load_test_data() # Convert label to categorical/one-hot ID_train, y_tr,", "from sklearn.model_selection import StratifiedShuffleSplit from sklearn.preprocessing import LabelEncoder, StandardScaler def", "vertical_flip=True, fill_mode='nearest') imgen_train = imgen.flow(X_img_tr, ID_train, batch_size=32, seed=7) print('Generating augmented", "os.path.exists(val_data_path): print('Warning: old file exists, removed.') os.remove(val_data_path) val_image, val_num, val_label", "from keras.preprocessing.image import img_to_array, load_img from keras.utils.np_utils import to_categorical from", ":] print('Loading test data ...') ID_test, X_num_test, X_img_test = load_test_data()", "print('Numerical:', all_nums.shape) train_data_path = '../tfrecords/train_data_1.tfrecords' if os.path.exists(train_data_path): print('Warning: old file", "for i in range(len(val_image)): image, num, label = val_image[i], val_num[i],", "image, num, label = all_images[i], all_nums[i], all_y[i] feature = {'image':", "import to_categorical from sklearn.model_selection import StratifiedShuffleSplit from sklearn.preprocessing import LabelEncoder,", "0, w X[i][h1:h2, w1:w2][:] = x return np.around(X / 255)", "{'image': _bytes_feature(image.tostring()), 'num': _bytes_feature(num.tostring()), 'label': _bytes_feature(label.tostring())} example = tf.train.Example(features=tf.train.Features(feature=feature)) val_writer.write(example.SerializeToString())", "= LabelEncoder().fit(y).transform(y) X = StandardScaler().fit(data).transform(data) if standardize else data.values return", "data.values return ID.values, X, y 
def load_numeric_test(standardize=True): data = pd.read_csv('../test.csv')", "ID[val_idx], X_num_train[val_idx], X_img_train[val_idx], y[val_idx] return (ID_tr, X_num_tr, X_img_tr, y_tr), (ID_val,", "in range(len(all_images)): if i % 891 == 0: print('Writing {}", "in range(len(all_ID)): all_nums[i, :] = ID_num_dic[all_ID[i]] all_y[i] = ID_y_dic[all_ID[i]] all_y", "max_dim / img.size[max_axis] return img.resize((int(img.size[0] * scale), int(img.size[1] * scale)))", "= StratifiedShuffleSplit(n_splits=1, train_size=split, test_size=1 - split, random_state=random_state) train_idx, val_idx =", "= np.argmax(img.size) scale = max_dim / img.size[max_axis] return img.resize((int(img.size[0] *", "into tfrecord ...') for i in range(len(val_image)): image, num, label", "- split, random_state=random_state) train_idx, val_idx = next(sss.split(X_num_train, y)) ID_tr, X_num_tr,", "def _bytes_feature(value): return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) def _int64_feature(value): return tf.train.Feature(int64_list=tf.train.Int64List(value=[value])) def _float32_feature(value):", "X_img_train[train_idx], y[train_idx] ID_val, X_num_val, X_img_val, y_val = ID[val_idx], X_num_train[val_idx], X_img_train[val_idx],", "= pd.read_csv('../test.csv') ID = data.pop('id') test = StandardScaler().fit(data).transform(data) if standardize", "img_to_array, load_img from keras.utils.np_utils import to_categorical from sklearn.model_selection import StratifiedShuffleSplit", "batch {}'.format(i // 28, i % 28)) X, ID =", "tf.python_io.TFRecordWriter(train_data_path) print('Writing data into tfrecord ...') for i in range(len(all_images)):", "data.pop('id') test = StandardScaler().fit(data).transform(data) if standardize else data.values return ID.values,", "X, ID = imgen_train.next() all_images.append(X) all_ID.append(np.argmax(ID, axis=1)) all_images = np.concatenate(all_images).astype(np.bool)", "sklearn.model_selection import StratifiedShuffleSplit from sklearn.preprocessing 
import LabelEncoder, StandardScaler def load_numeric_training(standardize=True):", "print('Image:', all_images.shape) print('Label:', all_y.shape) print('Numerical:', all_nums.shape) train_data_path = '../tfrecords/train_data_1.tfrecords' if", "= load_img_data(ID) sss = StratifiedShuffleSplit(n_splits=1, train_size=split, test_size=1 - split, random_state=random_state)", "load_test_data(): ID, X_num_test = load_numeric_test() X_img_test = load_img_data(ID) return ID,", "imgen = ImageDataGenerator(rotation_range=20, zoom_range=0.2, horizontal_flip=True, vertical_flip=True, fill_mode='nearest') imgen_train = imgen.flow(X_img_tr,", "val_label[i] feature = {'image': _bytes_feature(image.tostring()), 'num': _bytes_feature(num.tostring()), 'label': _bytes_feature(label.tostring())} example", "y = data.pop('species') y = LabelEncoder().fit(y).transform(y) X = StandardScaler().fit(data).transform(data) if", "= [] all_ID = [] p = True for i", "images') all_images = [] all_ID = [] p = True", "from sklearn.preprocessing import LabelEncoder, StandardScaler def load_numeric_training(standardize=True): data = pd.read_csv('../train.csv')", "Convert label to categorical/one-hot ID_train, y_tr, y_val = to_categorical(ID_train), to_categorical(y_tr),", "X_num_test, X_img_test = load_test_data() # Convert label to categorical/one-hot ID_train,", "* scale))) def load_img_data(ids, max_dim=96, center=True): X = np.empty((len(ids), max_dim,", "tf.python_io.TFRecordWriter(val_data_path) print('Writing data into tfrecord ...') for i in range(len(val_image)):", "= h1 + h w1 = (max_dim - w) >>", "= to_categorical(all_y).astype(np.bool) print('Data shapes:') print('Image:', all_images.shape) print('Label:', all_y.shape) print('Numerical:', all_nums.shape)", "X_num_val.astype(np.float64), y_val.astype(np.bool) print(val_image.shape, val_num.shape, val_label.shape) val_writer = tf.python_io.TFRecordWriter(val_data_path) print('Writing data", "range(len(all_images)): if i % 891 == 0: 
print('Writing {} th", "print('Writing data into tfrecord ...') for i in range(len(all_images)): if", "ID_test, X_num_test, X_img_test = load_test_data() # Convert label to categorical/one-hot", "range(28 * 200): print('Generating augmented images for epoch {}, batch", "ID_val, X_num_val, X_img_val, y_val = ID[val_idx], X_num_train[val_idx], X_img_train[val_idx], y[val_idx] return", "= np.zeros(all_ID.shape) all_nums = np.zeros((all_ID.shape[0], X_num_tr.shape[1])) for i in range(len(all_ID)):", "- w) >> 1 w2 = w1 + w else:", "= tf.python_io.TFRecordWriter(train_data_path, options=tf.python_io.TFRecordOptions(compression)) train_writer = tf.python_io.TFRecordWriter(train_data_path) print('Writing data into tfrecord", "val_image, val_num, val_label = X_img_val.astype(np.bool), X_num_val.astype(np.float64), y_val.astype(np.bool) print(val_image.shape, val_num.shape, val_label.shape)", "shapes:') print('Image:', all_images.shape) print('Label:', all_y.shape) print('Numerical:', all_nums.shape) train_data_path = '../tfrecords/train_data_1.tfrecords'", "val_label = X_img_val.astype(np.bool), X_num_val.astype(np.float64), y_val.astype(np.bool) print(val_image.shape, val_num.shape, val_label.shape) val_writer =", "// 891)) image, num, label = all_images[i], all_nums[i], all_y[i] feature", "X[i][h1:h2, w1:w2][:] = x return np.around(X / 255) def load_train_data(split=0.9,", "id in enumerate(ids): img = load_img('../images/{}.jpg'.format(id), grayscale=True) img = resize_img(img,", "{}, batch {}'.format(i // 28, i % 28)) X, ID", "numpy as np import pandas as pd import tensorflow as", "next(sss.split(X_num_train, y)) ID_tr, X_num_tr, X_img_tr, y_tr = ID[train_idx], X_num_train[train_idx], X_img_train[train_idx],", "w1 + w else: h1, h2, w1, w2 = 0,", "standardize else data.values return ID.values, X, y def load_numeric_test(standardize=True): data", "_bytes_feature(image.tostring()), 'num': _bytes_feature(num.tostring()), 'label': _bytes_feature(label.tostring())} example = 
tf.train.Example(features=tf.train.Features(feature=feature)) val_writer.write(example.SerializeToString()) print('Done!')", "// 28, i % 28)) X, ID = imgen_train.next() all_images.append(X)", "print('Label:', all_y.shape) print('Numerical:', all_nums.shape) train_data_path = '../tfrecords/train_data_1.tfrecords' if os.path.exists(train_data_path): print('Warning:", "0, h, 0, w X[i][h1:h2, w1:w2][:] = x return np.around(X", "to_categorical((y_val)) def _bytes_feature(value): return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) def _int64_feature(value): return tf.train.Feature(int64_list=tf.train.Int64List(value=[value])) def", "891)) image, num, label = all_images[i], all_nums[i], all_y[i] feature =", "tf.train.Example(features=tf.train.Features(feature=feature)) val_writer.write(example.SerializeToString()) print('Done!') def write_train_data(): imgen = ImageDataGenerator(rotation_range=20, zoom_range=0.2, horizontal_flip=True,", "= X_num_tr[i, :] print('Loading test data ...') ID_test, X_num_test, X_img_test", "X_num_tr, X_img_tr, y_tr), (ID_val, X_num_val, X_img_val, y_val) = load_train_data() #", "pandas as pd import tensorflow as tf from keras.preprocessing.image import", "exists, removed.') os.remove(val_data_path) val_image, val_num, val_label = X_img_val.astype(np.bool), X_num_val.astype(np.float64), y_val.astype(np.bool)", "ID_y_dic, ID_num_dic = {}, {} for i in range(len(ID_train)): ID_y_dic[ID_train[i]]", "ID[train_idx], X_num_train[train_idx], X_img_train[train_idx], y[train_idx] ID_val, X_num_val, X_img_val, y_val = ID[val_idx],", "data.values return ID.values, test def resize_img(img, max_dim=96): max_axis = np.argmax(img.size)", "max_axis = np.argmax(img.size) scale = max_dim / img.size[max_axis] return img.resize((int(img.size[0]", "range(len(all_ID)): all_nums[i, :] = ID_num_dic[all_ID[i]] all_y[i] = ID_y_dic[all_ID[i]] all_y =", "h) >> 1 h2 = h1 + h w1 =", "sss = StratifiedShuffleSplit(n_splits=1, train_size=split, test_size=1 - 
split, random_state=random_state) train_idx, val_idx", "train_size=split, test_size=1 - split, random_state=random_state) train_idx, val_idx = next(sss.split(X_num_train, y))", "y = load_numeric_training() X_img_train = load_img_data(ID) sss = StratifiedShuffleSplit(n_splits=1, train_size=split,", "= load_numeric_test() X_img_test = load_img_data(ID) return ID, X_num_test, X_img_test print('Loading", "w X[i][h1:h2, w1:w2][:] = x return np.around(X / 255) def", "split, random_state=random_state) train_idx, val_idx = next(sss.split(X_num_train, y)) ID_tr, X_num_tr, X_img_tr,", "load_img_data(ID) sss = StratifiedShuffleSplit(n_splits=1, train_size=split, test_size=1 - split, random_state=random_state) train_idx,", "np.empty((len(ids), max_dim, max_dim, 1)) for i, id in enumerate(ids): img", "all_y.shape) print('Numerical:', all_nums.shape) train_data_path = '../tfrecords/train_data_1.tfrecords' if os.path.exists(train_data_path): print('Warning: old", "data into tfrecord ...') for i in range(len(all_images)): if i", "= tf.train.Example(features=tf.train.Features(feature=feature)) val_writer.write(example.SerializeToString()) print('Done!') def write_train_data(): imgen = ImageDataGenerator(rotation_range=20, zoom_range=0.2,", "ID = data.pop('id') y = data.pop('species') y = LabelEncoder().fit(y).transform(y) X", "StandardScaler().fit(data).transform(data) if standardize else data.values return ID.values, X, y def", "as tf from keras.preprocessing.image import ImageDataGenerator from keras.preprocessing.image import img_to_array,", "ID.values, test def resize_img(img, max_dim=96): max_axis = np.argmax(img.size) scale =", "val_data_path = '../tfrecords/val_data_1.tfrecords' if os.path.exists(val_data_path): print('Warning: old file exists, removed.')", "ID = imgen_train.next() all_images.append(X) all_ID.append(np.argmax(ID, axis=1)) all_images = np.concatenate(all_images).astype(np.bool) all_ID", "X_num_val, X_img_val, y_val) def load_test_data(): ID, X_num_test = 
load_numeric_test() X_img_test", "i in range(len(all_images)): if i % 891 == 0: print('Writing", "+ w else: h1, h2, w1, w2 = 0, h,", "X_num_test, X_img_test print('Loading train data ...') (ID_train, X_num_tr, X_img_tr, y_tr),", "val_writer.write(example.SerializeToString()) print('Done!') def write_train_data(): imgen = ImageDataGenerator(rotation_range=20, zoom_range=0.2, horizontal_flip=True, vertical_flip=True,", "'label': _bytes_feature(label.tostring())} example = tf.train.Example(features=tf.train.Features(feature=feature)) val_writer.write(example.SerializeToString()) print('Done!') def write_train_data(): imgen", "w2 = w1 + w else: h1, h2, w1, w2", "= {'image': _bytes_feature(image.tostring()), 'num': _bytes_feature(num.tostring()), 'label': _bytes_feature(label.tostring())} example = tf.train.Example(features=tf.train.Features(feature=feature))", "load_img('../images/{}.jpg'.format(id), grayscale=True) img = resize_img(img, max_dim=max_dim) x = img_to_array(img) h,", "random_state=7): ID, X_num_train, y = load_numeric_training() X_img_train = load_img_data(ID) sss", "(ID_val, X_num_val, X_img_val, y_val) def load_test_data(): ID, X_num_test = load_numeric_test()", "pd.read_csv('../test.csv') ID = data.pop('id') test = StandardScaler().fit(data).transform(data) if standardize else", "return img.resize((int(img.size[0] * scale), int(img.size[1] * scale))) def load_img_data(ids, max_dim=96,", "tf.python_io.TFRecordCompressionType.GZIP # train_writer = tf.python_io.TFRecordWriter(train_data_path, options=tf.python_io.TFRecordOptions(compression)) train_writer = tf.python_io.TFRecordWriter(train_data_path) print('Writing", "def load_test_data(): ID, X_num_test = load_numeric_test() X_img_test = load_img_data(ID) return", "resize_img(img, max_dim=96): max_axis = np.argmax(img.size) scale = max_dim / img.size[max_axis]", "y_val = ID[val_idx], X_num_train[val_idx], X_img_train[val_idx], y[val_idx] return (ID_tr, X_num_tr, X_img_tr,", "file exists, removed.') 
os.remove(val_data_path) val_image, val_num, val_label = X_img_val.astype(np.bool), X_num_val.astype(np.float64),", "= StandardScaler().fit(data).transform(data) if standardize else data.values return ID.values, test def", "center=True): X = np.empty((len(ids), max_dim, max_dim, 1)) for i, id", "np.around(X / 255) def load_train_data(split=0.9, random_state=7): ID, X_num_train, y =", "y_val = to_categorical(ID_train), to_categorical(y_tr), to_categorical((y_val)) def _bytes_feature(value): return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) def", "write_val_data(): val_data_path = '../tfrecords/val_data_1.tfrecords' if os.path.exists(val_data_path): print('Warning: old file exists,", "all_nums[i], all_y[i] feature = {'image': _bytes_feature(image.tostring()), 'num': _bytes_feature(num.tostring()), 'label': _bytes_feature(label.tostring())}", "import tensorflow as tf from keras.preprocessing.image import ImageDataGenerator from keras.preprocessing.image", "if standardize else data.values return ID.values, test def resize_img(img, max_dim=96):", "dictionary ID_y_dic, ID_num_dic = {}, {} for i in range(len(ID_train)):", "ID_num_dic[ID_train[i]] = X_num_tr[i, :] print('Loading test data ...') ID_test, X_num_test,", "X_img_tr, y_tr), (ID_val, X_num_val, X_img_val, y_val) = load_train_data() # Prepare", "= next(sss.split(X_num_train, y)) ID_tr, X_num_tr, X_img_tr, y_tr = ID[train_idx], X_num_train[train_idx],", "# Convert label to categorical/one-hot ID_train, y_tr, y_val = to_categorical(ID_train),", "tf.train.Feature(float_list=tf.train.FloatList(value=value)) def write_val_data(): val_data_path = '../tfrecords/val_data_1.tfrecords' if os.path.exists(val_data_path): print('Warning: old", "_bytes_feature(num.tostring()), 'label': _bytes_feature(label.tostring())} example = tf.train.Example(features=tf.train.Features(feature=feature)) val_writer.write(example.SerializeToString()) print('Done!') def write_train_data():", "ID_train, batch_size=32, seed=7) 
print('Generating augmented images') all_images = [] all_ID", "(max_dim - w) >> 1 w2 = w1 + w", "X = np.empty((len(ids), max_dim, max_dim, 1)) for i, id in", "w else: h1, h2, w1, w2 = 0, h, 0,", "y_tr[i] ID_num_dic[ID_train[i]] = X_num_tr[i, :] print('Loading test data ...') ID_test,", "p = True for i in range(28 * 200): print('Generating", "w1:w2][:] = x return np.around(X / 255) def load_train_data(split=0.9, random_state=7):", "return np.around(X / 255) def load_train_data(split=0.9, random_state=7): ID, X_num_train, y", "i in range(28 * 200): print('Generating augmented images for epoch", "+ h w1 = (max_dim - w) >> 1 w2", "return tf.train.Feature(float_list=tf.train.FloatList(value=value)) def write_val_data(): val_data_path = '../tfrecords/val_data_1.tfrecords' if os.path.exists(val_data_path): print('Warning:", "num, label = all_images[i], all_nums[i], all_y[i] feature = {'image': _bytes_feature(image.tostring()),", "for i in range(len(all_images)): if i % 891 == 0:", "for epoch {}, batch {}'.format(i // 28, i % 28))", "os.remove(val_data_path) val_image, val_num, val_label = X_img_val.astype(np.bool), X_num_val.astype(np.float64), y_val.astype(np.bool) print(val_image.shape, val_num.shape,", "X = StandardScaler().fit(data).transform(data) if standardize else data.values return ID.values, X,", "_float32_feature(value): return tf.train.Feature(float_list=tf.train.FloatList(value=value)) def write_val_data(): val_data_path = '../tfrecords/val_data_1.tfrecords' if os.path.exists(val_data_path):", "(ID_tr, X_num_tr, X_img_tr, y_tr), (ID_val, X_num_val, X_img_val, y_val) def load_test_data():", "'../tfrecords/train_data_1.tfrecords' if os.path.exists(train_data_path): print('Warning: old file exists, removed.') os.remove(train_data_path) #", "ID, X_num_test, X_img_test print('Loading train data ...') (ID_train, X_num_tr, X_img_tr,", "epoch {}, batch {}'.format(i // 28, i % 28)) X,", "for i in range(len(all_ID)): all_nums[i, :] = ID_num_dic[all_ID[i]] all_y[i] =", 
"data.pop('id') y = data.pop('species') y = LabelEncoder().fit(y).transform(y) X = StandardScaler().fit(data).transform(data)", "1)) for i, id in enumerate(ids): img = load_img('../images/{}.jpg'.format(id), grayscale=True)", "ImageDataGenerator(rotation_range=20, zoom_range=0.2, horizontal_flip=True, vertical_flip=True, fill_mode='nearest') imgen_train = imgen.flow(X_img_tr, ID_train, batch_size=32,", "* 200): print('Generating augmented images for epoch {}, batch {}'.format(i", "feature = {'image': _bytes_feature(image.tostring()), 'num': _bytes_feature(num.tostring()), 'label': _bytes_feature(label.tostring())} example =", "h, w = x.shape[:2] if center: h1 = (max_dim -", "= StandardScaler().fit(data).transform(data) if standardize else data.values return ID.values, X, y", "891 == 0: print('Writing {} th epoch data ...'.format(i //", "= load_img_data(ID) return ID, X_num_test, X_img_test print('Loading train data ...')", "y_tr), (ID_val, X_num_val, X_img_val, y_val) = load_train_data() # Prepare ID-to-label", "def write_val_data(): val_data_path = '../tfrecords/val_data_1.tfrecords' if os.path.exists(val_data_path): print('Warning: old file", "_bytes_feature(label.tostring())} example = tf.train.Example(features=tf.train.Features(feature=feature)) val_writer.write(example.SerializeToString()) print('Done!') def write_train_data(): imgen =", "else data.values return ID.values, test def resize_img(img, max_dim=96): max_axis =", "exists, removed.') os.remove(train_data_path) # compression = tf.python_io.TFRecordCompressionType.GZIP # train_writer =", "standardize else data.values return ID.values, test def resize_img(img, max_dim=96): max_axis", "StandardScaler def load_numeric_training(standardize=True): data = pd.read_csv('../train.csv') ID = data.pop('id') y", "print('Warning: old file exists, removed.') os.remove(train_data_path) # compression = tf.python_io.TFRecordCompressionType.GZIP", "axis=1)) all_images = np.concatenate(all_images).astype(np.bool) all_ID = 
np.concatenate(all_ID) all_y = np.zeros(all_ID.shape)", "all_images.append(X) all_ID.append(np.argmax(ID, axis=1)) all_images = np.concatenate(all_images).astype(np.bool) all_ID = np.concatenate(all_ID) all_y", "if center: h1 = (max_dim - h) >> 1 h2", "h w1 = (max_dim - w) >> 1 w2 =", "return (ID_tr, X_num_tr, X_img_tr, y_tr), (ID_val, X_num_val, X_img_val, y_val) def", "tensorflow as tf from keras.preprocessing.image import ImageDataGenerator from keras.preprocessing.image import", "else data.values return ID.values, X, y def load_numeric_test(standardize=True): data =", "_int64_feature(value): return tf.train.Feature(int64_list=tf.train.Int64List(value=[value])) def _float32_feature(value): return tf.train.Feature(float_list=tf.train.FloatList(value=value)) def write_val_data(): val_data_path", "y[val_idx] return (ID_tr, X_num_tr, X_img_tr, y_tr), (ID_val, X_num_val, X_img_val, y_val)", "pd import tensorflow as tf from keras.preprocessing.image import ImageDataGenerator from", "print('Writing data into tfrecord ...') for i in range(len(val_image)): image,", "tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) def _int64_feature(value): return tf.train.Feature(int64_list=tf.train.Int64List(value=[value])) def _float32_feature(value): return tf.train.Feature(float_list=tf.train.FloatList(value=value)) def", "= X_img_val.astype(np.bool), X_num_val.astype(np.float64), y_val.astype(np.bool) print(val_image.shape, val_num.shape, val_label.shape) val_writer = tf.python_io.TFRecordWriter(val_data_path)", "{'image': _bytes_feature(image.tostring()), 'num': _bytes_feature(num.tostring()), 'label': _bytes_feature(label.tostring())} example = tf.train.Example(features=tf.train.Features(feature=feature)) train_writer.write(example.SerializeToString())", "if standardize else data.values return ID.values, X, y def load_numeric_test(standardize=True):", "ID-to-label and ID-to-numerical dictionary ID_y_dic, ID_num_dic = {}, {} for", 
"StandardScaler().fit(data).transform(data) if standardize else data.values return ID.values, test def resize_img(img,", "load_numeric_test() X_img_test = load_img_data(ID) return ID, X_num_test, X_img_test print('Loading train", "data = pd.read_csv('../train.csv') ID = data.pop('id') y = data.pop('species') y", "w = x.shape[:2] if center: h1 = (max_dim - h)", "y_tr, y_val = to_categorical(ID_train), to_categorical(y_tr), to_categorical((y_val)) def _bytes_feature(value): return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))", "% 28)) X, ID = imgen_train.next() all_images.append(X) all_ID.append(np.argmax(ID, axis=1)) all_images", "as np import pandas as pd import tensorflow as tf", "i % 28)) X, ID = imgen_train.next() all_images.append(X) all_ID.append(np.argmax(ID, axis=1))", "into tfrecord ...') for i in range(len(all_images)): if i %", "{} th epoch data ...'.format(i // 891)) image, num, label", "def _float32_feature(value): return tf.train.Feature(float_list=tf.train.FloatList(value=value)) def write_val_data(): val_data_path = '../tfrecords/val_data_1.tfrecords' if", "imgen_train.next() all_images.append(X) all_ID.append(np.argmax(ID, axis=1)) all_images = np.concatenate(all_images).astype(np.bool) all_ID = np.concatenate(all_ID)", "import numpy as np import pandas as pd import tensorflow", "= np.empty((len(ids), max_dim, max_dim, 1)) for i, id in enumerate(ids):", "max_dim=max_dim) x = img_to_array(img) h, w = x.shape[:2] if center:", "= np.concatenate(all_ID) all_y = np.zeros(all_ID.shape) all_nums = np.zeros((all_ID.shape[0], X_num_tr.shape[1])) for", "compression = tf.python_io.TFRecordCompressionType.GZIP # train_writer = tf.python_io.TFRecordWriter(train_data_path, options=tf.python_io.TFRecordOptions(compression)) train_writer =", "= ImageDataGenerator(rotation_range=20, zoom_range=0.2, horizontal_flip=True, vertical_flip=True, fill_mode='nearest') imgen_train = imgen.flow(X_img_tr, ID_train,", "img = 
load_img('../images/{}.jpg'.format(id), grayscale=True) img = resize_img(img, max_dim=max_dim) x =", "return tf.train.Feature(int64_list=tf.train.Int64List(value=[value])) def _float32_feature(value): return tf.train.Feature(float_list=tf.train.FloatList(value=value)) def write_val_data(): val_data_path =", "train_writer = tf.python_io.TFRecordWriter(train_data_path, options=tf.python_io.TFRecordOptions(compression)) train_writer = tf.python_io.TFRecordWriter(train_data_path) print('Writing data into", "removed.') os.remove(val_data_path) val_image, val_num, val_label = X_img_val.astype(np.bool), X_num_val.astype(np.float64), y_val.astype(np.bool) print(val_image.shape,", "X_num_train, y = load_numeric_training() X_img_train = load_img_data(ID) sss = StratifiedShuffleSplit(n_splits=1,", "data ...') (ID_train, X_num_tr, X_img_tr, y_tr), (ID_val, X_num_val, X_img_val, y_val)", "int(img.size[1] * scale))) def load_img_data(ids, max_dim=96, center=True): X = np.empty((len(ids),", "# Prepare ID-to-label and ID-to-numerical dictionary ID_y_dic, ID_num_dic = {},", "in range(len(ID_train)): ID_y_dic[ID_train[i]] = y_tr[i] ID_num_dic[ID_train[i]] = X_num_tr[i, :] print('Loading", "augmented images') all_images = [] all_ID = [] p =", "all_images = np.concatenate(all_images).astype(np.bool) all_ID = np.concatenate(all_ID) all_y = np.zeros(all_ID.shape) all_nums", "ID_tr, X_num_tr, X_img_tr, y_tr = ID[train_idx], X_num_train[train_idx], X_img_train[train_idx], y[train_idx] ID_val,", "load_img_data(ids, max_dim=96, center=True): X = np.empty((len(ids), max_dim, max_dim, 1)) for", "import StratifiedShuffleSplit from sklearn.preprocessing import LabelEncoder, StandardScaler def load_numeric_training(standardize=True): data", "load_img from keras.utils.np_utils import to_categorical from sklearn.model_selection import StratifiedShuffleSplit from", "keras.preprocessing.image import img_to_array, load_img from keras.utils.np_utils import to_categorical from sklearn.model_selection", 
"y = LabelEncoder().fit(y).transform(y) X = StandardScaler().fit(data).transform(data) if standardize else data.values", "X_num_val, X_img_val, y_val = ID[val_idx], X_num_train[val_idx], X_img_train[val_idx], y[val_idx] return (ID_tr,", "= [] p = True for i in range(28 *", "load_numeric_training(standardize=True): data = pd.read_csv('../train.csv') ID = data.pop('id') y = data.pop('species')", "data.pop('species') y = LabelEncoder().fit(y).transform(y) X = StandardScaler().fit(data).transform(data) if standardize else", "= load_img('../images/{}.jpg'.format(id), grayscale=True) img = resize_img(img, max_dim=max_dim) x = img_to_array(img)", "tf.python_io.TFRecordWriter(train_data_path, options=tf.python_io.TFRecordOptions(compression)) train_writer = tf.python_io.TFRecordWriter(train_data_path) print('Writing data into tfrecord ...')", "x.shape[:2] if center: h1 = (max_dim - h) >> 1", "= '../tfrecords/train_data_1.tfrecords' if os.path.exists(train_data_path): print('Warning: old file exists, removed.') os.remove(train_data_path)", "in range(len(val_image)): image, num, label = val_image[i], val_num[i], val_label[i] feature", "i in range(len(all_ID)): all_nums[i, :] = ID_num_dic[all_ID[i]] all_y[i] = ID_y_dic[all_ID[i]]", "LabelEncoder().fit(y).transform(y) X = StandardScaler().fit(data).transform(data) if standardize else data.values return ID.values,", "tfrecord ...') for i in range(len(val_image)): image, num, label =", "print('Loading train data ...') (ID_train, X_num_tr, X_img_tr, y_tr), (ID_val, X_num_val,", "as pd import tensorflow as tf from keras.preprocessing.image import ImageDataGenerator", "max_dim=96, center=True): X = np.empty((len(ids), max_dim, max_dim, 1)) for i,", "resize_img(img, max_dim=max_dim) x = img_to_array(img) h, w = x.shape[:2] if", "y[train_idx] ID_val, X_num_val, X_img_val, y_val = ID[val_idx], X_num_train[val_idx], X_img_train[val_idx], y[val_idx]", "= pd.read_csv('../train.csv') ID = data.pop('id') y = data.pop('species') y =", "= 
np.concatenate(all_images).astype(np.bool) all_ID = np.concatenate(all_ID) all_y = np.zeros(all_ID.shape) all_nums =", "h1, h2, w1, w2 = 0, h, 0, w X[i][h1:h2,", "keras.utils.np_utils import to_categorical from sklearn.model_selection import StratifiedShuffleSplit from sklearn.preprocessing import", "old file exists, removed.') os.remove(train_data_path) # compression = tf.python_io.TFRecordCompressionType.GZIP #", "= load_numeric_training() X_img_train = load_img_data(ID) sss = StratifiedShuffleSplit(n_splits=1, train_size=split, test_size=1", "from keras.preprocessing.image import ImageDataGenerator from keras.preprocessing.image import img_to_array, load_img from", "all_y[i] = ID_y_dic[all_ID[i]] all_y = to_categorical(all_y).astype(np.bool) print('Data shapes:') print('Image:', all_images.shape)", "ID.values, X, y def load_numeric_test(standardize=True): data = pd.read_csv('../test.csv') ID =", "i in range(len(val_image)): image, num, label = val_image[i], val_num[i], val_label[i]", "print('Done!') def write_train_data(): imgen = ImageDataGenerator(rotation_range=20, zoom_range=0.2, horizontal_flip=True, vertical_flip=True, fill_mode='nearest')", "image, num, label = val_image[i], val_num[i], val_label[i] feature = {'image':", "import img_to_array, load_img from keras.utils.np_utils import to_categorical from sklearn.model_selection import", ":] = ID_num_dic[all_ID[i]] all_y[i] = ID_y_dic[all_ID[i]] all_y = to_categorical(all_y).astype(np.bool) print('Data", "scale = max_dim / img.size[max_axis] return img.resize((int(img.size[0] * scale), int(img.size[1]", "load_test_data() # Convert label to categorical/one-hot ID_train, y_tr, y_val =", "X_img_train[val_idx], y[val_idx] return (ID_tr, X_num_tr, X_img_tr, y_tr), (ID_val, X_num_val, X_img_val,", "w) >> 1 w2 = w1 + w else: h1,", "X_num_val, X_img_val, y_val) = load_train_data() # Prepare ID-to-label and ID-to-numerical", "train_writer = tf.python_io.TFRecordWriter(train_data_path) print('Writing data into 
tfrecord ...') for i", "max_dim, max_dim, 1)) for i, id in enumerate(ids): img =", "ID, X_num_train, y = load_numeric_training() X_img_train = load_img_data(ID) sss =", "load_numeric_test(standardize=True): data = pd.read_csv('../test.csv') ID = data.pop('id') test = StandardScaler().fit(data).transform(data)", "print('Data shapes:') print('Image:', all_images.shape) print('Label:', all_y.shape) print('Numerical:', all_nums.shape) train_data_path =", "np.zeros((all_ID.shape[0], X_num_tr.shape[1])) for i in range(len(all_ID)): all_nums[i, :] = ID_num_dic[all_ID[i]]", "scale), int(img.size[1] * scale))) def load_img_data(ids, max_dim=96, center=True): X =", "range(len(ID_train)): ID_y_dic[ID_train[i]] = y_tr[i] ID_num_dic[ID_train[i]] = X_num_tr[i, :] print('Loading test", "batch_size=32, seed=7) print('Generating augmented images') all_images = [] all_ID =", "y_tr), (ID_val, X_num_val, X_img_val, y_val) def load_test_data(): ID, X_num_test =", "to_categorical from sklearn.model_selection import StratifiedShuffleSplit from sklearn.preprocessing import LabelEncoder, StandardScaler", "to categorical/one-hot ID_train, y_tr, y_val = to_categorical(ID_train), to_categorical(y_tr), to_categorical((y_val)) def", "h, 0, w X[i][h1:h2, w1:w2][:] = x return np.around(X /", "all_images.shape) print('Label:', all_y.shape) print('Numerical:', all_nums.shape) train_data_path = '../tfrecords/train_data_1.tfrecords' if os.path.exists(train_data_path):", "= ID[val_idx], X_num_train[val_idx], X_img_train[val_idx], y[val_idx] return (ID_tr, X_num_tr, X_img_tr, y_tr),", "= to_categorical(ID_train), to_categorical(y_tr), to_categorical((y_val)) def _bytes_feature(value): return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) def _int64_feature(value):", "= x return np.around(X / 255) def load_train_data(split=0.9, random_state=7): ID,", "= resize_img(img, max_dim=max_dim) x = img_to_array(img) h, w = x.shape[:2]", "data into tfrecord ...') for i in range(len(val_image)): 
image, num,", ">> 1 w2 = w1 + w else: h1, h2,", "def write_train_data(): imgen = ImageDataGenerator(rotation_range=20, zoom_range=0.2, horizontal_flip=True, vertical_flip=True, fill_mode='nearest') imgen_train", "all_images[i], all_nums[i], all_y[i] feature = {'image': _bytes_feature(image.tostring()), 'num': _bytes_feature(num.tostring()), 'label':", "pd.read_csv('../train.csv') ID = data.pop('id') y = data.pop('species') y = LabelEncoder().fit(y).transform(y)", "to_categorical(y_tr), to_categorical((y_val)) def _bytes_feature(value): return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) def _int64_feature(value): return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))", "img.resize((int(img.size[0] * scale), int(img.size[1] * scale))) def load_img_data(ids, max_dim=96, center=True):", "max_dim=96): max_axis = np.argmax(img.size) scale = max_dim / img.size[max_axis] return", "np.concatenate(all_ID) all_y = np.zeros(all_ID.shape) all_nums = np.zeros((all_ID.shape[0], X_num_tr.shape[1])) for i", "(ID_val, X_num_val, X_img_val, y_val) = load_train_data() # Prepare ID-to-label and", "if os.path.exists(train_data_path): print('Warning: old file exists, removed.') os.remove(train_data_path) # compression", "return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) def _int64_feature(value): return tf.train.Feature(int64_list=tf.train.Int64List(value=[value])) def _float32_feature(value): return tf.train.Feature(float_list=tf.train.FloatList(value=value))", ">> 1 h2 = h1 + h w1 = (max_dim", "scale))) def load_img_data(ids, max_dim=96, center=True): X = np.empty((len(ids), max_dim, max_dim,", "[] all_ID = [] p = True for i in", "sklearn.preprocessing import LabelEncoder, StandardScaler def load_numeric_training(standardize=True): data = pd.read_csv('../train.csv') ID", "val_label.shape) val_writer = tf.python_io.TFRecordWriter(val_data_path) print('Writing data into tfrecord ...') for", "zoom_range=0.2, horizontal_flip=True, 
vertical_flip=True, fill_mode='nearest') imgen_train = imgen.flow(X_img_tr, ID_train, batch_size=32, seed=7)", "all_y = to_categorical(all_y).astype(np.bool) print('Data shapes:') print('Image:', all_images.shape) print('Label:', all_y.shape) print('Numerical:',", "= data.pop('id') test = StandardScaler().fit(data).transform(data) if standardize else data.values return", "if os.path.exists(val_data_path): print('Warning: old file exists, removed.') os.remove(val_data_path) val_image, val_num,", "import ImageDataGenerator from keras.preprocessing.image import img_to_array, load_img from keras.utils.np_utils import", "= val_image[i], val_num[i], val_label[i] feature = {'image': _bytes_feature(image.tostring()), 'num': _bytes_feature(num.tostring()),", "print('Generating augmented images for epoch {}, batch {}'.format(i // 28,", "all_y = np.zeros(all_ID.shape) all_nums = np.zeros((all_ID.shape[0], X_num_tr.shape[1])) for i in", "print(val_image.shape, val_num.shape, val_label.shape) val_writer = tf.python_io.TFRecordWriter(val_data_path) print('Writing data into tfrecord", "= ID[train_idx], X_num_train[train_idx], X_img_train[train_idx], y[train_idx] ID_val, X_num_val, X_img_val, y_val =", "y def load_numeric_test(standardize=True): data = pd.read_csv('../test.csv') ID = data.pop('id') test", "X_num_tr.shape[1])) for i in range(len(all_ID)): all_nums[i, :] = ID_num_dic[all_ID[i]] all_y[i]", "w1 = (max_dim - w) >> 1 w2 = w1", "'../tfrecords/val_data_1.tfrecords' if os.path.exists(val_data_path): print('Warning: old file exists, removed.') os.remove(val_data_path) val_image,", "val_writer = tf.python_io.TFRecordWriter(val_data_path) print('Writing data into tfrecord ...') for i", "def load_train_data(split=0.9, random_state=7): ID, X_num_train, y = load_numeric_training() X_img_train =", "from keras.utils.np_utils import to_categorical from sklearn.model_selection import StratifiedShuffleSplit from sklearn.preprocessing", "= (max_dim - h) >> 1 h2 = h1 +", 
"StratifiedShuffleSplit from sklearn.preprocessing import LabelEncoder, StandardScaler def load_numeric_training(standardize=True): data =", "if i % 891 == 0: print('Writing {} th epoch", "max_dim, 1)) for i, id in enumerate(ids): img = load_img('../images/{}.jpg'.format(id),", "28)) X, ID = imgen_train.next() all_images.append(X) all_ID.append(np.argmax(ID, axis=1)) all_images =", "np.zeros(all_ID.shape) all_nums = np.zeros((all_ID.shape[0], X_num_tr.shape[1])) for i in range(len(all_ID)): all_nums[i,", "_bytes_feature(value): return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) def _int64_feature(value): return tf.train.Feature(int64_list=tf.train.Int64List(value=[value])) def _float32_feature(value): return", "example = tf.train.Example(features=tf.train.Features(feature=feature)) val_writer.write(example.SerializeToString()) print('Done!') def write_train_data(): imgen = ImageDataGenerator(rotation_range=20,", "print('Warning: old file exists, removed.') os.remove(val_data_path) val_image, val_num, val_label =", "= load_test_data() # Convert label to categorical/one-hot ID_train, y_tr, y_val", "[] p = True for i in range(28 * 200):", "train data ...') (ID_train, X_num_tr, X_img_tr, y_tr), (ID_val, X_num_val, X_img_val,", "os.path.exists(train_data_path): print('Warning: old file exists, removed.') os.remove(train_data_path) # compression =", "0: print('Writing {} th epoch data ...'.format(i // 891)) image,", "def load_numeric_training(standardize=True): data = pd.read_csv('../train.csv') ID = data.pop('id') y =", "return ID.values, test def resize_img(img, max_dim=96): max_axis = np.argmax(img.size) scale", "center: h1 = (max_dim - h) >> 1 h2 =", "= all_images[i], all_nums[i], all_y[i] feature = {'image': _bytes_feature(image.tostring()), 'num': _bytes_feature(num.tostring()),", "to_categorical(ID_train), to_categorical(y_tr), to_categorical((y_val)) def _bytes_feature(value): return 
tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) def _int64_feature(value): return", "= w1 + w else: h1, h2, w1, w2 =", "enumerate(ids): img = load_img('../images/{}.jpg'.format(id), grayscale=True) img = resize_img(img, max_dim=max_dim) x", "= True for i in range(28 * 200): print('Generating augmented", "images for epoch {}, batch {}'.format(i // 28, i %", "ID_num_dic = {}, {} for i in range(len(ID_train)): ID_y_dic[ID_train[i]] =", "w1, w2 = 0, h, 0, w X[i][h1:h2, w1:w2][:] =", "categorical/one-hot ID_train, y_tr, y_val = to_categorical(ID_train), to_categorical(y_tr), to_categorical((y_val)) def _bytes_feature(value):", "return ID.values, X, y def load_numeric_test(standardize=True): data = pd.read_csv('../test.csv') ID", "= max_dim / img.size[max_axis] return img.resize((int(img.size[0] * scale), int(img.size[1] *", "LabelEncoder, StandardScaler def load_numeric_training(standardize=True): data = pd.read_csv('../train.csv') ID = data.pop('id')", "= tf.python_io.TFRecordWriter(train_data_path) print('Writing data into tfrecord ...') for i in", "for i in range(28 * 200): print('Generating augmented images for", "{}, {} for i in range(len(ID_train)): ID_y_dic[ID_train[i]] = y_tr[i] ID_num_dic[ID_train[i]]", "and ID-to-numerical dictionary ID_y_dic, ID_num_dic = {}, {} for i", "= img_to_array(img) h, w = x.shape[:2] if center: h1 =", "X_img_val.astype(np.bool), X_num_val.astype(np.float64), y_val.astype(np.bool) print(val_image.shape, val_num.shape, val_label.shape) val_writer = tf.python_io.TFRecordWriter(val_data_path) print('Writing", "label = val_image[i], val_num[i], val_label[i] feature = {'image': _bytes_feature(image.tostring()), 'num':", "all_nums[i, :] = ID_num_dic[all_ID[i]] all_y[i] = ID_y_dic[all_ID[i]] all_y = to_categorical(all_y).astype(np.bool)", "test data ...') ID_test, X_num_test, X_img_test = load_test_data() # Convert", "X, y def load_numeric_test(standardize=True): data = pd.read_csv('../test.csv') ID = data.pop('id')", "x 
return np.around(X / 255) def load_train_data(split=0.9, random_state=7): ID, X_num_train,", "train_data_path = '../tfrecords/train_data_1.tfrecords' if os.path.exists(train_data_path): print('Warning: old file exists, removed.')", "# compression = tf.python_io.TFRecordCompressionType.GZIP # train_writer = tf.python_io.TFRecordWriter(train_data_path, options=tf.python_io.TFRecordOptions(compression)) train_writer", "# train_writer = tf.python_io.TFRecordWriter(train_data_path, options=tf.python_io.TFRecordOptions(compression)) train_writer = tf.python_io.TFRecordWriter(train_data_path) print('Writing data", "augmented images for epoch {}, batch {}'.format(i // 28, i", "'num': _bytes_feature(num.tostring()), 'label': _bytes_feature(label.tostring())} example = tf.train.Example(features=tf.train.Features(feature=feature)) train_writer.write(example.SerializeToString()) print('Done!') write_val_data()", "def resize_img(img, max_dim=96): max_axis = np.argmax(img.size) scale = max_dim /", "img = resize_img(img, max_dim=max_dim) x = img_to_array(img) h, w =", "random_state=random_state) train_idx, val_idx = next(sss.split(X_num_train, y)) ID_tr, X_num_tr, X_img_tr, y_tr", "_bytes_feature(image.tostring()), 'num': _bytes_feature(num.tostring()), 'label': _bytes_feature(label.tostring())} example = tf.train.Example(features=tf.train.Features(feature=feature)) train_writer.write(example.SerializeToString()) print('Done!')", "= ID_num_dic[all_ID[i]] all_y[i] = ID_y_dic[all_ID[i]] all_y = to_categorical(all_y).astype(np.bool) print('Data shapes:')", "old file exists, removed.') os.remove(val_data_path) val_image, val_num, val_label = X_img_val.astype(np.bool),", "x = img_to_array(img) h, w = x.shape[:2] if center: h1", "write_train_data(): imgen = ImageDataGenerator(rotation_range=20, zoom_range=0.2, horizontal_flip=True, vertical_flip=True, fill_mode='nearest') imgen_train =", "print('Generating augmented images') all_images = [] all_ID = [] p", "i % 891 == 0: print('Writing 
{} th epoch data", "np import pandas as pd import tensorflow as tf from", "y)) ID_tr, X_num_tr, X_img_tr, y_tr = ID[train_idx], X_num_train[train_idx], X_img_train[train_idx], y[train_idx]", "return ID, X_num_test, X_img_test print('Loading train data ...') (ID_train, X_num_tr,", "True for i in range(28 * 200): print('Generating augmented images", "epoch data ...'.format(i // 891)) image, num, label = all_images[i],", "1 w2 = w1 + w else: h1, h2, w1,", "removed.') os.remove(train_data_path) # compression = tf.python_io.TFRecordCompressionType.GZIP # train_writer = tf.python_io.TFRecordWriter(train_data_path,", "file exists, removed.') os.remove(train_data_path) # compression = tf.python_io.TFRecordCompressionType.GZIP # train_writer", "X_img_test print('Loading train data ...') (ID_train, X_num_tr, X_img_tr, y_tr), (ID_val,", "data ...') ID_test, X_num_test, X_img_test = load_test_data() # Convert label", "data = pd.read_csv('../test.csv') ID = data.pop('id') test = StandardScaler().fit(data).transform(data) if", "test_size=1 - split, random_state=random_state) train_idx, val_idx = next(sss.split(X_num_train, y)) ID_tr,", "= x.shape[:2] if center: h1 = (max_dim - h) >>", "X_num_tr, X_img_tr, y_tr), (ID_val, X_num_val, X_img_val, y_val) def load_test_data(): ID,", "y_tr = ID[train_idx], X_num_train[train_idx], X_img_train[train_idx], y[train_idx] ID_val, X_num_val, X_img_val, y_val", "imgen_train = imgen.flow(X_img_tr, ID_train, batch_size=32, seed=7) print('Generating augmented images') all_images", "ImageDataGenerator from keras.preprocessing.image import img_to_array, load_img from keras.utils.np_utils import to_categorical", "label = all_images[i], all_nums[i], all_y[i] feature = {'image': _bytes_feature(image.tostring()), 'num':", "all_ID = np.concatenate(all_ID) all_y = np.zeros(all_ID.shape) all_nums = np.zeros((all_ID.shape[0], X_num_tr.shape[1]))", "= data.pop('id') y = data.pop('species') y = LabelEncoder().fit(y).transform(y) X =", "ID_y_dic[all_ID[i]] 
all_y = to_categorical(all_y).astype(np.bool) print('Data shapes:') print('Image:', all_images.shape) print('Label:', all_y.shape)", "load_img_data(ID) return ID, X_num_test, X_img_test print('Loading train data ...') (ID_train,", "y_val) = load_train_data() # Prepare ID-to-label and ID-to-numerical dictionary ID_y_dic,", "X_img_tr, y_tr = ID[train_idx], X_num_train[train_idx], X_img_train[train_idx], y[train_idx] ID_val, X_num_val, X_img_val,", "28, i % 28)) X, ID = imgen_train.next() all_images.append(X) all_ID.append(np.argmax(ID,", "X_img_val, y_val) = load_train_data() # Prepare ID-to-label and ID-to-numerical dictionary", "= imgen.flow(X_img_tr, ID_train, batch_size=32, seed=7) print('Generating augmented images') all_images =", "np.argmax(img.size) scale = max_dim / img.size[max_axis] return img.resize((int(img.size[0] * scale),", "for i, id in enumerate(ids): img = load_img('../images/{}.jpg'.format(id), grayscale=True) img", "h2, w1, w2 = 0, h, 0, w X[i][h1:h2, w1:w2][:]", "import LabelEncoder, StandardScaler def load_numeric_training(standardize=True): data = pd.read_csv('../train.csv') ID =", "all_images = [] all_ID = [] p = True for", "h1 + h w1 = (max_dim - w) >> 1", "load_train_data(split=0.9, random_state=7): ID, X_num_train, y = load_numeric_training() X_img_train = load_img_data(ID)", "= np.zeros((all_ID.shape[0], X_num_tr.shape[1])) for i in range(len(all_ID)): all_nums[i, :] =", "val_num, val_label = X_img_val.astype(np.bool), X_num_val.astype(np.float64), y_val.astype(np.bool) print(val_image.shape, val_num.shape, val_label.shape) val_writer", "train_idx, val_idx = next(sss.split(X_num_train, y)) ID_tr, X_num_tr, X_img_tr, y_tr =", "X_num_tr[i, :] print('Loading test data ...') ID_test, X_num_test, X_img_test =", "test = StandardScaler().fit(data).transform(data) if standardize else data.values return ID.values, test", "import pandas as pd import tensorflow as tf from keras.preprocessing.image", "y_val.astype(np.bool) print(val_image.shape, 
val_num.shape, val_label.shape) val_writer = tf.python_io.TFRecordWriter(val_data_path) print('Writing data into", "= '../tfrecords/val_data_1.tfrecords' if os.path.exists(val_data_path): print('Warning: old file exists, removed.') os.remove(val_data_path)", "/ 255) def load_train_data(split=0.9, random_state=7): ID, X_num_train, y = load_numeric_training()", "val_idx = next(sss.split(X_num_train, y)) ID_tr, X_num_tr, X_img_tr, y_tr = ID[train_idx],", "...'.format(i // 891)) image, num, label = all_images[i], all_nums[i], all_y[i]", "1 h2 = h1 + h w1 = (max_dim -", "= load_train_data() # Prepare ID-to-label and ID-to-numerical dictionary ID_y_dic, ID_num_dic", "200): print('Generating augmented images for epoch {}, batch {}'.format(i //", "= 0, h, 0, w X[i][h1:h2, w1:w2][:] = x return", "...') ID_test, X_num_test, X_img_test = load_test_data() # Convert label to", "255) def load_train_data(split=0.9, random_state=7): ID, X_num_train, y = load_numeric_training() X_img_train", "import os import numpy as np import pandas as pd", "all_nums = np.zeros((all_ID.shape[0], X_num_tr.shape[1])) for i in range(len(all_ID)): all_nums[i, :]", "in enumerate(ids): img = load_img('../images/{}.jpg'.format(id), grayscale=True) img = resize_img(img, max_dim=max_dim)", "img_to_array(img) h, w = x.shape[:2] if center: h1 = (max_dim", "tfrecord ...') for i in range(len(all_images)): if i % 891", "load_train_data() # Prepare ID-to-label and ID-to-numerical dictionary ID_y_dic, ID_num_dic =", "X_img_train = load_img_data(ID) sss = StratifiedShuffleSplit(n_splits=1, train_size=split, test_size=1 - split,", "ID, X_num_test = load_numeric_test() X_img_test = load_img_data(ID) return ID, X_num_test,", "== 0: print('Writing {} th epoch data ...'.format(i // 891))", "num, label = val_image[i], val_num[i], val_label[i] feature = {'image': _bytes_feature(image.tostring()),", "= tf.python_io.TFRecordWriter(val_data_path) print('Writing data into tfrecord ...') for i in", "load_numeric_training() 
X_img_train = load_img_data(ID) sss = StratifiedShuffleSplit(n_splits=1, train_size=split, test_size=1 -", "X_img_val, y_val) def load_test_data(): ID, X_num_test = load_numeric_test() X_img_test =", "X_num_tr, X_img_tr, y_tr = ID[train_idx], X_num_train[train_idx], X_img_train[train_idx], y[train_idx] ID_val, X_num_val,", "ID_train, y_tr, y_val = to_categorical(ID_train), to_categorical(y_tr), to_categorical((y_val)) def _bytes_feature(value): return", "val_image[i], val_num[i], val_label[i] feature = {'image': _bytes_feature(image.tostring()), 'num': _bytes_feature(num.tostring()), 'label':", "X_num_train[val_idx], X_img_train[val_idx], y[val_idx] return (ID_tr, X_num_tr, X_img_tr, y_tr), (ID_val, X_num_val,", "/ img.size[max_axis] return img.resize((int(img.size[0] * scale), int(img.size[1] * scale))) def", "- h) >> 1 h2 = h1 + h w1", "range(len(val_image)): image, num, label = val_image[i], val_num[i], val_label[i] feature =", "w2 = 0, h, 0, w X[i][h1:h2, w1:w2][:] = x", "X_num_test = load_numeric_test() X_img_test = load_img_data(ID) return ID, X_num_test, X_img_test", "def _int64_feature(value): return tf.train.Feature(int64_list=tf.train.Int64List(value=[value])) def _float32_feature(value): return tf.train.Feature(float_list=tf.train.FloatList(value=value)) def write_val_data():", "os import numpy as np import pandas as pd import", "X_img_test = load_img_data(ID) return ID, X_num_test, X_img_test print('Loading train data", "= {}, {} for i in range(len(ID_train)): ID_y_dic[ID_train[i]] = y_tr[i]", "data ...'.format(i // 891)) image, num, label = all_images[i], all_nums[i],", "val_num.shape, val_label.shape) val_writer = tf.python_io.TFRecordWriter(val_data_path) print('Writing data into tfrecord ...')", "all_ID = [] p = True for i in range(28", "all_ID.append(np.argmax(ID, axis=1)) all_images = np.concatenate(all_images).astype(np.bool) all_ID = np.concatenate(all_ID) all_y =", "= (max_dim - w) >> 1 w2 = w1 +", "keras.preprocessing.image import 
ImageDataGenerator from keras.preprocessing.image import img_to_array, load_img from keras.utils.np_utils", "ID = data.pop('id') test = StandardScaler().fit(data).transform(data) if standardize else data.values", "tf from keras.preprocessing.image import ImageDataGenerator from keras.preprocessing.image import img_to_array, load_img", "ID_y_dic[ID_train[i]] = y_tr[i] ID_num_dic[ID_train[i]] = X_num_tr[i, :] print('Loading test data", "{}'.format(i // 28, i % 28)) X, ID = imgen_train.next()", "in range(28 * 200): print('Generating augmented images for epoch {},", "imgen.flow(X_img_tr, ID_train, batch_size=32, seed=7) print('Generating augmented images') all_images = []", "def load_img_data(ids, max_dim=96, center=True): X = np.empty((len(ids), max_dim, max_dim, 1))", "* scale), int(img.size[1] * scale))) def load_img_data(ids, max_dim=96, center=True): X", "os.remove(train_data_path) # compression = tf.python_io.TFRecordCompressionType.GZIP # train_writer = tf.python_io.TFRecordWriter(train_data_path, options=tf.python_io.TFRecordOptions(compression))", "options=tf.python_io.TFRecordOptions(compression)) train_writer = tf.python_io.TFRecordWriter(train_data_path) print('Writing data into tfrecord ...') for", "print('Loading test data ...') ID_test, X_num_test, X_img_test = load_test_data() #", "(max_dim - h) >> 1 h2 = h1 + h", "th epoch data ...'.format(i // 891)) image, num, label =", "= imgen_train.next() all_images.append(X) all_ID.append(np.argmax(ID, axis=1)) all_images = np.concatenate(all_images).astype(np.bool) all_ID =", "y_val) def load_test_data(): ID, X_num_test = load_numeric_test() X_img_test = load_img_data(ID)", "def load_numeric_test(standardize=True): data = pd.read_csv('../test.csv') ID = data.pop('id') test =", "val_num[i], val_label[i] feature = {'image': _bytes_feature(image.tostring()), 'num': _bytes_feature(num.tostring()), 'label': _bytes_feature(label.tostring())}", "= data.pop('species') y = LabelEncoder().fit(y).transform(y) X = 
StandardScaler().fit(data).transform(data) if standardize", "(ID_train, X_num_tr, X_img_tr, y_tr), (ID_val, X_num_val, X_img_val, y_val) = load_train_data()", "h2 = h1 + h w1 = (max_dim - w)", "X_num_train[train_idx], X_img_train[train_idx], y[train_idx] ID_val, X_num_val, X_img_val, y_val = ID[val_idx], X_num_train[val_idx],", "ID-to-numerical dictionary ID_y_dic, ID_num_dic = {}, {} for i in", "fill_mode='nearest') imgen_train = imgen.flow(X_img_tr, ID_train, batch_size=32, seed=7) print('Generating augmented images')", "np.concatenate(all_images).astype(np.bool) all_ID = np.concatenate(all_ID) all_y = np.zeros(all_ID.shape) all_nums = np.zeros((all_ID.shape[0],", "img.size[max_axis] return img.resize((int(img.size[0] * scale), int(img.size[1] * scale))) def load_img_data(ids,", "all_nums.shape) train_data_path = '../tfrecords/train_data_1.tfrecords' if os.path.exists(train_data_path): print('Warning: old file exists,", "grayscale=True) img = resize_img(img, max_dim=max_dim) x = img_to_array(img) h, w", "...') for i in range(len(val_image)): image, num, label = val_image[i],", "print('Writing {} th epoch data ...'.format(i // 891)) image, num,", "'num': _bytes_feature(num.tostring()), 'label': _bytes_feature(label.tostring())} example = tf.train.Example(features=tf.train.Features(feature=feature)) val_writer.write(example.SerializeToString()) print('Done!') def", "% 891 == 0: print('Writing {} th epoch data ...'.format(i", "to_categorical(all_y).astype(np.bool) print('Data shapes:') print('Image:', all_images.shape) print('Label:', all_y.shape) print('Numerical:', all_nums.shape) train_data_path", "X_img_val, y_val = ID[val_idx], X_num_train[val_idx], X_img_train[val_idx], y[val_idx] return (ID_tr, X_num_tr," ]
[ "w = map(int, input().split()) for _ in range(n): entrada =", "coding: utf-8 -*- n, w = map(int, input().split()) for _", "_ in range(n): entrada = input() last_space = entrada.rfind(' ')", "map(int, input().split()) for _ in range(n): entrada = input() last_space", "= input() last_space = entrada.rfind(' ') if int(entrada[last_space:]) > w:", "in range(n): entrada = input() last_space = entrada.rfind(' ') if", "-*- coding: utf-8 -*- n, w = map(int, input().split()) for", "n, w = map(int, input().split()) for _ in range(n): entrada", "entrada = input() last_space = entrada.rfind(' ') if int(entrada[last_space:]) >", "for _ in range(n): entrada = input() last_space = entrada.rfind('", "range(n): entrada = input() last_space = entrada.rfind(' ') if int(entrada[last_space:])", "= map(int, input().split()) for _ in range(n): entrada = input()", "utf-8 -*- n, w = map(int, input().split()) for _ in", "<reponame>ErickSimoes/URI-Online-Judge<gh_stars>0 # -*- coding: utf-8 -*- n, w = map(int,", "# -*- coding: utf-8 -*- n, w = map(int, input().split())", "input() last_space = entrada.rfind(' ') if int(entrada[last_space:]) > w: print(entrada[:last_space])", "-*- n, w = map(int, input().split()) for _ in range(n):", "input().split()) for _ in range(n): entrada = input() last_space =" ]
[ "ensemble.RandomForestRegressor(n_estimators=2, random_state=1) estimator.fit([[1], [2]], [1, 2]) assembler = assemblers.RandomForestModelAssembler(estimator) actual", "ast.NumVal(2.5), ast.CompOpType.LTE), ast.VectorVal([ ast.NumVal(1.0), ast.NumVal(0.0)]), ast.VectorVal([ ast.NumVal(0.0), ast.NumVal(1.0)])), ast.BinNumOpType.ADD), ast.NumVal(0.5),", "sklearn import ensemble from m2cgen import assemblers, ast from tests", "ast.BinNumExpr( ast.BinNumExpr( ast.NumVal(1.0), ast.IfExpr( ast.CompExpr( ast.FeatureRef(0), ast.NumVal(1.5), ast.CompOpType.LTE), ast.NumVal(1.0), ast.NumVal(2.0)),", "= assemblers.RandomForestModelAssembler(estimator) actual = assembler.assemble() expected = ast.BinVectorNumExpr( ast.BinVectorExpr( ast.IfExpr(", "def test_two_conditions(): estimator = ensemble.RandomForestRegressor(n_estimators=2, random_state=13) estimator.fit([[1], [2], [3]], [1,", "[2], [3]], [1, 2, 3]) assembler = assemblers.RandomForestModelAssembler(estimator) actual =", "assert utils.cmp_exprs(actual, expected) def test_two_conditions(): estimator = ensemble.RandomForestRegressor(n_estimators=2, random_state=13) estimator.fit([[1],", "estimator = ensemble.RandomForestRegressor(n_estimators=2, random_state=1) estimator.fit([[1], [2]], [1, 2]) assembler =", "utils.cmp_exprs(actual, expected) def test_multi_class(): estimator = ensemble.RandomForestClassifier( n_estimators=2, random_state=13) estimator.fit([[1],", "= assembler.assemble() expected = ast.BinVectorNumExpr( ast.BinVectorExpr( ast.IfExpr( ast.CompExpr( ast.FeatureRef(0), ast.NumVal(1.5),", "ast.NumVal(1.0)]), ast.VectorVal([ ast.NumVal(1.0), ast.NumVal(0.0)])), ast.IfExpr( ast.CompExpr( ast.FeatureRef(0), ast.NumVal(2.5), ast.CompOpType.LTE), ast.VectorVal([", "ensemble.RandomForestClassifier( n_estimators=2, random_state=13) estimator.fit([[1], [2], [3]], [1, -1, 1]) assembler", "ast.CompOpType.LTE), ast.NumVal(1.0), ast.NumVal(2.0)), ast.IfExpr( ast.CompExpr( ast.FeatureRef(0), ast.NumVal(2.5), 
ast.CompOpType.LTE), ast.NumVal(2.0), ast.NumVal(3.0)),", "ast.FeatureRef(0), ast.NumVal(2.5), ast.CompOpType.LTE), ast.NumVal(2.0), ast.NumVal(3.0)), ast.BinNumOpType.ADD), ast.NumVal(0.5), ast.BinNumOpType.MUL) assert utils.cmp_exprs(actual,", "= assemblers.RandomForestModelAssembler(estimator) actual = assembler.assemble() expected = ast.BinNumExpr( ast.BinNumExpr( ast.NumVal(1.0),", "ast.IfExpr( ast.CompExpr( ast.FeatureRef(0), ast.NumVal(1.5), ast.CompOpType.LTE), ast.VectorVal([ ast.NumVal(0.0), ast.NumVal(1.0)]), ast.VectorVal([ ast.NumVal(1.0),", "random_state=13) estimator.fit([[1], [2], [3]], [1, -1, 1]) assembler = assemblers.RandomForestModelAssembler(estimator)", "ast.BinNumExpr( ast.BinNumExpr( ast.IfExpr( ast.CompExpr( ast.FeatureRef(0), ast.NumVal(1.5), ast.CompOpType.LTE), ast.NumVal(1.0), ast.NumVal(2.0)), ast.IfExpr(", "ast.FeatureRef(0), ast.NumVal(2.5), ast.CompOpType.LTE), ast.VectorVal([ ast.NumVal(1.0), ast.NumVal(0.0)]), ast.VectorVal([ ast.NumVal(0.0), ast.NumVal(1.0)])), ast.BinNumOpType.ADD),", "= ensemble.RandomForestRegressor(n_estimators=2, random_state=1) estimator.fit([[1], [2]], [1, 2]) assembler = assemblers.RandomForestModelAssembler(estimator)", "ast.NumVal(1.0), ast.NumVal(2.0)), ast.BinNumOpType.ADD), ast.NumVal(0.5), ast.BinNumOpType.MUL) assert utils.cmp_exprs(actual, expected) def test_two_conditions():", "<gh_stars>1-10 from sklearn import ensemble from m2cgen import assemblers, ast", "ast.NumVal(1.0), ast.NumVal(2.0)), ast.IfExpr( ast.CompExpr( ast.FeatureRef(0), ast.NumVal(2.5), ast.CompOpType.LTE), ast.NumVal(2.0), ast.NumVal(3.0)), ast.BinNumOpType.ADD),", "ast.NumVal(2.5), ast.CompOpType.LTE), ast.NumVal(2.0), ast.NumVal(3.0)), ast.BinNumOpType.ADD), ast.NumVal(0.5), ast.BinNumOpType.MUL) assert utils.cmp_exprs(actual, expected)", "= ast.BinVectorNumExpr( ast.BinVectorExpr( ast.IfExpr( ast.CompExpr( ast.FeatureRef(0), ast.NumVal(1.5), ast.CompOpType.LTE), ast.VectorVal([ ast.NumVal(0.0),", "= ast.BinNumExpr( 
ast.BinNumExpr( ast.IfExpr( ast.CompExpr( ast.FeatureRef(0), ast.NumVal(1.5), ast.CompOpType.LTE), ast.NumVal(1.0), ast.NumVal(2.0)),", "ast.CompExpr( ast.FeatureRef(0), ast.NumVal(1.5), ast.CompOpType.LTE), ast.NumVal(1.0), ast.NumVal(2.0)), ast.BinNumOpType.ADD), ast.NumVal(0.5), ast.BinNumOpType.MUL) assert", "def test_multi_class(): estimator = ensemble.RandomForestClassifier( n_estimators=2, random_state=13) estimator.fit([[1], [2], [3]],", "ast.BinNumOpType.ADD), ast.NumVal(0.5), ast.BinNumOpType.MUL) assert utils.cmp_exprs(actual, expected) def test_two_conditions(): estimator =", "ast.NumVal(0.0), ast.NumVal(1.0)]), ast.VectorVal([ ast.NumVal(1.0), ast.NumVal(0.0)])), ast.IfExpr( ast.CompExpr( ast.FeatureRef(0), ast.NumVal(2.5), ast.CompOpType.LTE),", "= ensemble.RandomForestRegressor(n_estimators=2, random_state=13) estimator.fit([[1], [2], [3]], [1, 2, 3]) assembler", "assemblers, ast from tests import utils def test_single_condition(): estimator =", "random_state=1) estimator.fit([[1], [2]], [1, 2]) assembler = assemblers.RandomForestModelAssembler(estimator) actual =", "assemblers.RandomForestModelAssembler(estimator) actual = assembler.assemble() expected = ast.BinNumExpr( ast.BinNumExpr( ast.NumVal(1.0), ast.IfExpr(", "ast.NumVal(0.0)]), ast.VectorVal([ ast.NumVal(0.0), ast.NumVal(1.0)])), ast.BinNumOpType.ADD), ast.NumVal(0.5), ast.BinNumOpType.MUL) assert utils.cmp_exprs(actual, expected)", "ast.FeatureRef(0), ast.NumVal(1.5), ast.CompOpType.LTE), ast.NumVal(1.0), ast.NumVal(2.0)), ast.BinNumOpType.ADD), ast.NumVal(0.5), ast.BinNumOpType.MUL) assert utils.cmp_exprs(actual,", "ast.NumVal(2.0)), ast.BinNumOpType.ADD), ast.NumVal(0.5), ast.BinNumOpType.MUL) assert utils.cmp_exprs(actual, expected) def test_two_conditions(): estimator", "estimator.fit([[1], [2], [3]], [1, -1, 1]) assembler = assemblers.RandomForestModelAssembler(estimator) actual", "expected) def test_multi_class(): estimator = ensemble.RandomForestClassifier( n_estimators=2, 
random_state=13) estimator.fit([[1], [2],", "ast.NumVal(2.0), ast.NumVal(3.0)), ast.BinNumOpType.ADD), ast.NumVal(0.5), ast.BinNumOpType.MUL) assert utils.cmp_exprs(actual, expected) def test_multi_class():", "ast.BinNumExpr( ast.IfExpr( ast.CompExpr( ast.FeatureRef(0), ast.NumVal(1.5), ast.CompOpType.LTE), ast.NumVal(1.0), ast.NumVal(2.0)), ast.IfExpr( ast.CompExpr(", "def test_single_condition(): estimator = ensemble.RandomForestRegressor(n_estimators=2, random_state=1) estimator.fit([[1], [2]], [1, 2])", "ast.CompExpr( ast.FeatureRef(0), ast.NumVal(2.5), ast.CompOpType.LTE), ast.NumVal(2.0), ast.NumVal(3.0)), ast.BinNumOpType.ADD), ast.NumVal(0.5), ast.BinNumOpType.MUL) assert", "ast.CompOpType.LTE), ast.NumVal(2.0), ast.NumVal(3.0)), ast.BinNumOpType.ADD), ast.NumVal(0.5), ast.BinNumOpType.MUL) assert utils.cmp_exprs(actual, expected) def", "assembler = assemblers.RandomForestModelAssembler(estimator) actual = assembler.assemble() expected = ast.BinVectorNumExpr( ast.BinVectorExpr(", "ast.IfExpr( ast.CompExpr( ast.FeatureRef(0), ast.NumVal(1.5), ast.CompOpType.LTE), ast.NumVal(1.0), ast.NumVal(2.0)), ast.BinNumOpType.ADD), ast.NumVal(0.5), ast.BinNumOpType.MUL)", "ensemble.RandomForestRegressor(n_estimators=2, random_state=13) estimator.fit([[1], [2], [3]], [1, 2, 3]) assembler =", "ensemble from m2cgen import assemblers, ast from tests import utils", "assembler.assemble() expected = ast.BinNumExpr( ast.BinNumExpr( ast.NumVal(1.0), ast.IfExpr( ast.CompExpr( ast.FeatureRef(0), ast.NumVal(1.5),", "= assembler.assemble() expected = ast.BinNumExpr( ast.BinNumExpr( ast.IfExpr( ast.CompExpr( ast.FeatureRef(0), ast.NumVal(1.5),", "test_multi_class(): estimator = ensemble.RandomForestClassifier( n_estimators=2, random_state=13) estimator.fit([[1], [2], [3]], [1,", "ast.CompOpType.LTE), ast.VectorVal([ ast.NumVal(0.0), ast.NumVal(1.0)]), ast.VectorVal([ ast.NumVal(1.0), ast.NumVal(0.0)])), ast.IfExpr( ast.CompExpr( ast.FeatureRef(0),", "ast.NumVal(1.0), 
ast.NumVal(0.0)])), ast.IfExpr( ast.CompExpr( ast.FeatureRef(0), ast.NumVal(2.5), ast.CompOpType.LTE), ast.VectorVal([ ast.NumVal(1.0), ast.NumVal(0.0)]),", "actual = assembler.assemble() expected = ast.BinNumExpr( ast.BinNumExpr( ast.NumVal(1.0), ast.IfExpr( ast.CompExpr(", "ast.VectorVal([ ast.NumVal(1.0), ast.NumVal(0.0)]), ast.VectorVal([ ast.NumVal(0.0), ast.NumVal(1.0)])), ast.BinNumOpType.ADD), ast.NumVal(0.5), ast.BinNumOpType.MUL) assert", "ast.CompOpType.LTE), ast.VectorVal([ ast.NumVal(1.0), ast.NumVal(0.0)]), ast.VectorVal([ ast.NumVal(0.0), ast.NumVal(1.0)])), ast.BinNumOpType.ADD), ast.NumVal(0.5), ast.BinNumOpType.MUL)", "import utils def test_single_condition(): estimator = ensemble.RandomForestRegressor(n_estimators=2, random_state=1) estimator.fit([[1], [2]],", "ast.CompOpType.LTE), ast.NumVal(1.0), ast.NumVal(2.0)), ast.BinNumOpType.ADD), ast.NumVal(0.5), ast.BinNumOpType.MUL) assert utils.cmp_exprs(actual, expected) def", "-1, 1]) assembler = assemblers.RandomForestModelAssembler(estimator) actual = assembler.assemble() expected =", "ast.NumVal(0.0)])), ast.IfExpr( ast.CompExpr( ast.FeatureRef(0), ast.NumVal(2.5), ast.CompOpType.LTE), ast.VectorVal([ ast.NumVal(1.0), ast.NumVal(0.0)]), ast.VectorVal([", "estimator.fit([[1], [2], [3]], [1, 2, 3]) assembler = assemblers.RandomForestModelAssembler(estimator) actual", "[3]], [1, -1, 1]) assembler = assemblers.RandomForestModelAssembler(estimator) actual = assembler.assemble()", "ast.IfExpr( ast.CompExpr( ast.FeatureRef(0), ast.NumVal(1.5), ast.CompOpType.LTE), ast.NumVal(1.0), ast.NumVal(2.0)), ast.IfExpr( ast.CompExpr( ast.FeatureRef(0),", "ast from tests import utils def test_single_condition(): estimator = ensemble.RandomForestRegressor(n_estimators=2,", "ast.NumVal(1.5), ast.CompOpType.LTE), ast.VectorVal([ ast.NumVal(0.0), ast.NumVal(1.0)]), ast.VectorVal([ ast.NumVal(1.0), ast.NumVal(0.0)])), ast.IfExpr( ast.CompExpr(", "ast.CompExpr( ast.FeatureRef(0), ast.NumVal(2.5), ast.CompOpType.LTE), 
ast.VectorVal([ ast.NumVal(1.0), ast.NumVal(0.0)]), ast.VectorVal([ ast.NumVal(0.0), ast.NumVal(1.0)])),", "assemblers.RandomForestModelAssembler(estimator) actual = assembler.assemble() expected = ast.BinVectorNumExpr( ast.BinVectorExpr( ast.IfExpr( ast.CompExpr(", "import ensemble from m2cgen import assemblers, ast from tests import", "1]) assembler = assemblers.RandomForestModelAssembler(estimator) actual = assembler.assemble() expected = ast.BinVectorNumExpr(", "assemblers.RandomForestModelAssembler(estimator) actual = assembler.assemble() expected = ast.BinNumExpr( ast.BinNumExpr( ast.IfExpr( ast.CompExpr(", "ast.FeatureRef(0), ast.NumVal(1.5), ast.CompOpType.LTE), ast.VectorVal([ ast.NumVal(0.0), ast.NumVal(1.0)]), ast.VectorVal([ ast.NumVal(1.0), ast.NumVal(0.0)])), ast.IfExpr(", "random_state=13) estimator.fit([[1], [2], [3]], [1, 2, 3]) assembler = assemblers.RandomForestModelAssembler(estimator)", "ast.NumVal(0.5), ast.BinNumOpType.MUL) assert utils.cmp_exprs(actual, expected) def test_multi_class(): estimator = ensemble.RandomForestClassifier(", "tests import utils def test_single_condition(): estimator = ensemble.RandomForestRegressor(n_estimators=2, random_state=1) estimator.fit([[1],", "from tests import utils def test_single_condition(): estimator = ensemble.RandomForestRegressor(n_estimators=2, random_state=1)", "expected = ast.BinNumExpr( ast.BinNumExpr( ast.NumVal(1.0), ast.IfExpr( ast.CompExpr( ast.FeatureRef(0), ast.NumVal(1.5), ast.CompOpType.LTE),", "m2cgen import assemblers, ast from tests import utils def test_single_condition():", "2]) assembler = assemblers.RandomForestModelAssembler(estimator) actual = assembler.assemble() expected = ast.BinNumExpr(", "import assemblers, ast from tests import utils def test_single_condition(): estimator", "assert utils.cmp_exprs(actual, expected) def test_multi_class(): estimator = ensemble.RandomForestClassifier( n_estimators=2, random_state=13)", "2, 3]) assembler = 
assemblers.RandomForestModelAssembler(estimator) actual = assembler.assemble() expected =", "from m2cgen import assemblers, ast from tests import utils def", "n_estimators=2, random_state=13) estimator.fit([[1], [2], [3]], [1, -1, 1]) assembler =", "test_two_conditions(): estimator = ensemble.RandomForestRegressor(n_estimators=2, random_state=13) estimator.fit([[1], [2], [3]], [1, 2,", "actual = assembler.assemble() expected = ast.BinVectorNumExpr( ast.BinVectorExpr( ast.IfExpr( ast.CompExpr( ast.FeatureRef(0),", "ast.IfExpr( ast.CompExpr( ast.FeatureRef(0), ast.NumVal(2.5), ast.CompOpType.LTE), ast.VectorVal([ ast.NumVal(1.0), ast.NumVal(0.0)]), ast.VectorVal([ ast.NumVal(0.0),", "actual = assembler.assemble() expected = ast.BinNumExpr( ast.BinNumExpr( ast.IfExpr( ast.CompExpr( ast.FeatureRef(0),", "= ensemble.RandomForestClassifier( n_estimators=2, random_state=13) estimator.fit([[1], [2], [3]], [1, -1, 1])", "from sklearn import ensemble from m2cgen import assemblers, ast from", "estimator = ensemble.RandomForestRegressor(n_estimators=2, random_state=13) estimator.fit([[1], [2], [3]], [1, 2, 3])", "assembler = assemblers.RandomForestModelAssembler(estimator) actual = assembler.assemble() expected = ast.BinNumExpr( ast.BinNumExpr(", "ast.BinNumOpType.ADD), ast.NumVal(0.5), ast.BinNumOpType.MUL) assert utils.cmp_exprs(actual, expected) def test_multi_class(): estimator =", "expected = ast.BinVectorNumExpr( ast.BinVectorExpr( ast.IfExpr( ast.CompExpr( ast.FeatureRef(0), ast.NumVal(1.5), ast.CompOpType.LTE), ast.VectorVal([", "ast.IfExpr( ast.CompExpr( ast.FeatureRef(0), ast.NumVal(2.5), ast.CompOpType.LTE), ast.NumVal(2.0), ast.NumVal(3.0)), ast.BinNumOpType.ADD), ast.NumVal(0.5), ast.BinNumOpType.MUL)", "expected = ast.BinNumExpr( ast.BinNumExpr( ast.IfExpr( ast.CompExpr( ast.FeatureRef(0), ast.NumVal(1.5), ast.CompOpType.LTE), ast.NumVal(1.0),", "ast.BinNumExpr( ast.NumVal(1.0), ast.IfExpr( ast.CompExpr( ast.FeatureRef(0), ast.NumVal(1.5), ast.CompOpType.LTE), 
ast.NumVal(1.0), ast.NumVal(2.0)), ast.BinNumOpType.ADD),", "ast.VectorVal([ ast.NumVal(0.0), ast.NumVal(1.0)]), ast.VectorVal([ ast.NumVal(1.0), ast.NumVal(0.0)])), ast.IfExpr( ast.CompExpr( ast.FeatureRef(0), ast.NumVal(2.5),", "[2]], [1, 2]) assembler = assemblers.RandomForestModelAssembler(estimator) actual = assembler.assemble() expected", "3]) assembler = assemblers.RandomForestModelAssembler(estimator) actual = assembler.assemble() expected = ast.BinNumExpr(", "ast.BinNumOpType.MUL) assert utils.cmp_exprs(actual, expected) def test_multi_class(): estimator = ensemble.RandomForestClassifier( n_estimators=2,", "estimator = ensemble.RandomForestClassifier( n_estimators=2, random_state=13) estimator.fit([[1], [2], [3]], [1, -1,", "ast.NumVal(1.5), ast.CompOpType.LTE), ast.NumVal(1.0), ast.NumVal(2.0)), ast.BinNumOpType.ADD), ast.NumVal(0.5), ast.BinNumOpType.MUL) assert utils.cmp_exprs(actual, expected)", "ast.NumVal(1.0), ast.IfExpr( ast.CompExpr( ast.FeatureRef(0), ast.NumVal(1.5), ast.CompOpType.LTE), ast.NumVal(1.0), ast.NumVal(2.0)), ast.BinNumOpType.ADD), ast.NumVal(0.5),", "assembler.assemble() expected = ast.BinNumExpr( ast.BinNumExpr( ast.IfExpr( ast.CompExpr( ast.FeatureRef(0), ast.NumVal(1.5), ast.CompOpType.LTE),", "ast.FeatureRef(0), ast.NumVal(1.5), ast.CompOpType.LTE), ast.NumVal(1.0), ast.NumVal(2.0)), ast.IfExpr( ast.CompExpr( ast.FeatureRef(0), ast.NumVal(2.5), ast.CompOpType.LTE),", "ast.CompExpr( ast.FeatureRef(0), ast.NumVal(1.5), ast.CompOpType.LTE), ast.VectorVal([ ast.NumVal(0.0), ast.NumVal(1.0)]), ast.VectorVal([ ast.NumVal(1.0), ast.NumVal(0.0)])),", "utils.cmp_exprs(actual, expected) def test_two_conditions(): estimator = ensemble.RandomForestRegressor(n_estimators=2, random_state=13) estimator.fit([[1], [2],", "utils def test_single_condition(): estimator = ensemble.RandomForestRegressor(n_estimators=2, random_state=1) estimator.fit([[1], [2]], [1,", "ast.NumVal(1.5), ast.CompOpType.LTE), ast.NumVal(1.0), ast.NumVal(2.0)), 
ast.IfExpr( ast.CompExpr( ast.FeatureRef(0), ast.NumVal(2.5), ast.CompOpType.LTE), ast.NumVal(2.0),", "[1, 2, 3]) assembler = assemblers.RandomForestModelAssembler(estimator) actual = assembler.assemble() expected", "= assemblers.RandomForestModelAssembler(estimator) actual = assembler.assemble() expected = ast.BinNumExpr( ast.BinNumExpr( ast.IfExpr(", "ast.NumVal(2.0)), ast.IfExpr( ast.CompExpr( ast.FeatureRef(0), ast.NumVal(2.5), ast.CompOpType.LTE), ast.NumVal(2.0), ast.NumVal(3.0)), ast.BinNumOpType.ADD), ast.NumVal(0.5),", "[3]], [1, 2, 3]) assembler = assemblers.RandomForestModelAssembler(estimator) actual = assembler.assemble()", "assembler.assemble() expected = ast.BinVectorNumExpr( ast.BinVectorExpr( ast.IfExpr( ast.CompExpr( ast.FeatureRef(0), ast.NumVal(1.5), ast.CompOpType.LTE),", "ast.BinVectorNumExpr( ast.BinVectorExpr( ast.IfExpr( ast.CompExpr( ast.FeatureRef(0), ast.NumVal(1.5), ast.CompOpType.LTE), ast.VectorVal([ ast.NumVal(0.0), ast.NumVal(1.0)]),", "ast.NumVal(1.0), ast.NumVal(0.0)]), ast.VectorVal([ ast.NumVal(0.0), ast.NumVal(1.0)])), ast.BinNumOpType.ADD), ast.NumVal(0.5), ast.BinNumOpType.MUL) assert utils.cmp_exprs(actual,", "ast.NumVal(0.5), ast.BinNumOpType.MUL) assert utils.cmp_exprs(actual, expected) def test_two_conditions(): estimator = ensemble.RandomForestRegressor(n_estimators=2,", "ast.CompExpr( ast.FeatureRef(0), ast.NumVal(1.5), ast.CompOpType.LTE), ast.NumVal(1.0), ast.NumVal(2.0)), ast.IfExpr( ast.CompExpr( ast.FeatureRef(0), ast.NumVal(2.5),", "[1, -1, 1]) assembler = assemblers.RandomForestModelAssembler(estimator) actual = assembler.assemble() expected", "test_single_condition(): estimator = ensemble.RandomForestRegressor(n_estimators=2, random_state=1) estimator.fit([[1], [2]], [1, 2]) assembler", "[2], [3]], [1, -1, 1]) assembler = assemblers.RandomForestModelAssembler(estimator) actual =", "ast.BinVectorExpr( ast.IfExpr( ast.CompExpr( ast.FeatureRef(0), ast.NumVal(1.5), ast.CompOpType.LTE), ast.VectorVal([ 
ast.NumVal(0.0), ast.NumVal(1.0)]), ast.VectorVal([", "= ast.BinNumExpr( ast.BinNumExpr( ast.NumVal(1.0), ast.IfExpr( ast.CompExpr( ast.FeatureRef(0), ast.NumVal(1.5), ast.CompOpType.LTE), ast.NumVal(1.0),", "ast.NumVal(3.0)), ast.BinNumOpType.ADD), ast.NumVal(0.5), ast.BinNumOpType.MUL) assert utils.cmp_exprs(actual, expected) def test_multi_class(): estimator", "estimator.fit([[1], [2]], [1, 2]) assembler = assemblers.RandomForestModelAssembler(estimator) actual = assembler.assemble()", "ast.BinNumOpType.MUL) assert utils.cmp_exprs(actual, expected) def test_two_conditions(): estimator = ensemble.RandomForestRegressor(n_estimators=2, random_state=13)", "ast.VectorVal([ ast.NumVal(1.0), ast.NumVal(0.0)])), ast.IfExpr( ast.CompExpr( ast.FeatureRef(0), ast.NumVal(2.5), ast.CompOpType.LTE), ast.VectorVal([ ast.NumVal(1.0),", "[1, 2]) assembler = assemblers.RandomForestModelAssembler(estimator) actual = assembler.assemble() expected =", "expected) def test_two_conditions(): estimator = ensemble.RandomForestRegressor(n_estimators=2, random_state=13) estimator.fit([[1], [2], [3]],", "= assembler.assemble() expected = ast.BinNumExpr( ast.BinNumExpr( ast.NumVal(1.0), ast.IfExpr( ast.CompExpr( ast.FeatureRef(0)," ]
[ "See: https://packaging.python.org/en/latest/distributing.html https://github.com/pypa/sampleproject \"\"\" import os from setuptools import setup,", "Extension import pynumenc_meta # pylint: disable=redefined-builtin here = os.path.abspath(os.path.dirname(__file__)) #", "f.read() # pylint: disable=invalid-name setup( name=pynumenc_meta.__title__, version=pynumenc_meta.__version__, description=pynumenc_meta.__description__, long_description=long_description, url=pynumenc_meta.__url__,", "Python :: 3.6' ], license='License :: OSI Approved :: MIT", "'Development Status :: 5 - Production/Stable', 'Intended Audience :: End", "- Production/Stable', 'Intended Audience :: End Users/Desktop', 'License :: OSI", "ext_modules=[ Extension('numenc', sources=['numenc-cpp/encoder_decoder.cpp']) ], scripts=['bin/pynumenc'], py_modules=['pynumenc_meta'], package_data={'pynumenc': ['py.typed']}, data_files=[('.', ['LICENSE.txt',", "classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience ::", "3.5', 'Programming Language :: Python :: 3.6' ], license='License ::", "Language :: Python :: 3.6' ], license='License :: OSI Approved", "author=pynumenc_meta.__author__, author_email=pynumenc_meta.__author_email__, classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended", "= f.read() # pylint: disable=invalid-name setup( name=pynumenc_meta.__title__, version=pynumenc_meta.__version__, description=pynumenc_meta.__description__, long_description=long_description,", "url=pynumenc_meta.__url__, author=pynumenc_meta.__author__, author_email=pynumenc_meta.__author_email__, classifiers=[ 'Development Status :: 5 - Production/Stable',", "OSI Approved :: MIT License', keywords='C++ encode decode bytes encoding", "decode bytes encoding decoding sorted', packages=find_packages(exclude=['docs', 'tests']), install_requires=[], extras_require={ 'dev':", "\"\"\" import os from setuptools import setup, find_packages, Extension import", 
"https://github.com/pypa/sampleproject \"\"\" import os from setuptools import setup, find_packages, Extension", "MIT License', keywords='C++ encode decode bytes encoding decoding sorted', packages=find_packages(exclude=['docs',", "import pynumenc_meta # pylint: disable=redefined-builtin here = os.path.abspath(os.path.dirname(__file__)) # pylint:", "decoding sorted', packages=find_packages(exclude=['docs', 'tests']), install_requires=[], extras_require={ 'dev': [ # yapf:", ":: Python :: 3.6' ], license='License :: OSI Approved ::", "# pylint: disable=redefined-builtin here = os.path.abspath(os.path.dirname(__file__)) # pylint: disable=invalid-name with", "install_requires=[], extras_require={ 'dev': [ # yapf: disable, 'docutils>=0.14,<1', 'mypy==0.641', 'hypothesis==3.82.1',", "import os from setuptools import setup, find_packages, Extension import pynumenc_meta", "author_email=pynumenc_meta.__author_email__, classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience", "3.6' ], license='License :: OSI Approved :: MIT License', keywords='C++", "f: long_description = f.read() # pylint: disable=invalid-name setup( name=pynumenc_meta.__title__, version=pynumenc_meta.__version__,", "yapf: disable, 'docutils>=0.14,<1', 'mypy==0.641', 'hypothesis==3.82.1', 'pygments>=2.2.0,<3', 'pydocstyle>=3.0.0,<4', 'pylint==2.1.1', 'yapf==0.24.0' #", "'pygments>=2.2.0,<3', 'pydocstyle>=3.0.0,<4', 'pylint==2.1.1', 'yapf==0.24.0' # yapf: enable ] }, ext_modules=[", "module. See: https://packaging.python.org/en/latest/distributing.html https://github.com/pypa/sampleproject \"\"\" import os from setuptools import", "based setup module. 
See: https://packaging.python.org/en/latest/distributing.html https://github.com/pypa/sampleproject \"\"\" import os from", "# pylint: disable=invalid-name setup( name=pynumenc_meta.__title__, version=pynumenc_meta.__version__, description=pynumenc_meta.__description__, long_description=long_description, url=pynumenc_meta.__url__, author=pynumenc_meta.__author__,", "'Intended Audience :: End Users/Desktop', 'License :: OSI Approved ::", "'mypy==0.641', 'hypothesis==3.82.1', 'pygments>=2.2.0,<3', 'pydocstyle>=3.0.0,<4', 'pylint==2.1.1', 'yapf==0.24.0' # yapf: enable ]", "Production/Stable', 'Intended Audience :: End Users/Desktop', 'License :: OSI Approved", "Users/Desktop', 'License :: OSI Approved :: MIT License', 'Programming Language", ":: End Users/Desktop', 'License :: OSI Approved :: MIT License',", "setuptools based setup module. See: https://packaging.python.org/en/latest/distributing.html https://github.com/pypa/sampleproject \"\"\" import os", "https://packaging.python.org/en/latest/distributing.html https://github.com/pypa/sampleproject \"\"\" import os from setuptools import setup, find_packages,", "'docutils>=0.14,<1', 'mypy==0.641', 'hypothesis==3.82.1', 'pygments>=2.2.0,<3', 'pydocstyle>=3.0.0,<4', 'pylint==2.1.1', 'yapf==0.24.0' # yapf: enable", "'dev': [ # yapf: disable, 'docutils>=0.14,<1', 'mypy==0.641', 'hypothesis==3.82.1', 'pygments>=2.2.0,<3', 'pydocstyle>=3.0.0,<4',", "encoding decoding sorted', packages=find_packages(exclude=['docs', 'tests']), install_requires=[], extras_require={ 'dev': [ #", "] }, ext_modules=[ Extension('numenc', sources=['numenc-cpp/encoder_decoder.cpp']) ], scripts=['bin/pynumenc'], py_modules=['pynumenc_meta'], package_data={'pynumenc': ['py.typed']},", "yapf: enable ] }, ext_modules=[ Extension('numenc', sources=['numenc-cpp/encoder_decoder.cpp']) ], scripts=['bin/pynumenc'], py_modules=['pynumenc_meta'],", "version=pynumenc_meta.__version__, description=pynumenc_meta.__description__, 
long_description=long_description, url=pynumenc_meta.__url__, author=pynumenc_meta.__author__, author_email=pynumenc_meta.__author_email__, classifiers=[ 'Development Status ::", "'yapf==0.24.0' # yapf: enable ] }, ext_modules=[ Extension('numenc', sources=['numenc-cpp/encoder_decoder.cpp']) ],", "OSI Approved :: MIT License', 'Programming Language :: Python ::", "], license='License :: OSI Approved :: MIT License', keywords='C++ encode", "setup module. See: https://packaging.python.org/en/latest/distributing.html https://github.com/pypa/sampleproject \"\"\" import os from setuptools", "description=pynumenc_meta.__description__, long_description=long_description, url=pynumenc_meta.__url__, author=pynumenc_meta.__author__, author_email=pynumenc_meta.__author_email__, classifiers=[ 'Development Status :: 5", "Language :: Python :: 3.5', 'Programming Language :: Python ::", "Approved :: MIT License', 'Programming Language :: Python :: 3.5',", "name=pynumenc_meta.__title__, version=pynumenc_meta.__version__, description=pynumenc_meta.__description__, long_description=long_description, url=pynumenc_meta.__url__, author=pynumenc_meta.__author__, author_email=pynumenc_meta.__author_email__, classifiers=[ 'Development Status", "Extension('numenc', sources=['numenc-cpp/encoder_decoder.cpp']) ], scripts=['bin/pynumenc'], py_modules=['pynumenc_meta'], package_data={'pynumenc': ['py.typed']}, data_files=[('.', ['LICENSE.txt', 'README.rst'])])", "from setuptools import setup, find_packages, Extension import pynumenc_meta # pylint:", "[ # yapf: disable, 'docutils>=0.14,<1', 'mypy==0.641', 'hypothesis==3.82.1', 'pygments>=2.2.0,<3', 'pydocstyle>=3.0.0,<4', 'pylint==2.1.1',", "# yapf: enable ] }, ext_modules=[ Extension('numenc', sources=['numenc-cpp/encoder_decoder.cpp']) ], scripts=['bin/pynumenc'],", "enable ] }, ext_modules=[ Extension('numenc', sources=['numenc-cpp/encoder_decoder.cpp']) ], scripts=['bin/pynumenc'], py_modules=['pynumenc_meta'], package_data={'pynumenc':", 
"License', keywords='C++ encode decode bytes encoding decoding sorted', packages=find_packages(exclude=['docs', 'tests']),", "'pylint==2.1.1', 'yapf==0.24.0' # yapf: enable ] }, ext_modules=[ Extension('numenc', sources=['numenc-cpp/encoder_decoder.cpp'])", "keywords='C++ encode decode bytes encoding decoding sorted', packages=find_packages(exclude=['docs', 'tests']), install_requires=[],", "find_packages, Extension import pynumenc_meta # pylint: disable=redefined-builtin here = os.path.abspath(os.path.dirname(__file__))", "Audience :: End Users/Desktop', 'License :: OSI Approved :: MIT", "sorted', packages=find_packages(exclude=['docs', 'tests']), install_requires=[], extras_require={ 'dev': [ # yapf: disable,", "extras_require={ 'dev': [ # yapf: disable, 'docutils>=0.14,<1', 'mypy==0.641', 'hypothesis==3.82.1', 'pygments>=2.2.0,<3',", "with open(os.path.join(here, 'README.rst'), encoding='utf-8') as f: long_description = f.read() #", "<reponame>Parquery/pynumenc \"\"\"A setuptools based setup module. 
See: https://packaging.python.org/en/latest/distributing.html https://github.com/pypa/sampleproject \"\"\"", "disable, 'docutils>=0.14,<1', 'mypy==0.641', 'hypothesis==3.82.1', 'pygments>=2.2.0,<3', 'pydocstyle>=3.0.0,<4', 'pylint==2.1.1', 'yapf==0.24.0' # yapf:", "pylint: disable=redefined-builtin here = os.path.abspath(os.path.dirname(__file__)) # pylint: disable=invalid-name with open(os.path.join(here,", "'tests']), install_requires=[], extras_require={ 'dev': [ # yapf: disable, 'docutils>=0.14,<1', 'mypy==0.641',", "import setup, find_packages, Extension import pynumenc_meta # pylint: disable=redefined-builtin here", "}, ext_modules=[ Extension('numenc', sources=['numenc-cpp/encoder_decoder.cpp']) ], scripts=['bin/pynumenc'], py_modules=['pynumenc_meta'], package_data={'pynumenc': ['py.typed']}, data_files=[('.',", "setup, find_packages, Extension import pynumenc_meta # pylint: disable=redefined-builtin here =", "# yapf: disable, 'docutils>=0.14,<1', 'mypy==0.641', 'hypothesis==3.82.1', 'pygments>=2.2.0,<3', 'pydocstyle>=3.0.0,<4', 'pylint==2.1.1', 'yapf==0.24.0'", ":: Python :: 3.5', 'Programming Language :: Python :: 3.6'", "os from setuptools import setup, find_packages, Extension import pynumenc_meta #", "'License :: OSI Approved :: MIT License', 'Programming Language ::", "disable=invalid-name with open(os.path.join(here, 'README.rst'), encoding='utf-8') as f: long_description = f.read()", "End Users/Desktop', 'License :: OSI Approved :: MIT License', 'Programming", "disable=invalid-name setup( name=pynumenc_meta.__title__, version=pynumenc_meta.__version__, description=pynumenc_meta.__description__, long_description=long_description, url=pynumenc_meta.__url__, author=pynumenc_meta.__author__, author_email=pynumenc_meta.__author_email__, classifiers=[", ":: OSI Approved :: MIT License', keywords='C++ encode decode bytes", "setup( name=pynumenc_meta.__title__, version=pynumenc_meta.__version__, description=pynumenc_meta.__description__, 
long_description=long_description, url=pynumenc_meta.__url__, author=pynumenc_meta.__author__, author_email=pynumenc_meta.__author_email__, classifiers=[ 'Development", ":: OSI Approved :: MIT License', 'Programming Language :: Python", "# pylint: disable=invalid-name with open(os.path.join(here, 'README.rst'), encoding='utf-8') as f: long_description", ":: 3.5', 'Programming Language :: Python :: 3.6' ], license='License", "\"\"\"A setuptools based setup module. See: https://packaging.python.org/en/latest/distributing.html https://github.com/pypa/sampleproject \"\"\" import", "'README.rst'), encoding='utf-8') as f: long_description = f.read() # pylint: disable=invalid-name", "pylint: disable=invalid-name with open(os.path.join(here, 'README.rst'), encoding='utf-8') as f: long_description =", "long_description=long_description, url=pynumenc_meta.__url__, author=pynumenc_meta.__author__, author_email=pynumenc_meta.__author_email__, classifiers=[ 'Development Status :: 5 -", "Python :: 3.5', 'Programming Language :: Python :: 3.6' ],", "encoding='utf-8') as f: long_description = f.read() # pylint: disable=invalid-name setup(", "os.path.abspath(os.path.dirname(__file__)) # pylint: disable=invalid-name with open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:", "'pydocstyle>=3.0.0,<4', 'pylint==2.1.1', 'yapf==0.24.0' # yapf: enable ] }, ext_modules=[ Extension('numenc',", "open(os.path.join(here, 'README.rst'), encoding='utf-8') as f: long_description = f.read() # pylint:", "bytes encoding decoding sorted', packages=find_packages(exclude=['docs', 'tests']), install_requires=[], extras_require={ 'dev': [", "pylint: disable=invalid-name setup( name=pynumenc_meta.__title__, version=pynumenc_meta.__version__, description=pynumenc_meta.__description__, long_description=long_description, url=pynumenc_meta.__url__, author=pynumenc_meta.__author__, author_email=pynumenc_meta.__author_email__,", "'Programming Language :: Python :: 3.5', 'Programming Language :: 
Python", "here = os.path.abspath(os.path.dirname(__file__)) # pylint: disable=invalid-name with open(os.path.join(here, 'README.rst'), encoding='utf-8')", "packages=find_packages(exclude=['docs', 'tests']), install_requires=[], extras_require={ 'dev': [ # yapf: disable, 'docutils>=0.14,<1',", "License', 'Programming Language :: Python :: 3.5', 'Programming Language ::", "'hypothesis==3.82.1', 'pygments>=2.2.0,<3', 'pydocstyle>=3.0.0,<4', 'pylint==2.1.1', 'yapf==0.24.0' # yapf: enable ] },", ":: MIT License', keywords='C++ encode decode bytes encoding decoding sorted',", "'Programming Language :: Python :: 3.6' ], license='License :: OSI", ":: 5 - Production/Stable', 'Intended Audience :: End Users/Desktop', 'License", "license='License :: OSI Approved :: MIT License', keywords='C++ encode decode", "= os.path.abspath(os.path.dirname(__file__)) # pylint: disable=invalid-name with open(os.path.join(here, 'README.rst'), encoding='utf-8') as", "pynumenc_meta # pylint: disable=redefined-builtin here = os.path.abspath(os.path.dirname(__file__)) # pylint: disable=invalid-name", "setuptools import setup, find_packages, Extension import pynumenc_meta # pylint: disable=redefined-builtin", "Status :: 5 - Production/Stable', 'Intended Audience :: End Users/Desktop',", "long_description = f.read() # pylint: disable=invalid-name setup( name=pynumenc_meta.__title__, version=pynumenc_meta.__version__, description=pynumenc_meta.__description__,", "disable=redefined-builtin here = os.path.abspath(os.path.dirname(__file__)) # pylint: disable=invalid-name with open(os.path.join(here, 'README.rst'),", "MIT License', 'Programming Language :: Python :: 3.5', 'Programming Language", "as f: long_description = f.read() # pylint: disable=invalid-name setup( name=pynumenc_meta.__title__,", ":: MIT License', 'Programming Language :: Python :: 3.5', 'Programming", "encode decode bytes encoding decoding sorted', packages=find_packages(exclude=['docs', 'tests']), install_requires=[], 
extras_require={", "5 - Production/Stable', 'Intended Audience :: End Users/Desktop', 'License ::", "Approved :: MIT License', keywords='C++ encode decode bytes encoding decoding", ":: 3.6' ], license='License :: OSI Approved :: MIT License'," ]
[ "the index fullscale: 3D array containig 2D array of each", "counter == 1: roismall = resize(roi, (20,20)) samples = np.concatenate((samples[None,:,:],", "numpy as np from skimage.transform import resize from skimage import", "self.identify_boundary_objects(license_plate) self.get_regions(character_objects, license_plate) def identify_boundary_objects(self, a_license_plate): labelImage = measure.label(a_license_plate) character_dimensions", "minimumCol, maximumRow, maximumCol = regions.bbox character_height = maximumRow - minimumRow", "np from skimage.transform import resize from skimage import measure from", "array the starting column of each character coordinates: \"\"\" cord", "regionLists def get_regions(self, character_objects, a_license_plate): \"\"\" used to map out", "if character_height > minHeight and character_height < maxHeight and character_width", "plate charcters are the principle of connected component analysis and", "a dictionary containing the index fullscale: 3D array containig 2D", "= maximumRow - minimumRow character_width = maximumCol - minimumCol roi", "samples = resize(roi, (20,20)) cord.append(regions.bbox) counter += 1 elif counter", "import numpy as np from skimage.transform import resize from skimage", "axis=0) cord.append(regions.bbox) counter+=1 else: roismall = resize(roi, (20,20)) samples =", "elif counter == 1: roismall = resize(roi, (20,20)) samples =", "columnsVal: 1D array the starting column of each character coordinates:", "and labelling were used Parameters: ----------- a_license_plate: 2D numpy binary", "image of the license plate Returns: -------- a dictionary containing", "roi = a_license_plate[minimumRow:maximumRow, minimumCol:maximumCol] if character_height > minHeight and character_height", "0: samples = resize(roi, (20,20)) cord.append(regions.bbox) counter += 1 elif", "[] counter=0 column_list = [] character_dimensions = (0.35*a_license_plate.shape[0], 0.60*a_license_plate.shape[0], 0.05*a_license_plate.shape[1],", 
"of each character columnsVal: 1D array the starting column of", "roismall = resize(roi, (20,20)) samples = np.concatenate((samples[None,:,:], roismall[None,:,:]), axis=0) cord.append(regions.bbox)", "the starting column of each character coordinates: \"\"\" cord =", "else: self.candidates = { 'fullscale': samples, 'coordinates': np.array(cord), 'columnsVal': column_list", "0.15*a_license_plate.shape[1]) minHeight, maxHeight, minWidth, maxWidth = character_dimensions for regions in", "regionLists = regionprops(labelImage) return regionLists def get_regions(self, character_objects, a_license_plate): \"\"\"", "minimumRow, minimumCol, maximumRow, maximumCol = regions.bbox character_height = maximumRow -", "1 elif counter == 1: roismall = resize(roi, (20,20)) samples", "containing the index fullscale: 3D array containig 2D array of", "np.concatenate((samples[:,:,:], roismall[None,:,:]), axis=0) cord.append(regions.bbox) column_list.append(minimumCol) if len(column_list) == 0: self.candidates", "character_height < maxHeight and character_width > minWidth and character_width <", "cord = [] counter=0 column_list = [] character_dimensions = (0.35*a_license_plate.shape[0],", "= np.concatenate((samples[None,:,:], roismall[None,:,:]), axis=0) cord.append(regions.bbox) counter+=1 else: roismall = resize(roi,", "= resize(roi, (20,20)) samples = np.concatenate((samples[None,:,:], roismall[None,:,:]), axis=0) cord.append(regions.bbox) counter+=1", "if len(column_list) == 0: self.candidates = {} else: self.candidates =", "+= 1 elif counter == 1: roismall = resize(roi, (20,20))", "samples = np.concatenate((samples[:,:,:], roismall[None,:,:]), axis=0) cord.append(regions.bbox) column_list.append(minimumCol) if len(column_list) ==", "\"\"\" used to map out regions where the license plate", "fullscale: 3D array containig 2D array of each character columnsVal:", "2D array of each character columnsVal: 1D array the starting", "if counter == 0: samples = resize(roi, (20,20)) 
cord.append(regions.bbox) counter", "used Parameters: ----------- a_license_plate: 2D numpy binary image of the", "character_width = maximumCol - minimumCol roi = a_license_plate[minimumRow:maximumRow, minimumCol:maximumCol] if", "character_dimensions regionLists = regionprops(labelImage) return regionLists def get_regions(self, character_objects, a_license_plate):", "labelImage = measure.label(a_license_plate) character_dimensions = (0.4*a_license_plate.shape[0], 0.85*a_license_plate.shape[0], 0.04*a_license_plate.shape[1], 0.15*a_license_plate.shape[1]) minHeight,", "= resize(roi, (20,20)) samples = np.concatenate((samples[:,:,:], roismall[None,:,:]), axis=0) cord.append(regions.bbox) column_list.append(minimumCol)", "maxWidth = character_dimensions regionLists = regionprops(labelImage) return regionLists def get_regions(self,", "character_objects: minimumRow, minimumCol, maximumRow, maximumCol = regions.bbox character_height = maximumRow", "character_height > minHeight and character_height < maxHeight and character_width >", "cord.append(regions.bbox) column_list.append(minimumCol) if len(column_list) == 0: self.candidates = {} else:", "of each character coordinates: \"\"\" cord = [] counter=0 column_list", "were used Parameters: ----------- a_license_plate: 2D numpy binary image of", "regions in character_objects: minimumRow, minimumCol, maximumRow, maximumCol = regions.bbox character_height", "a_license_plate): labelImage = measure.label(a_license_plate) character_dimensions = (0.4*a_license_plate.shape[0], 0.85*a_license_plate.shape[0], 0.04*a_license_plate.shape[1], 0.15*a_license_plate.shape[1])", "= np.concatenate((samples[:,:,:], roismall[None,:,:]), axis=0) cord.append(regions.bbox) column_list.append(minimumCol) if len(column_list) == 0:", "character coordinates: \"\"\" cord = [] counter=0 column_list = []", "maximumCol = regions.bbox character_height = maximumRow - minimumRow character_width =", "resize(roi, (20,20)) cord.append(regions.bbox) counter 
+= 1 elif counter == 1:", "cord.append(regions.bbox) counter += 1 elif counter == 1: roismall =", "return regionLists def get_regions(self, character_objects, a_license_plate): \"\"\" used to map", "(20,20)) samples = np.concatenate((samples[:,:,:], roismall[None,:,:]), axis=0) cord.append(regions.bbox) column_list.append(minimumCol) if len(column_list)", "get_regions(self, character_objects, a_license_plate): \"\"\" used to map out regions where", "for regions in character_objects: minimumRow, minimumCol, maximumRow, maximumCol = regions.bbox", "maxHeight, minWidth, maxWidth = character_dimensions for regions in character_objects: minimumRow,", "= a_license_plate[minimumRow:maximumRow, minimumCol:maximumCol] if character_height > minHeight and character_height <", "0.05*a_license_plate.shape[1], 0.15*a_license_plate.shape[1]) minHeight, maxHeight, minWidth, maxWidth = character_dimensions for regions", "maxHeight, minWidth, maxWidth = character_dimensions regionLists = regionprops(labelImage) return regionLists", "measure from skimage.measure import regionprops class OCROnObjects(): def __init__(self, license_plate):", "maximumRow, maximumCol = regions.bbox character_height = maximumRow - minimumRow character_width", "minHeight, maxHeight, minWidth, maxWidth = character_dimensions regionLists = regionprops(labelImage) return", "skimage import measure from skimage.measure import regionprops class OCROnObjects(): def", "def __init__(self, license_plate): character_objects = self.identify_boundary_objects(license_plate) self.get_regions(character_objects, license_plate) def identify_boundary_objects(self,", "counter=0 column_list = [] character_dimensions = (0.35*a_license_plate.shape[0], 0.60*a_license_plate.shape[0], 0.05*a_license_plate.shape[1], 0.15*a_license_plate.shape[1])", "counter == 0: samples = resize(roi, (20,20)) cord.append(regions.bbox) counter +=", "0: self.candidates = {} else: self.candidates = { 'fullscale': samples,", "character_dimensions for 
regions in character_objects: minimumRow, minimumCol, maximumRow, maximumCol =", "license plate Returns: -------- a dictionary containing the index fullscale:", "column_list.append(minimumCol) if len(column_list) == 0: self.candidates = {} else: self.candidates", "where the license plate charcters are the principle of connected", "import measure from skimage.measure import regionprops class OCROnObjects(): def __init__(self,", "maxHeight and character_width > minWidth and character_width < maxWidth: if", "else: roismall = resize(roi, (20,20)) samples = np.concatenate((samples[:,:,:], roismall[None,:,:]), axis=0)", "skimage.transform import resize from skimage import measure from skimage.measure import", "def get_regions(self, character_objects, a_license_plate): \"\"\" used to map out regions", "from skimage import measure from skimage.measure import regionprops class OCROnObjects():", "array containig 2D array of each character columnsVal: 1D array", "identify_boundary_objects(self, a_license_plate): labelImage = measure.label(a_license_plate) character_dimensions = (0.4*a_license_plate.shape[0], 0.85*a_license_plate.shape[0], 0.04*a_license_plate.shape[1],", "= character_dimensions regionLists = regionprops(labelImage) return regionLists def get_regions(self, character_objects,", "character columnsVal: 1D array the starting column of each character", "minimumRow character_width = maximumCol - minimumCol roi = a_license_plate[minimumRow:maximumRow, minimumCol:maximumCol]", "the license plate charcters are the principle of connected component", "minimumCol roi = a_license_plate[minimumRow:maximumRow, minimumCol:maximumCol] if character_height > minHeight and", "self.candidates = {} else: self.candidates = { 'fullscale': samples, 'coordinates':", "a_license_plate): \"\"\" used to map out regions where the license", "counter += 1 elif counter == 1: roismall = resize(roi,", "license plate charcters are the principle of connected component analysis", "OCROnObjects(): 
def __init__(self, license_plate): character_objects = self.identify_boundary_objects(license_plate) self.get_regions(character_objects, license_plate) def", "minWidth, maxWidth = character_dimensions for regions in character_objects: minimumRow, minimumCol,", "self.candidates = { 'fullscale': samples, 'coordinates': np.array(cord), 'columnsVal': column_list }", "regions where the license plate charcters are the principle of", "2D numpy binary image of the license plate Returns: --------", "plate Returns: -------- a dictionary containing the index fullscale: 3D", "- minimumRow character_width = maximumCol - minimumCol roi = a_license_plate[minimumRow:maximumRow,", "\"\"\" cord = [] counter=0 column_list = [] character_dimensions =", "binary image of the license plate Returns: -------- a dictionary", "0.04*a_license_plate.shape[1], 0.15*a_license_plate.shape[1]) minHeight, maxHeight, minWidth, maxWidth = character_dimensions regionLists =", "out regions where the license plate charcters are the principle", "character_width > minWidth and character_width < maxWidth: if counter ==", "= measure.label(a_license_plate) character_dimensions = (0.4*a_license_plate.shape[0], 0.85*a_license_plate.shape[0], 0.04*a_license_plate.shape[1], 0.15*a_license_plate.shape[1]) minHeight, maxHeight,", "regionprops(labelImage) return regionLists def get_regions(self, character_objects, a_license_plate): \"\"\" used to", "minimumCol:maximumCol] if character_height > minHeight and character_height < maxHeight and", "regions.bbox character_height = maximumRow - minimumRow character_width = maximumCol -", "def identify_boundary_objects(self, a_license_plate): labelImage = measure.label(a_license_plate) character_dimensions = (0.4*a_license_plate.shape[0], 0.85*a_license_plate.shape[0],", "- minimumCol roi = a_license_plate[minimumRow:maximumRow, minimumCol:maximumCol] if character_height > minHeight", "maxWidth: if counter == 0: samples = resize(roi, (20,20)) cord.append(regions.bbox)", 
"samples = np.concatenate((samples[None,:,:], roismall[None,:,:]), axis=0) cord.append(regions.bbox) counter+=1 else: roismall =", "= regions.bbox character_height = maximumRow - minimumRow character_width = maximumCol", "and character_width > minWidth and character_width < maxWidth: if counter", "> minWidth and character_width < maxWidth: if counter == 0:", "roismall = resize(roi, (20,20)) samples = np.concatenate((samples[:,:,:], roismall[None,:,:]), axis=0) cord.append(regions.bbox)", "3D array containig 2D array of each character columnsVal: 1D", "{ 'fullscale': samples, 'coordinates': np.array(cord), 'columnsVal': column_list } return self.candidates", "component analysis and labelling were used Parameters: ----------- a_license_plate: 2D", "license_plate) def identify_boundary_objects(self, a_license_plate): labelImage = measure.label(a_license_plate) character_dimensions = (0.4*a_license_plate.shape[0],", "from skimage.measure import regionprops class OCROnObjects(): def __init__(self, license_plate): character_objects", "column of each character coordinates: \"\"\" cord = [] counter=0", "= [] character_dimensions = (0.35*a_license_plate.shape[0], 0.60*a_license_plate.shape[0], 0.05*a_license_plate.shape[1], 0.15*a_license_plate.shape[1]) minHeight, maxHeight,", "dictionary containing the index fullscale: 3D array containig 2D array", "(0.4*a_license_plate.shape[0], 0.85*a_license_plate.shape[0], 0.04*a_license_plate.shape[1], 0.15*a_license_plate.shape[1]) minHeight, maxHeight, minWidth, maxWidth = character_dimensions", "each character coordinates: \"\"\" cord = [] counter=0 column_list =", "(0.35*a_license_plate.shape[0], 0.60*a_license_plate.shape[0], 0.05*a_license_plate.shape[1], 0.15*a_license_plate.shape[1]) minHeight, maxHeight, minWidth, maxWidth = character_dimensions", "map out regions where the license plate charcters are the", "character_dimensions = (0.4*a_license_plate.shape[0], 0.85*a_license_plate.shape[0], 0.04*a_license_plate.shape[1], 
0.15*a_license_plate.shape[1]) minHeight, maxHeight, minWidth, maxWidth", "the license plate Returns: -------- a dictionary containing the index", "column_list = [] character_dimensions = (0.35*a_license_plate.shape[0], 0.60*a_license_plate.shape[0], 0.05*a_license_plate.shape[1], 0.15*a_license_plate.shape[1]) minHeight,", "== 1: roismall = resize(roi, (20,20)) samples = np.concatenate((samples[None,:,:], roismall[None,:,:]),", "as np from skimage.transform import resize from skimage import measure", "= self.identify_boundary_objects(license_plate) self.get_regions(character_objects, license_plate) def identify_boundary_objects(self, a_license_plate): labelImage = measure.label(a_license_plate)", "maximumRow - minimumRow character_width = maximumCol - minimumCol roi =", "minWidth, maxWidth = character_dimensions regionLists = regionprops(labelImage) return regionLists def", "Parameters: ----------- a_license_plate: 2D numpy binary image of the license", "character_dimensions = (0.35*a_license_plate.shape[0], 0.60*a_license_plate.shape[0], 0.05*a_license_plate.shape[1], 0.15*a_license_plate.shape[1]) minHeight, maxHeight, minWidth, maxWidth", "class OCROnObjects(): def __init__(self, license_plate): character_objects = self.identify_boundary_objects(license_plate) self.get_regions(character_objects, license_plate)", "array of each character columnsVal: 1D array the starting column", "coordinates: \"\"\" cord = [] counter=0 column_list = [] character_dimensions", "are the principle of connected component analysis and labelling were", "resize from skimage import measure from skimage.measure import regionprops class", "labelling were used Parameters: ----------- a_license_plate: 2D numpy binary image", "= character_dimensions for regions in character_objects: minimumRow, minimumCol, maximumRow, maximumCol", "1: roismall = resize(roi, (20,20)) samples = np.concatenate((samples[None,:,:], roismall[None,:,:]), axis=0)", "(20,20)) samples = 
np.concatenate((samples[None,:,:], roismall[None,:,:]), axis=0) cord.append(regions.bbox) counter+=1 else: roismall", "regionprops class OCROnObjects(): def __init__(self, license_plate): character_objects = self.identify_boundary_objects(license_plate) self.get_regions(character_objects,", "1D array the starting column of each character coordinates: \"\"\"", "character_objects, a_license_plate): \"\"\" used to map out regions where the", "[] character_dimensions = (0.35*a_license_plate.shape[0], 0.60*a_license_plate.shape[0], 0.05*a_license_plate.shape[1], 0.15*a_license_plate.shape[1]) minHeight, maxHeight, minWidth,", "character_width < maxWidth: if counter == 0: samples = resize(roi,", "resize(roi, (20,20)) samples = np.concatenate((samples[:,:,:], roismall[None,:,:]), axis=0) cord.append(regions.bbox) column_list.append(minimumCol) if", "and character_height < maxHeight and character_width > minWidth and character_width", "minHeight and character_height < maxHeight and character_width > minWidth and", "(20,20)) cord.append(regions.bbox) counter += 1 elif counter == 1: roismall", "len(column_list) == 0: self.candidates = {} else: self.candidates = {", "of connected component analysis and labelling were used Parameters: -----------", "used to map out regions where the license plate charcters", "minHeight, maxHeight, minWidth, maxWidth = character_dimensions for regions in character_objects:", "0.15*a_license_plate.shape[1]) minHeight, maxHeight, minWidth, maxWidth = character_dimensions regionLists = regionprops(labelImage)", "resize(roi, (20,20)) samples = np.concatenate((samples[None,:,:], roismall[None,:,:]), axis=0) cord.append(regions.bbox) counter+=1 else:", "import resize from skimage import measure from skimage.measure import regionprops", "principle of connected component analysis and labelling were used Parameters:", "__init__(self, license_plate): character_objects = self.identify_boundary_objects(license_plate) self.get_regions(character_objects, 
license_plate) def identify_boundary_objects(self, a_license_plate):", "np.concatenate((samples[None,:,:], roismall[None,:,:]), axis=0) cord.append(regions.bbox) counter+=1 else: roismall = resize(roi, (20,20))", "skimage.measure import regionprops class OCROnObjects(): def __init__(self, license_plate): character_objects =", "license_plate): character_objects = self.identify_boundary_objects(license_plate) self.get_regions(character_objects, license_plate) def identify_boundary_objects(self, a_license_plate): labelImage", "numpy binary image of the license plate Returns: -------- a", "counter+=1 else: roismall = resize(roi, (20,20)) samples = np.concatenate((samples[:,:,:], roismall[None,:,:]),", "maximumCol - minimumCol roi = a_license_plate[minimumRow:maximumRow, minimumCol:maximumCol] if character_height >", "= regionprops(labelImage) return regionLists def get_regions(self, character_objects, a_license_plate): \"\"\" used", "each character columnsVal: 1D array the starting column of each", "import regionprops class OCROnObjects(): def __init__(self, license_plate): character_objects = self.identify_boundary_objects(license_plate)", "== 0: self.candidates = {} else: self.candidates = { 'fullscale':", "roismall[None,:,:]), axis=0) cord.append(regions.bbox) counter+=1 else: roismall = resize(roi, (20,20)) samples", "character_height = maximumRow - minimumRow character_width = maximumCol - minimumCol", "0.85*a_license_plate.shape[0], 0.04*a_license_plate.shape[1], 0.15*a_license_plate.shape[1]) minHeight, maxHeight, minWidth, maxWidth = character_dimensions regionLists", "-------- a dictionary containing the index fullscale: 3D array containig", "= { 'fullscale': samples, 'coordinates': np.array(cord), 'columnsVal': column_list } return", "= (0.35*a_license_plate.shape[0], 0.60*a_license_plate.shape[0], 0.05*a_license_plate.shape[1], 0.15*a_license_plate.shape[1]) minHeight, maxHeight, minWidth, maxWidth =", "self.get_regions(character_objects, license_plate) def 
identify_boundary_objects(self, a_license_plate): labelImage = measure.label(a_license_plate) character_dimensions =", "----------- a_license_plate: 2D numpy binary image of the license plate", "minWidth and character_width < maxWidth: if counter == 0: samples", "= maximumCol - minimumCol roi = a_license_plate[minimumRow:maximumRow, minimumCol:maximumCol] if character_height", "starting column of each character coordinates: \"\"\" cord = []", "maxWidth = character_dimensions for regions in character_objects: minimumRow, minimumCol, maximumRow,", "the principle of connected component analysis and labelling were used", "from skimage.transform import resize from skimage import measure from skimage.measure", "in character_objects: minimumRow, minimumCol, maximumRow, maximumCol = regions.bbox character_height =", "measure.label(a_license_plate) character_dimensions = (0.4*a_license_plate.shape[0], 0.85*a_license_plate.shape[0], 0.04*a_license_plate.shape[1], 0.15*a_license_plate.shape[1]) minHeight, maxHeight, minWidth,", "= [] counter=0 column_list = [] character_dimensions = (0.35*a_license_plate.shape[0], 0.60*a_license_plate.shape[0],", "Returns: -------- a dictionary containing the index fullscale: 3D array", "a_license_plate: 2D numpy binary image of the license plate Returns:", "connected component analysis and labelling were used Parameters: ----------- a_license_plate:", "containig 2D array of each character columnsVal: 1D array the", "to map out regions where the license plate charcters are", "0.60*a_license_plate.shape[0], 0.05*a_license_plate.shape[1], 0.15*a_license_plate.shape[1]) minHeight, maxHeight, minWidth, maxWidth = character_dimensions for", "= resize(roi, (20,20)) cord.append(regions.bbox) counter += 1 elif counter ==", "character_objects = self.identify_boundary_objects(license_plate) self.get_regions(character_objects, license_plate) def identify_boundary_objects(self, a_license_plate): labelImage =", "< maxWidth: if counter == 0: samples = 
resize(roi, (20,20))", "a_license_plate[minimumRow:maximumRow, minimumCol:maximumCol] if character_height > minHeight and character_height < maxHeight", "charcters are the principle of connected component analysis and labelling", "and character_width < maxWidth: if counter == 0: samples =", "axis=0) cord.append(regions.bbox) column_list.append(minimumCol) if len(column_list) == 0: self.candidates = {}", "> minHeight and character_height < maxHeight and character_width > minWidth", "index fullscale: 3D array containig 2D array of each character", "cord.append(regions.bbox) counter+=1 else: roismall = resize(roi, (20,20)) samples = np.concatenate((samples[:,:,:],", "= {} else: self.candidates = { 'fullscale': samples, 'coordinates': np.array(cord),", "{} else: self.candidates = { 'fullscale': samples, 'coordinates': np.array(cord), 'columnsVal':", "< maxHeight and character_width > minWidth and character_width < maxWidth:", "= (0.4*a_license_plate.shape[0], 0.85*a_license_plate.shape[0], 0.04*a_license_plate.shape[1], 0.15*a_license_plate.shape[1]) minHeight, maxHeight, minWidth, maxWidth =", "roismall[None,:,:]), axis=0) cord.append(regions.bbox) column_list.append(minimumCol) if len(column_list) == 0: self.candidates =", "of the license plate Returns: -------- a dictionary containing the", "analysis and labelling were used Parameters: ----------- a_license_plate: 2D numpy", "== 0: samples = resize(roi, (20,20)) cord.append(regions.bbox) counter += 1" ]
[ "encode_auth_token(self, user_id): \"\"\" Generates the Auth Token :return: string \"\"\"", "app.config.get('SECRET_KEY')) is_blacklisted_token = BlacklistToken.check_blacklist(auth_token) if is_blacklisted_token: return 'Token blacklisted. Please", ") except Exception as e: return e @staticmethod def decode_auth_token(auth_token):", "JWT tokens \"\"\" __tablename__ = 'blacklist_tokens' id = db.Column(db.Integer, primary_key=True,", "res = BlacklistToken.query.filter_by(token=str(auth_token)).first() if res: return True else: return False", "nullable=False) address = db.Column(db.Integer(255), nullable=False) registered_on = db.Column(db.DateTime, nullable=False) admin", "jwt.InvalidTokenError: return 'Invalid token. Please log in again.' class BlacklistToken(db.Model):", "nullable=False) blacklisted_on = db.Column(db.DateTime, nullable=False) def __init__(self, token): self.token =", "= \"users\" id = db.Column(db.Integer, primary_key=True, autoincrement=True) username = db.Column(db.String(255),", "= db.Column(db.Integer(255), nullable=False) registered_on = db.Column(db.DateTime, nullable=False) admin = db.Column(db.Boolean,", "= db.Column(db.DateTime, nullable=False) def __init__(self, token): self.token = token self.blacklisted_on", "address, admin=False): self.email = email self.username = username self.password =", "related details \"\"\" __tablename__ = \"users\" id = db.Column(db.Integer, primary_key=True,", "Please log in again.' except jwt.InvalidTokenError: return 'Invalid token. Please", "tokens \"\"\" __tablename__ = 'blacklist_tokens' id = db.Column(db.Integer, primary_key=True, autoincrement=True)", "\"\"\" try: payload = { 'exp': datetime.datetime.utcnow() + datetime.timedelta(days=0, seconds=5),", "log in again.' else: return payload['sub'] except jwt.ExpiredSignatureError: return 'Signature", "is_blacklisted_token: return 'Token blacklisted. Please log in again.' 
else: return", "self.username = username self.password = <PASSWORD>.generate_password_hash( password, app.config.get('BCRYPT_LOG_ROUNDS') ).decode() self.name", "self.password = <PASSWORD>.generate_password_hash( password, app.config.get('BCRYPT_LOG_ROUNDS') ).decode() self.name = name self.age", "token :param auth_token: :return: integer|string \"\"\" try: payload = jwt.decode(auth_token,", "autoincrement=True) username = db.Column(db.String(255), unique=True, nullable=False) email = db.Column(db.String(255), unique=True,", "if is_blacklisted_token: return 'Token blacklisted. Please log in again.' else:", "Exception as e: return e @staticmethod def decode_auth_token(auth_token): \"\"\" Validates", "user_id } return jwt.encode( payload, app.config.get('SECRET_KEY'), algorithm='HS256' ) except Exception", "= db.Column(db.String(255), unique=True, nullable=False) password = db.Column(db.String(255), nullable=False) name =", "except Exception as e: return e @staticmethod def decode_auth_token(auth_token): \"\"\"", "return 'Signature expired. Please log in again.' except jwt.InvalidTokenError: return", "'Invalid token. Please log in again.' 
class BlacklistToken(db.Model): \"\"\" Token", "token has been blacklisted res = BlacklistToken.query.filter_by(token=str(auth_token)).first() if res: return", "token = db.Column(db.String(500), unique=True, nullable=False) blacklisted_on = db.Column(db.DateTime, nullable=False) def", "= db.Column(db.String(255), unique=True, nullable=False) email = db.Column(db.String(255), unique=True, nullable=False) password", "id = db.Column(db.Integer, primary_key=True, autoincrement=True) username = db.Column(db.String(255), unique=True, nullable=False)", "name = db.Column(db.String(255), nullable=False) age = db.Column(db.Integer, nullable=False) address =", "= datetime.datetime.now() def __repr__(self): return '<id: token: {}'.format(self.token) @staticmethod def", "try: payload = jwt.decode(auth_token, app.config.get('SECRET_KEY')) is_blacklisted_token = BlacklistToken.check_blacklist(auth_token) if is_blacklisted_token:", "jwt.decode(auth_token, app.config.get('SECRET_KEY')) is_blacklisted_token = BlacklistToken.check_blacklist(auth_token) if is_blacklisted_token: return 'Token blacklisted.", "autoincrement=True) token = db.Column(db.String(500), unique=True, nullable=False) blacklisted_on = db.Column(db.DateTime, nullable=False)", "as e: return e @staticmethod def decode_auth_token(auth_token): \"\"\" Validates the", "bcrypt class User(db.Model): \"\"\" User Model for storing user related", "name self.age = age self.address = address self.registered_on = datetime.datetime.now()", "unique=True, nullable=False) blacklisted_on = db.Column(db.DateTime, nullable=False) def __init__(self, token): self.token", "log in again.' 
class BlacklistToken(db.Model): \"\"\" Token Model for storing", "for storing user related details \"\"\" __tablename__ = \"users\" id", "return '<id: token: {}'.format(self.token) @staticmethod def check_blacklist(auth_token): # Check whether", "admin def encode_auth_token(self, user_id): \"\"\" Generates the Auth Token :return:", "email, username, password, name, age, address, admin=False): self.email = email", "integer|string \"\"\" try: payload = jwt.decode(auth_token, app.config.get('SECRET_KEY')) is_blacklisted_token = BlacklistToken.check_blacklist(auth_token)", "primary_key=True, autoincrement=True) token = db.Column(db.String(500), unique=True, nullable=False) blacklisted_on = db.Column(db.DateTime,", "\"\"\" User Model for storing user related details \"\"\" __tablename__", "whether auth token has been blacklisted res = BlacklistToken.query.filter_by(token=str(auth_token)).first() if", "password, app.config.get('BCRYPT_LOG_ROUNDS') ).decode() self.name = name self.age = age self.address", "db.Column(db.String(255), nullable=False) name = db.Column(db.String(255), nullable=False) age = db.Column(db.Integer, nullable=False)", "__tablename__ = \"users\" id = db.Column(db.Integer, primary_key=True, autoincrement=True) username =", "= jwt.decode(auth_token, app.config.get('SECRET_KEY')) is_blacklisted_token = BlacklistToken.check_blacklist(auth_token) if is_blacklisted_token: return 'Token", "registered_on = db.Column(db.DateTime, nullable=False) admin = db.Column(db.Boolean, nullable=False, default=False) def", ":return: string \"\"\" try: payload = { 'exp': datetime.datetime.utcnow() +", "\"\"\" Generates the Auth Token :return: string \"\"\" try: payload", "token self.blacklisted_on = datetime.datetime.now() def __repr__(self): return '<id: token: {}'.format(self.token)", "\"\"\" try: payload = jwt.decode(auth_token, app.config.get('SECRET_KEY')) is_blacklisted_token = BlacklistToken.check_blacklist(auth_token) if", "self.registered_on = 
datetime.datetime.now() self.admin = admin def encode_auth_token(self, user_id): \"\"\"", "Token Model for storing JWT tokens \"\"\" __tablename__ = 'blacklist_tokens'", "'blacklist_tokens' id = db.Column(db.Integer, primary_key=True, autoincrement=True) token = db.Column(db.String(500), unique=True,", "project/server/models.py import jwt import datetime from project.server import app, db,", "try: payload = { 'exp': datetime.datetime.utcnow() + datetime.timedelta(days=0, seconds=5), 'iat':", "= db.Column(db.Integer, primary_key=True, autoincrement=True) token = db.Column(db.String(500), unique=True, nullable=False) blacklisted_on", "= token self.blacklisted_on = datetime.datetime.now() def __repr__(self): return '<id: token:", "age, address, admin=False): self.email = email self.username = username self.password", "nullable=False) admin = db.Column(db.Boolean, nullable=False, default=False) def __init__(self, email, username,", "= db.Column(db.String(255), nullable=False) name = db.Column(db.String(255), nullable=False) age = db.Column(db.Integer,", "__init__(self, email, username, password, name, age, address, admin=False): self.email =", "self.name = name self.age = age self.address = address self.registered_on", "= age self.address = address self.registered_on = datetime.datetime.now() self.admin =", "in again.' 
else: return payload['sub'] except jwt.ExpiredSignatureError: return 'Signature expired.", "auth token :param auth_token: :return: integer|string \"\"\" try: payload =", "'<id: token: {}'.format(self.token) @staticmethod def check_blacklist(auth_token): # Check whether auth", "self.age = age self.address = address self.registered_on = datetime.datetime.now() self.admin", "db.Column(db.String(500), unique=True, nullable=False) blacklisted_on = db.Column(db.DateTime, nullable=False) def __init__(self, token):", "Token :return: string \"\"\" try: payload = { 'exp': datetime.datetime.utcnow()", "'iat': datetime.datetime.utcnow(), 'sub': user_id } return jwt.encode( payload, app.config.get('SECRET_KEY'), algorithm='HS256'", "import jwt import datetime from project.server import app, db, bcrypt", "datetime from project.server import app, db, bcrypt class User(db.Model): \"\"\"", "details \"\"\" __tablename__ = \"users\" id = db.Column(db.Integer, primary_key=True, autoincrement=True)", "Please log in again.' 
else: return payload['sub'] except jwt.ExpiredSignatureError: return", "= datetime.datetime.now() self.admin = admin def encode_auth_token(self, user_id): \"\"\" Generates", "= db.Column(db.Integer, nullable=False) address = db.Column(db.Integer(255), nullable=False) registered_on = db.Column(db.DateTime,", "db.Column(db.String(255), unique=True, nullable=False) email = db.Column(db.String(255), unique=True, nullable=False) password =", "nullable=False) registered_on = db.Column(db.DateTime, nullable=False) admin = db.Column(db.Boolean, nullable=False, default=False)", "User(db.Model): \"\"\" User Model for storing user related details \"\"\"", "db, bcrypt class User(db.Model): \"\"\" User Model for storing user", "db.Column(db.String(255), unique=True, nullable=False) password = db.Column(db.String(255), nullable=False) name = db.Column(db.String(255),", "username = db.Column(db.String(255), unique=True, nullable=False) email = db.Column(db.String(255), unique=True, nullable=False)", "'exp': datetime.datetime.utcnow() + datetime.timedelta(days=0, seconds=5), 'iat': datetime.datetime.utcnow(), 'sub': user_id }", "password = db.Column(db.String(255), nullable=False) name = db.Column(db.String(255), nullable=False) age =", "Auth Token :return: string \"\"\" try: payload = { 'exp':", "auth_token: :return: integer|string \"\"\" try: payload = jwt.decode(auth_token, app.config.get('SECRET_KEY')) is_blacklisted_token", "return 'Token blacklisted. Please log in again.' 
else: return payload['sub']", "default=False) def __init__(self, email, username, password, name, age, address, admin=False):", "email self.username = username self.password = <PASSWORD>.generate_password_hash( password, app.config.get('BCRYPT_LOG_ROUNDS') ).decode()", "+ datetime.timedelta(days=0, seconds=5), 'iat': datetime.datetime.utcnow(), 'sub': user_id } return jwt.encode(", "= db.Column(db.DateTime, nullable=False) admin = db.Column(db.Boolean, nullable=False, default=False) def __init__(self,", "token: {}'.format(self.token) @staticmethod def check_blacklist(auth_token): # Check whether auth token", "= db.Column(db.Integer, primary_key=True, autoincrement=True) username = db.Column(db.String(255), unique=True, nullable=False) email", "admin=False): self.email = email self.username = username self.password = <PASSWORD>.generate_password_hash(", ":return: integer|string \"\"\" try: payload = jwt.decode(auth_token, app.config.get('SECRET_KEY')) is_blacklisted_token =", "db.Column(db.Integer(255), nullable=False) registered_on = db.Column(db.DateTime, nullable=False) admin = db.Column(db.Boolean, nullable=False,", "storing JWT tokens \"\"\" __tablename__ = 'blacklist_tokens' id = db.Column(db.Integer,", "blacklisted_on = db.Column(db.DateTime, nullable=False) def __init__(self, token): self.token = token", "decode_auth_token(auth_token): \"\"\" Validates the auth token :param auth_token: :return: integer|string", "jwt.ExpiredSignatureError: return 'Signature expired. Please log in again.' 
except jwt.InvalidTokenError:", "primary_key=True, autoincrement=True) username = db.Column(db.String(255), unique=True, nullable=False) email = db.Column(db.String(255),", "jwt.encode( payload, app.config.get('SECRET_KEY'), algorithm='HS256' ) except Exception as e: return", "datetime.datetime.utcnow() + datetime.timedelta(days=0, seconds=5), 'iat': datetime.datetime.utcnow(), 'sub': user_id } return", "# Check whether auth token has been blacklisted res =", "has been blacklisted res = BlacklistToken.query.filter_by(token=str(auth_token)).first() if res: return True", "username, password, name, age, address, admin=False): self.email = email self.username", "id = db.Column(db.Integer, primary_key=True, autoincrement=True) token = db.Column(db.String(500), unique=True, nullable=False)", "age = db.Column(db.Integer, nullable=False) address = db.Column(db.Integer(255), nullable=False) registered_on =", "def __repr__(self): return '<id: token: {}'.format(self.token) @staticmethod def check_blacklist(auth_token): #", "Model for storing user related details \"\"\" __tablename__ = \"users\"", "e: return e @staticmethod def decode_auth_token(auth_token): \"\"\" Validates the auth", "Validates the auth token :param auth_token: :return: integer|string \"\"\" try:", "for storing JWT tokens \"\"\" __tablename__ = 'blacklist_tokens' id =", "age self.address = address self.registered_on = datetime.datetime.now() self.admin = admin", "datetime.datetime.now() self.admin = admin def encode_auth_token(self, user_id): \"\"\" Generates the", "string \"\"\" try: payload = { 'exp': datetime.datetime.utcnow() + datetime.timedelta(days=0,", "the auth token :param auth_token: :return: integer|string \"\"\" try: payload", "= name self.age = age self.address = address self.registered_on =", "payload = jwt.decode(auth_token, app.config.get('SECRET_KEY')) is_blacklisted_token = BlacklistToken.check_blacklist(auth_token) if is_blacklisted_token: return", "class BlacklistToken(db.Model): \"\"\" 
Token Model for storing JWT tokens \"\"\"", ":param auth_token: :return: integer|string \"\"\" try: payload = jwt.decode(auth_token, app.config.get('SECRET_KEY'))", "token): self.token = token self.blacklisted_on = datetime.datetime.now() def __repr__(self): return", "nullable=False) age = db.Column(db.Integer, nullable=False) address = db.Column(db.Integer(255), nullable=False) registered_on", "seconds=5), 'iat': datetime.datetime.utcnow(), 'sub': user_id } return jwt.encode( payload, app.config.get('SECRET_KEY'),", "} return jwt.encode( payload, app.config.get('SECRET_KEY'), algorithm='HS256' ) except Exception as", "return jwt.encode( payload, app.config.get('SECRET_KEY'), algorithm='HS256' ) except Exception as e:", "nullable=False, default=False) def __init__(self, email, username, password, name, age, address,", "import datetime from project.server import app, db, bcrypt class User(db.Model):", "return 'Invalid token. Please log in again.' class BlacklistToken(db.Model): \"\"\"", "__tablename__ = 'blacklist_tokens' id = db.Column(db.Integer, primary_key=True, autoincrement=True) token =", "jwt import datetime from project.server import app, db, bcrypt class", "user related details \"\"\" __tablename__ = \"users\" id = db.Column(db.Integer,", "self.blacklisted_on = datetime.datetime.now() def __repr__(self): return '<id: token: {}'.format(self.token) @staticmethod", "unique=True, nullable=False) password = db.Column(db.String(255), nullable=False) name = db.Column(db.String(255), nullable=False)", "return e @staticmethod def decode_auth_token(auth_token): \"\"\" Validates the auth token", "db.Column(db.Integer, nullable=False) address = db.Column(db.Integer(255), nullable=False) registered_on = db.Column(db.DateTime, nullable=False)", "payload['sub'] except jwt.ExpiredSignatureError: return 'Signature expired. 
Please log in again.'", "app.config.get('SECRET_KEY'), algorithm='HS256' ) except Exception as e: return e @staticmethod", "class User(db.Model): \"\"\" User Model for storing user related details", "again.' class BlacklistToken(db.Model): \"\"\" Token Model for storing JWT tokens", "<PASSWORD>.generate_password_hash( password, app.config.get('BCRYPT_LOG_ROUNDS') ).decode() self.name = name self.age = age", "from project.server import app, db, bcrypt class User(db.Model): \"\"\" User", "else: return payload['sub'] except jwt.ExpiredSignatureError: return 'Signature expired. Please log", "in again.' class BlacklistToken(db.Model): \"\"\" Token Model for storing JWT", "self.admin = admin def encode_auth_token(self, user_id): \"\"\" Generates the Auth", "token. Please log in again.' class BlacklistToken(db.Model): \"\"\" Token Model", "address = db.Column(db.Integer(255), nullable=False) registered_on = db.Column(db.DateTime, nullable=False) admin =", "= db.Column(db.String(500), unique=True, nullable=False) blacklisted_on = db.Column(db.DateTime, nullable=False) def __init__(self,", "db.Column(db.String(255), nullable=False) age = db.Column(db.Integer, nullable=False) address = db.Column(db.Integer(255), nullable=False)", "self.token = token self.blacklisted_on = datetime.datetime.now() def __repr__(self): return '<id:", "= { 'exp': datetime.datetime.utcnow() + datetime.timedelta(days=0, seconds=5), 'iat': datetime.datetime.utcnow(), 'sub':", "BlacklistToken.check_blacklist(auth_token) if is_blacklisted_token: return 'Token blacklisted. Please log in again.'", "'Signature expired. Please log in again.' 
except jwt.InvalidTokenError: return 'Invalid", "def check_blacklist(auth_token): # Check whether auth token has been blacklisted", "admin = db.Column(db.Boolean, nullable=False, default=False) def __init__(self, email, username, password,", "algorithm='HS256' ) except Exception as e: return e @staticmethod def", "Model for storing JWT tokens \"\"\" __tablename__ = 'blacklist_tokens' id", "nullable=False) email = db.Column(db.String(255), unique=True, nullable=False) password = db.Column(db.String(255), nullable=False)", "def __init__(self, email, username, password, name, age, address, admin=False): self.email", "def decode_auth_token(auth_token): \"\"\" Validates the auth token :param auth_token: :return:", "app.config.get('BCRYPT_LOG_ROUNDS') ).decode() self.name = name self.age = age self.address =", "storing user related details \"\"\" __tablename__ = \"users\" id =", "return payload['sub'] except jwt.ExpiredSignatureError: return 'Signature expired. Please log in", "auth token has been blacklisted res = BlacklistToken.query.filter_by(token=str(auth_token)).first() if res:", "datetime.datetime.now() def __repr__(self): return '<id: token: {}'.format(self.token) @staticmethod def check_blacklist(auth_token):", "blacklisted. Please log in again.' else: return payload['sub'] except jwt.ExpiredSignatureError:", "again.' else: return payload['sub'] except jwt.ExpiredSignatureError: return 'Signature expired. 
Please", "blacklisted res = BlacklistToken.query.filter_by(token=str(auth_token)).first() if res: return True else: return", "db.Column(db.DateTime, nullable=False) admin = db.Column(db.Boolean, nullable=False, default=False) def __init__(self, email,", "payload = { 'exp': datetime.datetime.utcnow() + datetime.timedelta(days=0, seconds=5), 'iat': datetime.datetime.utcnow(),", "username self.password = <PASSWORD>.generate_password_hash( password, app.config.get('BCRYPT_LOG_ROUNDS') ).decode() self.name = name", "= BlacklistToken.check_blacklist(auth_token) if is_blacklisted_token: return 'Token blacklisted. Please log in", "= username self.password = <PASSWORD>.generate_password_hash( password, app.config.get('BCRYPT_LOG_ROUNDS') ).decode() self.name =", "__init__(self, token): self.token = token self.blacklisted_on = datetime.datetime.now() def __repr__(self):", "Check whether auth token has been blacklisted res = BlacklistToken.query.filter_by(token=str(auth_token)).first()", "been blacklisted res = BlacklistToken.query.filter_by(token=str(auth_token)).first() if res: return True else:", "= 'blacklist_tokens' id = db.Column(db.Integer, primary_key=True, autoincrement=True) token = db.Column(db.String(500),", "check_blacklist(auth_token): # Check whether auth token has been blacklisted res", "unique=True, nullable=False) email = db.Column(db.String(255), unique=True, nullable=False) password = db.Column(db.String(255),", "self.email = email self.username = username self.password = <PASSWORD>.generate_password_hash( password,", "import app, db, bcrypt class User(db.Model): \"\"\" User Model for", "address self.registered_on = datetime.datetime.now() self.admin = admin def encode_auth_token(self, user_id):", "self.address = address self.registered_on = datetime.datetime.now() self.admin = admin def", "payload, app.config.get('SECRET_KEY'), algorithm='HS256' ) except Exception as e: return e", "{}'.format(self.token) @staticmethod def check_blacklist(auth_token): # 
Check whether auth token has", "# project/server/models.py import jwt import datetime from project.server import app,", "\"\"\" __tablename__ = 'blacklist_tokens' id = db.Column(db.Integer, primary_key=True, autoincrement=True) token", "name, age, address, admin=False): self.email = email self.username = username", "= db.Column(db.String(255), nullable=False) age = db.Column(db.Integer, nullable=False) address = db.Column(db.Integer(255),", "datetime.timedelta(days=0, seconds=5), 'iat': datetime.datetime.utcnow(), 'sub': user_id } return jwt.encode( payload,", "except jwt.InvalidTokenError: return 'Invalid token. Please log in again.' class", "the Auth Token :return: string \"\"\" try: payload = {", "email = db.Column(db.String(255), unique=True, nullable=False) password = db.Column(db.String(255), nullable=False) name", "@staticmethod def check_blacklist(auth_token): # Check whether auth token has been", "def __init__(self, token): self.token = token self.blacklisted_on = datetime.datetime.now() def", "password, name, age, address, admin=False): self.email = email self.username =", "nullable=False) password = db.Column(db.String(255), nullable=False) name = db.Column(db.String(255), nullable=False) age", "def encode_auth_token(self, user_id): \"\"\" Generates the Auth Token :return: string", "log in again.' except jwt.InvalidTokenError: return 'Invalid token. Please log", "__repr__(self): return '<id: token: {}'.format(self.token) @staticmethod def check_blacklist(auth_token): # Check", "@staticmethod def decode_auth_token(auth_token): \"\"\" Validates the auth token :param auth_token:", "User Model for storing user related details \"\"\" __tablename__ =", "'sub': user_id } return jwt.encode( payload, app.config.get('SECRET_KEY'), algorithm='HS256' ) except", "again.' except jwt.InvalidTokenError: return 'Invalid token. 
Please log in again.'", ").decode() self.name = name self.age = age self.address = address", "Generates the Auth Token :return: string \"\"\" try: payload =", "= address self.registered_on = datetime.datetime.now() self.admin = admin def encode_auth_token(self,", "db.Column(db.Integer, primary_key=True, autoincrement=True) username = db.Column(db.String(255), unique=True, nullable=False) email =", "Please log in again.' class BlacklistToken(db.Model): \"\"\" Token Model for", "{ 'exp': datetime.datetime.utcnow() + datetime.timedelta(days=0, seconds=5), 'iat': datetime.datetime.utcnow(), 'sub': user_id", "app, db, bcrypt class User(db.Model): \"\"\" User Model for storing", "\"users\" id = db.Column(db.Integer, primary_key=True, autoincrement=True) username = db.Column(db.String(255), unique=True,", "in again.' except jwt.InvalidTokenError: return 'Invalid token. Please log in", "\"\"\" Token Model for storing JWT tokens \"\"\" __tablename__ =", "= email self.username = username self.password = <PASSWORD>.generate_password_hash( password, app.config.get('BCRYPT_LOG_ROUNDS')", "db.Column(db.Integer, primary_key=True, autoincrement=True) token = db.Column(db.String(500), unique=True, nullable=False) blacklisted_on =", "db.Column(db.DateTime, nullable=False) def __init__(self, token): self.token = token self.blacklisted_on =", "BlacklistToken(db.Model): \"\"\" Token Model for storing JWT tokens \"\"\" __tablename__", "except jwt.ExpiredSignatureError: return 'Signature expired. Please log in again.' except", "= admin def encode_auth_token(self, user_id): \"\"\" Generates the Auth Token", "\"\"\" Validates the auth token :param auth_token: :return: integer|string \"\"\"", "= <PASSWORD>.generate_password_hash( password, app.config.get('BCRYPT_LOG_ROUNDS') ).decode() self.name = name self.age =", "is_blacklisted_token = BlacklistToken.check_blacklist(auth_token) if is_blacklisted_token: return 'Token blacklisted. Please log", "'Token blacklisted. Please log in again.' 
else: return payload['sub'] except", "\"\"\" __tablename__ = \"users\" id = db.Column(db.Integer, primary_key=True, autoincrement=True) username", "datetime.datetime.utcnow(), 'sub': user_id } return jwt.encode( payload, app.config.get('SECRET_KEY'), algorithm='HS256' )", "user_id): \"\"\" Generates the Auth Token :return: string \"\"\" try:", "project.server import app, db, bcrypt class User(db.Model): \"\"\" User Model", "e @staticmethod def decode_auth_token(auth_token): \"\"\" Validates the auth token :param", "nullable=False) name = db.Column(db.String(255), nullable=False) age = db.Column(db.Integer, nullable=False) address", "nullable=False) def __init__(self, token): self.token = token self.blacklisted_on = datetime.datetime.now()", "= db.Column(db.Boolean, nullable=False, default=False) def __init__(self, email, username, password, name,", "expired. Please log in again.' except jwt.InvalidTokenError: return 'Invalid token.", "db.Column(db.Boolean, nullable=False, default=False) def __init__(self, email, username, password, name, age," ]
[ "install_requires=install_requires, entry_points={ 'console_scripts': [ 'letsencrypt = certbot.main:main', ], }, )", "Linux', 'Programming Language :: Python', 'Programming Language :: Python ::", "'Programming Language :: Python :: 2', 'Programming Language :: Python", "codecs import os import sys from setuptools import setup from", "version = '0.7.0.dev0' setup( name='letsencrypt', version=version, description=\"ACME client\", long_description=readme, url='https://github.com/letsencrypt/letsencrypt',", ":: 3 - Alpha', 'Environment :: Console', 'Environment :: Console", "Python :: 2', 'Programming Language :: Python :: 2.6', 'Programming", ":: Utilities', ], packages=find_packages(), include_package_data=True, install_requires=install_requires, entry_points={ 'console_scripts': [ 'letsencrypt", "Python :: 2.7', 'Topic :: Internet :: WWW/HTTP', 'Topic ::", ":: 2.7', 'Topic :: Internet :: WWW/HTTP', 'Topic :: Security',", "fd: return fd.read() here = os.path.abspath(os.path.dirname(__file__)) readme = read_file(os.path.join(here, 'README.rst'))", "3 - Alpha', 'Environment :: Console', 'Environment :: Console ::", ":: Internet :: WWW/HTTP', 'Topic :: Security', 'Topic :: System", "setup from setuptools import find_packages def read_file(filename, encoding='utf8'): \"\"\"Read unicode", "install_requires = ['certbot'] version = '0.7.0.dev0' setup( name='letsencrypt', version=version, description=\"ACME", ":: 2.6', 'Programming Language :: Python :: 2.7', 'Topic ::", "read_file(os.path.join(here, 'README.rst')) # This package is a simple shim around", "Curses', 'Intended Audience :: System Administrators', 'License :: OSI Approved", "from setuptools import setup from setuptools import find_packages def read_file(filename,", "Approved :: Apache Software License', 'Operating System :: POSIX ::", "from setuptools import find_packages def read_file(filename, encoding='utf8'): \"\"\"Read unicode from", "], packages=find_packages(), include_package_data=True, 
install_requires=install_requires, entry_points={ 'console_scripts': [ 'letsencrypt = certbot.main:main',", "Console :: Curses', 'Intended Audience :: System Administrators', 'License ::", "author=\"Certbot Project\", author_email='<EMAIL>', license='Apache License 2.0', classifiers=[ 'Development Status ::", "This package is a simple shim around certbot install_requires =", "Installation/Setup', 'Topic :: System :: Networking', 'Topic :: System ::", "Language :: Python :: 2.6', 'Programming Language :: Python ::", "'Topic :: System :: Systems Administration', 'Topic :: Utilities', ],", "'Topic :: Security', 'Topic :: System :: Installation/Setup', 'Topic ::", "'Topic :: System :: Installation/Setup', 'Topic :: System :: Networking',", ":: System :: Systems Administration', 'Topic :: Utilities', ], packages=find_packages(),", "os.path.abspath(os.path.dirname(__file__)) readme = read_file(os.path.join(here, 'README.rst')) # This package is a", "Python', 'Programming Language :: Python :: 2', 'Programming Language ::", "'License :: OSI Approved :: Apache Software License', 'Operating System", "'README.rst')) # This package is a simple shim around certbot", "Audience :: System Administrators', 'License :: OSI Approved :: Apache", "author_email='<EMAIL>', license='Apache License 2.0', classifiers=[ 'Development Status :: 3 -", "'Programming Language :: Python :: 2.7', 'Topic :: Internet ::", "POSIX :: Linux', 'Programming Language :: Python', 'Programming Language ::", "simple shim around certbot install_requires = ['certbot'] version = '0.7.0.dev0'", "long_description=readme, url='https://github.com/letsencrypt/letsencrypt', author=\"Certbot Project\", author_email='<EMAIL>', license='Apache License 2.0', classifiers=[ 'Development", "2.7', 'Topic :: Internet :: WWW/HTTP', 'Topic :: Security', 'Topic", "return fd.read() here = os.path.abspath(os.path.dirname(__file__)) readme = read_file(os.path.join(here, 'README.rst')) #", ":: Console', 'Environment :: Console :: 
Curses', 'Intended Audience ::", "file.\"\"\" with codecs.open(filename, encoding=encoding) as fd: return fd.read() here =", "client\", long_description=readme, url='https://github.com/letsencrypt/letsencrypt', author=\"Certbot Project\", author_email='<EMAIL>', license='Apache License 2.0', classifiers=[", "url='https://github.com/letsencrypt/letsencrypt', author=\"Certbot Project\", author_email='<EMAIL>', license='Apache License 2.0', classifiers=[ 'Development Status", ":: OSI Approved :: Apache Software License', 'Operating System ::", ":: Console :: Curses', 'Intended Audience :: System Administrators', 'License", "System :: Installation/Setup', 'Topic :: System :: Networking', 'Topic ::", "shim around certbot install_requires = ['certbot'] version = '0.7.0.dev0' setup(", "setuptools import setup from setuptools import find_packages def read_file(filename, encoding='utf8'):", "'0.7.0.dev0' setup( name='letsencrypt', version=version, description=\"ACME client\", long_description=readme, url='https://github.com/letsencrypt/letsencrypt', author=\"Certbot Project\",", ":: System Administrators', 'License :: OSI Approved :: Apache Software", "2.0', classifiers=[ 'Development Status :: 3 - Alpha', 'Environment ::", "Language :: Python', 'Programming Language :: Python :: 2', 'Programming", "'Topic :: Internet :: WWW/HTTP', 'Topic :: Security', 'Topic ::", ":: Python :: 2.6', 'Programming Language :: Python :: 2.7',", "OSI Approved :: Apache Software License', 'Operating System :: POSIX", "def read_file(filename, encoding='utf8'): \"\"\"Read unicode from given file.\"\"\" with codecs.open(filename,", "unicode from given file.\"\"\" with codecs.open(filename, encoding=encoding) as fd: return", "WWW/HTTP', 'Topic :: Security', 'Topic :: System :: Installation/Setup', 'Topic", "sys from setuptools import setup from setuptools import find_packages def", "import os import sys from setuptools import setup from setuptools", "Language :: Python :: 2.7', 'Topic :: Internet 
:: WWW/HTTP',", "Internet :: WWW/HTTP', 'Topic :: Security', 'Topic :: System ::", ":: WWW/HTTP', 'Topic :: Security', 'Topic :: System :: Installation/Setup',", "= '0.7.0.dev0' setup( name='letsencrypt', version=version, description=\"ACME client\", long_description=readme, url='https://github.com/letsencrypt/letsencrypt', author=\"Certbot", "import find_packages def read_file(filename, encoding='utf8'): \"\"\"Read unicode from given file.\"\"\"", "os import sys from setuptools import setup from setuptools import", "given file.\"\"\" with codecs.open(filename, encoding=encoding) as fd: return fd.read() here", "'Intended Audience :: System Administrators', 'License :: OSI Approved ::", "System Administrators', 'License :: OSI Approved :: Apache Software License',", "Utilities', ], packages=find_packages(), include_package_data=True, install_requires=install_requires, entry_points={ 'console_scripts': [ 'letsencrypt =", "Alpha', 'Environment :: Console', 'Environment :: Console :: Curses', 'Intended", "System :: Systems Administration', 'Topic :: Utilities', ], packages=find_packages(), include_package_data=True,", "System :: Networking', 'Topic :: System :: Systems Administration', 'Topic", ":: 2', 'Programming Language :: Python :: 2.6', 'Programming Language", "Administration', 'Topic :: Utilities', ], packages=find_packages(), include_package_data=True, install_requires=install_requires, entry_points={ 'console_scripts':", "License 2.0', classifiers=[ 'Development Status :: 3 - Alpha', 'Environment", "'Topic :: Utilities', ], packages=find_packages(), include_package_data=True, install_requires=install_requires, entry_points={ 'console_scripts': [", "fd.read() here = os.path.abspath(os.path.dirname(__file__)) readme = read_file(os.path.join(here, 'README.rst')) # This", "Administrators', 'License :: OSI Approved :: Apache Software License', 'Operating", "version=version, description=\"ACME client\", long_description=readme, 
url='https://github.com/letsencrypt/letsencrypt', author=\"Certbot Project\", author_email='<EMAIL>', license='Apache License", "around certbot install_requires = ['certbot'] version = '0.7.0.dev0' setup( name='letsencrypt',", "License', 'Operating System :: POSIX :: Linux', 'Programming Language ::", "- Alpha', 'Environment :: Console', 'Environment :: Console :: Curses',", "= ['certbot'] version = '0.7.0.dev0' setup( name='letsencrypt', version=version, description=\"ACME client\",", "import codecs import os import sys from setuptools import setup", "# This package is a simple shim around certbot install_requires", "'Environment :: Console :: Curses', 'Intended Audience :: System Administrators',", "Console', 'Environment :: Console :: Curses', 'Intended Audience :: System", "is a simple shim around certbot install_requires = ['certbot'] version", ":: Apache Software License', 'Operating System :: POSIX :: Linux',", "Software License', 'Operating System :: POSIX :: Linux', 'Programming Language", "import sys from setuptools import setup from setuptools import find_packages", "certbot install_requires = ['certbot'] version = '0.7.0.dev0' setup( name='letsencrypt', version=version,", "encoding=encoding) as fd: return fd.read() here = os.path.abspath(os.path.dirname(__file__)) readme =", "license='Apache License 2.0', classifiers=[ 'Development Status :: 3 - Alpha',", "'Operating System :: POSIX :: Linux', 'Programming Language :: Python',", "as fd: return fd.read() here = os.path.abspath(os.path.dirname(__file__)) readme = read_file(os.path.join(here,", ":: POSIX :: Linux', 'Programming Language :: Python', 'Programming Language", "find_packages def read_file(filename, encoding='utf8'): \"\"\"Read unicode from given file.\"\"\" with", ":: Linux', 'Programming Language :: Python', 'Programming Language :: Python", "Apache Software License', 'Operating System :: POSIX :: Linux', 'Programming", "here = os.path.abspath(os.path.dirname(__file__)) readme = 
read_file(os.path.join(here, 'README.rst')) # This package", "Systems Administration', 'Topic :: Utilities', ], packages=find_packages(), include_package_data=True, install_requires=install_requires, entry_points={", "setup( name='letsencrypt', version=version, description=\"ACME client\", long_description=readme, url='https://github.com/letsencrypt/letsencrypt', author=\"Certbot Project\", author_email='<EMAIL>',", "2', 'Programming Language :: Python :: 2.6', 'Programming Language ::", "name='letsencrypt', version=version, description=\"ACME client\", long_description=readme, url='https://github.com/letsencrypt/letsencrypt', author=\"Certbot Project\", author_email='<EMAIL>', license='Apache", "encoding='utf8'): \"\"\"Read unicode from given file.\"\"\" with codecs.open(filename, encoding=encoding) as", "Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Topic", "with codecs.open(filename, encoding=encoding) as fd: return fd.read() here = os.path.abspath(os.path.dirname(__file__))", "['certbot'] version = '0.7.0.dev0' setup( name='letsencrypt', version=version, description=\"ACME client\", long_description=readme,", "System :: POSIX :: Linux', 'Programming Language :: Python', 'Programming", "2.6', 'Programming Language :: Python :: 2.7', 'Topic :: Internet", "'Development Status :: 3 - Alpha', 'Environment :: Console', 'Environment", "'Programming Language :: Python', 'Programming Language :: Python :: 2',", ":: Python :: 2', 'Programming Language :: Python :: 2.6',", "<reponame>ccppuu/certbot import codecs import os import sys from setuptools import", "classifiers=[ 'Development Status :: 3 - Alpha', 'Environment :: Console',", "packages=find_packages(), include_package_data=True, install_requires=install_requires, entry_points={ 'console_scripts': [ 'letsencrypt = certbot.main:main', ],", ":: System :: Networking', 'Topic :: System :: Systems Administration',", "'Programming Language :: Python :: 2.6', 'Programming Language :: Python", "setuptools 
import find_packages def read_file(filename, encoding='utf8'): \"\"\"Read unicode from given", "\"\"\"Read unicode from given file.\"\"\" with codecs.open(filename, encoding=encoding) as fd:", ":: Python', 'Programming Language :: Python :: 2', 'Programming Language", "from given file.\"\"\" with codecs.open(filename, encoding=encoding) as fd: return fd.read()", ":: Curses', 'Intended Audience :: System Administrators', 'License :: OSI", "a simple shim around certbot install_requires = ['certbot'] version =", "Security', 'Topic :: System :: Installation/Setup', 'Topic :: System ::", ":: System :: Installation/Setup', 'Topic :: System :: Networking', 'Topic", ":: Installation/Setup', 'Topic :: System :: Networking', 'Topic :: System", ":: Security', 'Topic :: System :: Installation/Setup', 'Topic :: System", "readme = read_file(os.path.join(here, 'README.rst')) # This package is a simple", "package is a simple shim around certbot install_requires = ['certbot']", "'Environment :: Console', 'Environment :: Console :: Curses', 'Intended Audience", "= os.path.abspath(os.path.dirname(__file__)) readme = read_file(os.path.join(here, 'README.rst')) # This package is", ":: Systems Administration', 'Topic :: Utilities', ], packages=find_packages(), include_package_data=True, install_requires=install_requires,", "include_package_data=True, install_requires=install_requires, entry_points={ 'console_scripts': [ 'letsencrypt = certbot.main:main', ], },", "= read_file(os.path.join(here, 'README.rst')) # This package is a simple shim", "import setup from setuptools import find_packages def read_file(filename, encoding='utf8'): \"\"\"Read", "codecs.open(filename, encoding=encoding) as fd: return fd.read() here = os.path.abspath(os.path.dirname(__file__)) readme", "Project\", author_email='<EMAIL>', license='Apache License 2.0', classifiers=[ 'Development Status :: 3", "read_file(filename, encoding='utf8'): \"\"\"Read unicode from given file.\"\"\" with codecs.open(filename, 
encoding=encoding)", ":: Networking', 'Topic :: System :: Systems Administration', 'Topic ::", "'Topic :: System :: Networking', 'Topic :: System :: Systems", ":: Python :: 2.7', 'Topic :: Internet :: WWW/HTTP', 'Topic", "Networking', 'Topic :: System :: Systems Administration', 'Topic :: Utilities',", "description=\"ACME client\", long_description=readme, url='https://github.com/letsencrypt/letsencrypt', author=\"Certbot Project\", author_email='<EMAIL>', license='Apache License 2.0',", "Language :: Python :: 2', 'Programming Language :: Python ::", "Status :: 3 - Alpha', 'Environment :: Console', 'Environment ::" ]
[ "body += '\\nJIRA ticket: %s' % (url) to_addr = self.rule['email']", "'jira_component', 'jira_components', 'jira_description', 'jira_ignore_in_title', 'jira_issuetype', 'jira_label', 'jira_labels', 'jira_max_age', 'jira_priority', 'jira_project',", "don't have on our radar # 2. A custom field", "'&amp;') body = body.replace('<', '&lt;') body = body.replace('>', '&gt;') return", "= list(set(self.bump_in_statuses) & set(self.bump_in_statuses)) if intersection: msg = '%s Both", "self.slack_msg_color, 'title': self.create_title(matches), 'text': body, 'mrkdwn_in': ['text', 'pretext'], 'fields': []", "self.hipchat_msg_color = self.rule.get('hipchat_msg_color', 'red') self.hipchat_message_format = self.rule.get('hipchat_message_format', 'html') self.hipchat_auth_token =", "url in self.slack_webhook_url: try: response = requests.post(url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers,", "self.rule[\"caller_id\"] } try: response = requests.post( self.servicenow_rest_url, auth=(self.rule['username'], self.rule['password']), headers=headers,", "self.smtp = SMTP(self.smtp_host) self.smtp.ehlo() if self.smtp.has_extn('STARTTLS'): self.smtp.starttls(keyfile=self.smtp_key_file, certfile=self.smtp_cert_file) if 'smtp_auth_file'", "return None if len(issues): return issues[0] def comment_on_ticket(self, ticket, match):", "'type' in field['schema']): raise Exception(\"Could not determine schema information for", "match)) timestamp = pretty_ts(lookup_es_key(match, self.rule['timestamp_field'])) comment = \"This alert was", "'jira_bump_in_statuses', 'jira_bump_not_in_statuses', 'jira_bump_tickets', 'jira_component', 'jira_components', 'jira_description', 'jira_ignore_in_title', 'jira_issuetype', 'jira_label', 'jira_labels',", "ticket self.pipeline['jira_server'] = self.server return None self.jira_args['summary'] = title self.jira_args['description']", "def get_info(self): return {'type': 'hipchat', 'hipchat_room_id': self.hipchat_room_id} class 
MsTeamsAlerter(Alerter): \"\"\"", "self.exotel_auth_token) try: message_body = self.rule['name'] + self.sms_body response = client.sms(self.rule['exotel_from_number'],", "proper headers headers = { \"Content-Type\": \"application/json\", \"Accept\": \"application/json;charset=utf-8\" }", "\"\"\" def __init__(self, rule): super(HTTPPostAlerter, self).__init__(rule) post_url = self.rule.get('http_post_url') if", "unicode(value) value_str.replace('\\\\n', '\\n') if type(value) in [list, dict]: try: value_str", "%s' % (ticket.key)) for match in matches: try: self.comment_on_ticket(ticket, match)", "required to be provided as an object. if 'custom' in", "matches: body += unicode(BasicMatchString(self.rule, match)) # Separate text of aggregated", "else: root[i] = self.resolve_rule_reference(item) elif type(root) == dict: # Make", "search version if 'alert_subject' not in self.rule: title = self.create_default_title(matches,", "a single label. This allows us to maintain backwards compatibility", "support only a single component. 
This allows us to maintain", "= self.rule.get('aws_secret_access_key') self.aws_region = self.rule.get('aws_region', 'us-east-1') self.profile = self.rule.get('boto_profile', None)", "cls=DateTimeEncoder), headers=headers, verify=not self.hipchat_ignore_ssl_errors, proxies=proxies) warnings.resetwarnings() response.raise_for_status() except RequestException as", "events found.\\n' else: top_events.sort(key=lambda x: x[1], reverse=True) for term, count", "self.rule: alert_text_args = self.rule.get('alert_text_args') alert_text_values = [lookup_es_key(self.match, arg) for arg", "= self.labels if self.watchers: # Support single watcher or list", "self.ms_teams_theme_color for url in self.ms_teams_webhook_url: try: response = requests.post(url, data=json.dumps(payload,", "each alert \"\"\" required_options = frozenset(['telegram_bot_token', 'telegram_room_id']) def __init__(self, rule):", "This technically may not work if there is a top-level", "incident on PagerDuty for each alert \"\"\" required_options = frozenset(['pagerduty_service_key',", "'ElastAlert: %s' % (self.rule['name']) if for_search: return title title +=", "as e: # JIRAError may contain HTML, pass along only", "in value] # Also attempt to handle arrays of complex", "= {'https': self.slack_proxy} if self.slack_proxy else None payload = {", "ServiceNowAlerter(Alerter): \"\"\" Creates a ServiceNow alert \"\"\" required_options = set([", "def alert(self, matches): body = self.create_alert_body(matches) # post to pagerduty", "alert title to be used, e.g. 
as an e-mail subject", "json import logging import subprocess import sys import warnings from", "= matches[0].get(self.rule['query_key']) if qk: subject += ' - %s' %", "it was provided proxies = {'https': self.telegram_proxy} if self.telegram_proxy else", "be modifying the contents of the structure we're walking for", "%s at %s:' % (self.rule['name'], match[qk], lookup_es_key(match, self.rule['timestamp_field'])) ) fullmessage['match']", "the contents of the structure we're walking for i, item", "watcher) except Exception as ex: # Re-raise the exception, preserve", "to this alert. At minimum, this should contain a field", "self.rule['query_key']), self.rule['name']) else: title = 'ElastAlert: %s' % (self.rule['name']) if", "'jira_project', 'jira_issuetype']) # Maintain a static set of built-in fields", "self.jira_args['description'] = self.create_alert_body(matches) try: self.issue = self.client.create_issue(**self.jira_args) # You can", "alerts fullmessage['rule'] = self.rule['name'] fullmessage['matching'] = unicode(BasicMatchString(self.rule, match)) fullmessage['alertDate'] =", "HipChat headers = {'content-type': 'application/json'} # set https proxy, if", "jira_field[5:].replace('_', ' ').lower() # All jira fields should be found", "self.hipchat_proxy else None payload = { 'color': self.hipchat_msg_color, 'message': body,", "= self.rule.get('slack_text_string', '') def format_body(self, body): # https://api.slack.com/docs/formatting body =", "JIRA ticket using jira_args (%s): %s\" % (self.jira_args, e)) elastalert_logger.info(\"Opened", "Handle non-array types else: # Simple string types if arg_type", "[list, dict]: try: value_str = self._pretty_print_as_json(value) except TypeError: # Non", "self.rule['timestamp_field'])) ) fullmessage['match'] = match[qk] else: elastalert_logger.info('Alert for %s at", "self.client = JIRA(self.server, basic_auth=(self.user, self.password)) self.get_priorities() self.get_arbitrary_fields() except JIRAError as", "and 
lookup_es_key(matches[0], self.rule['query_key']): title = 'ElastAlert: %s matched %s' %", "may be modifying the contents of the structure we're walking", "self.password)) self.get_priorities() self.get_arbitrary_fields() except JIRAError as e: # JIRAError may", "self.rule['hipchat_room_id'] self.hipchat_domain = self.rule.get('hipchat_domain', 'api.hipchat.com') self.hipchat_ignore_ssl_errors = self.rule.get('hipchat_ignore_ssl_errors', False) self.hipchat_notify", "alert \"\"\" required_options = frozenset(['pagerduty_service_key', 'pagerduty_client_name']) def __init__(self, rule): super(PagerDutyAlerter,", "self.rule['exotel_to_number'], message_body) if response != 200: raise EAException(\"Error posting to", "\"\"\" Send a Telegram message via bot api for each", "'password', 'servicenow_rest_url', 'short_description', 'comments', 'assignment_group', 'category', 'subcategory', 'cmdb_ci', 'caller_id' ])", "for x, y in self.match.items() if not x.startswith('top_events_')]) json_blob =", "the alert. 
:param match: A dictionary of relevant information to", "'..(truncated)' # Use appropriate line ending for text/html if self.hipchat_message_format", "= { 'message': body, 'level': self.gitter_msg_level } try: response =", "= self.rule.get('http_post_url') if isinstance(post_url, basestring): post_url = [post_url] self.post_url =", "yaml_loader(account_file) if 'user' not in account_conf or 'password' not in", "f[identifier].replace('_', ' ').lower()), None) if field: break if not field:", "= self.rule.get('http_post_all_values', not self.post_payload) def alert(self, matches): \"\"\" Each match", "except JIRAError as e: logging.exception(\"Error while searching for JIRA ticket", "alert \"\"\" required_options = frozenset(['victorops_api_key', 'victorops_routing_key', 'victorops_message_type']) def __init__(self, rule):", "each alert \"\"\" required_options = frozenset(['ms_teams_webhook_url', 'ms_teams_alert_summary']) def __init__(self, rule):", "payload = { \"description\": description, \"short_description\": self.rule['short_description'], \"comments\": self.rule['comments'], \"assignment_group\":", "\"\"\" Creates a Gitter activity message for each alert \"\"\"", "provided proxies = {'https': self.hipchat_proxy} if self.hipchat_proxy else None payload", "u'{{code:json}}{0}{{code}}'.format(json_blob) self.text += preformatted_text class Alerter(object): \"\"\" Base class for", "it isn't bcc = self.rule.get('bcc') if bcc and isinstance(bcc, basestring):", "component or list if type(self.components) != list: self.jira_args['components'] = [{'name':", "alert. 
\"\"\" raise NotImplementedError() def get_info(self): \"\"\" Returns a dictionary", "-*- coding: utf-8 -*- import copy import datetime import json", "%s at %s:' % (self.rule['name'], match[qk], lookup_es_key(match, self.rule['timestamp_field']))) alerts.append( '1)Alert", "= field['id'] # Check the schema information to decide how", "count) self.text += '\\n' def _add_match_items(self): match_items = self.match.items() match_items.sort(key=lambda", "match_items = dict([(x, y) for x, y in self.match.items() if", "from util import lookup_es_key from util import pretty_ts from util", "else val for val in alert_subject_values] return alert_subject.format(*alert_subject_values) return alert_subject", "VictorOps: %s\" % e) elastalert_logger.info(\"Trigger sent to VictorOps\") def get_info(self):", "sent to Gitter\") def get_info(self): return {'type': 'gitter', 'gitter_webhook_url': self.gitter_webhook_url}", "smtplib import SMTPAuthenticationError from smtplib import SMTPException from socket import", "% (self.priority, self.priority_ids.keys())) def get_arbitrary_fields(self): # This API returns metadata", "= self.resolve_rule_reference(value) def resolve_rule_reference(self, value): strValue = unicode(value) if strValue.startswith('$')", "are aware of, it means it is either: # 1.", "None try: response = requests.post( self.url, data=json.dumps(payload, cls=DateTimeEncoder, ensure_ascii=False), headers=headers,", "format_body(self, body): body = body.encode('UTF-8') if self.ms_teams_alert_fixed_width: body = body.replace('`',", "entirely? Probably the latter... 
raise Exception(\"Could not find a definition", "else: return value def alert(self, match): \"\"\" Send an alert.", "if count: title += ' - %s+ events' % (count)", "self._add_custom_alert_text() self._ensure_new_line() if self.rule.get('alert_text_type') != 'alert_text_only': self._add_rule_text() self._ensure_new_line() if self.rule.get('top_count_keys'):", "self.rule['command']] else: command = [command_arg % matches[0] for command_arg in", "obj) class BasicMatchString(object): \"\"\" Creates a string containing fields in", "def _add_custom_alert_text(self): missing = '<MISSING VALUE>' alert_text = unicode(self.rule.get('alert_text', ''))", "we're walking for i, item in enumerate(copy.copy(root)): if type(item) ==", "user and password information. \"\"\" account_conf = yaml_loader(account_file) if 'user'", "= {} for match in matches: if qk in match:", "self.rule: recipient = lookup_es_key(matches[0], self.rule['email_from_field']) if isinstance(recipient, basestring): if '@'", "to handle arrays of complex types that have to be", "\"\"\" required_options = frozenset(['jira_server', 'jira_account_file', 'jira_project', 'jira_issuetype']) # Maintain a", "account_file: Name of the file which contains user and password", "Exception as ex: # Re-raise the exception, preserve the stack-trace,", "command_arg in self.rule['command']] self.last_command = command except KeyError as e:", "self.rule.get('hipchat_domain', 'api.hipchat.com') self.hipchat_ignore_ssl_errors = self.rule.get('hipchat_ignore_ssl_errors', False) self.hipchat_notify = self.rule.get('hipchat_notify', True)", "proxies = {'https': self.ms_teams_proxy} if self.ms_teams_proxy else None payload =", "self.slack_username_override, 'slack_webhook_url': self.slack_webhook_url} class PagerDutyAlerter(Alerter): \"\"\" Create an incident on", "self).__init__(rule) self.twilio_account_sid = self.rule['twilio_account_sid'] self.twilio_auth_token = self.rule['twilio_auth_token'] self.twilio_to_number = 
self.rule['twilio_to_number']", "% (self.telegram_api_url, self.telegram_bot_token, \"sendMessage\") self.telegram_proxy = self.rule.get('telegram_proxy', None) def alert(self,", "u'⚠ *%s* ⚠ ```\\n' % (self.create_title(matches)) for match in matches:", "to_addr = self.rule['email'] if 'email_from_field' in self.rule: recipient = lookup_es_key(matches[0],", "'level': self.gitter_msg_level } try: response = requests.post(self.gitter_webhook_url, json.dumps(payload, cls=DateTimeEncoder), headers=headers,", "# If we find a field that is not covered", "subp = subprocess.Popen(command, stdin=subprocess.PIPE, shell=self.shell) if self.rule.get('pipe_match_json'): match_json = json.dumps(matches,", "class DebugAlerter(Alerter): \"\"\" The debug alerter uses a Python logger", "alert_subject_values[i] = alert_value alert_subject_values = ['<MISSING VALUE>' if val is", "= SMTP_SSL(self.smtp_host, keyfile=self.smtp_key_file, certfile=self.smtp_cert_file) else: if self.smtp_port: self.smtp = SMTP(self.smtp_host,", "except RequestException as e: raise EAException(\"Error posting to ServiceNow: %s\"", "self.rule.get('victorops_proxy', None) def alert(self, matches): body = self.create_alert_body(matches) # post", "def __init__(self, *args): super(CommandAlerter, self).__init__(*args) self.last_command = [] self.shell =", "be 'key', 'id', 'value', or something else # If it", "fields in match for the given rule. 
\"\"\" def __init__(self,", "= int(value) elif arg_type == 'option': self.jira_args[arg_name] = {'value': value}", "self.rule.get('sns_topic_arn', '') self.aws_access_key_id = self.rule.get('aws_access_key_id') self.aws_secret_access_key = self.rule.get('aws_secret_access_key') self.aws_region =", "are likely others that will need to be updated on", "- %s+ events' % (count) return title def get_info(self): return", "isinstance(self.slack_webhook_url, basestring): self.slack_webhook_url = [self.slack_webhook_url] self.slack_proxy = self.rule.get('slack_proxy', None) self.slack_username_override", "trigger a POST to the specified endpoint(s). \"\"\" for match", "Also attempt to handle arrays of complex types that have", "\"Exception encountered when trying to add '{0}' as a watcher.", "an account file. :param account_file: Name of the file which", "body = self.create_alert_body(matches) # post to Gitter headers = {'content-type':", "super(SnsAlerter, self).__init__(*args) self.sns_topic_arn = self.rule.get('sns_topic_arn', '') self.aws_access_key_id = self.rule.get('aws_access_key_id') self.aws_secret_access_key", "'key' elif array_items == 'option': self.jira_args[arg_name] = [{'value': v} for", "for term, count in top_events: self.text += '%s: %s\\n' %", "import warnings from email.mime.text import MIMEText from email.utils import formatdate", "match[qk] else: elastalert_logger.info('Alert for %s at %s:' % (self.rule['name'], lookup_es_key(match,", "= self.rule.get('hipchat_msg_color', 'red') self.hipchat_message_format = self.rule.get('hipchat_message_format', 'html') self.hipchat_auth_token = self.rule['hipchat_auth_token']", "if isinstance(self.rule['command'], basestring): self.shell = True if '%' in self.rule['command']:", "if self.new_style_string_format: command = [command_arg.format(match=matches[0]) for command_arg in self.rule['command']] else:", "JIRAError as e: raise EAException(\"Error creating JIRA ticket using jira_args", "to PagerDuty\") def 
get_incident_key(self, matches): if self.pagerduty_incident_key_args: incident_key_values = [lookup_es_key(matches[0],", "missing = '<MISSING VALUE>' alert_text = unicode(self.rule.get('alert_text', '')) if 'alert_text_args'", "'jira_watchers', ] # Some built-in jira types that can be", "to=self.twilio_to_number, from_=self.twilio_from_number) except TwilioRestException as e: raise EAException(\"Error posting to", "self.labels if self.watchers: # Support single watcher or list if", "= {'https': self.hipchat_proxy} if self.hipchat_proxy else None payload = {", "None and 'jira_ticket' in self.pipeline: url = '%s/browse/%s' % (self.pipeline['jira_server'],", "raise EAException(\"Error posting HTTP Post alert: %s\" % e) elastalert_logger.info(\"HTTP", "dashes if len(matches) > 1: body += '\\n----------------------------------------\\n' return body", "raise EAException(\"Error creating JIRA ticket using jira_args (%s): %s\" %", "self.rule['ms_teams_webhook_url'] if isinstance(self.ms_teams_webhook_url, basestring): self.ms_teams_webhook_url = [self.ms_teams_webhook_url] self.ms_teams_proxy = self.rule.get('ms_teams_proxy',", "that have to be passed as objects with an identifier", "and status in (%s)' % (jql, ','.join(self.bump_in_statuses)) if self.bump_not_in_statuses: jql", "response = client.sms(self.rule['exotel_from_number'], self.rule['exotel_to_number'], message_body) if response != 200: raise", "self.rule.get('jira_bump_after_inactivity', self.max_age) self.watchers = self.rule.get('jira_watchers') if self.bump_in_statuses and self.bump_not_in_statuses: msg", "created by ElastAlerter.send_alert() # and attached to each alerters used", "= self.rule.get('pagerduty_incident_key', '') self.pagerduty_incident_key_args = self.rule.get('pagerduty_incident_key_args', None) self.pagerduty_proxy = self.rule.get('pagerduty_proxy',", "matches): body = self.description + '\\n' body += self.get_aggregation_summary_text(matches) for", "only provides # a single value for a 
multi-value field", "self.client.add_comment(ticket, comment) def alert(self, matches): title = self.create_title(matches) if self.bump_tickets:", "import EAException from util import elastalert_logger from util import lookup_es_key", "Custom Field\",\"custom\":true,\"orderable\":true,\"navigable\":true,\"searchable\":true, # \"clauseNames\":[\"cf[12807]\",\"My Custom Field\"],\"schema\":{\"type\":\"array\",\"items\":\"string\", # \"custom\":\"com.atlassian.jira.plugin.system.customfieldtypes:multiselect\",\"customId\":12807}} # There", "self.rule.get('hipchat_notify', True) self.hipchat_from = self.rule.get('hipchat_from', '') self.url = 'https://%s/v2/room/%s/notification?auth_token=%s' %", "MIMEText from email.utils import formatdate from smtplib import SMTP from", "an API error that will bubble up self.jira_args[arg_name] = [{'name':", "self.victorops_entity_display_name = self.rule.get('victorops_entity_display_name', 'no entity display name') self.url = 'https://alert.victorops.com/integrations/generic/20131114/alert/%s/%s'", "code is %s\" % response) except: raise EAException(\"Error posting to", "[lookup_es_key(matches[0], arg) for arg in self.pagerduty_incident_key_args] # Populate values with", "self.rule['name'] + self.sms_body response = client.sms(self.rule['exotel_from_number'], self.rule['exotel_to_number'], message_body) if response", "= self.create_title(matches) email_msg['To'] = ', '.join(to_addr) email_msg['From'] = self.from_addr email_msg['Reply-To']", "'comments', 'assignment_group', 'category', 'subcategory', 'cmdb_ci', 'caller_id' ]) def __init__(self, rule):", "version if 'alert_subject' not in self.rule: title = self.create_default_title(matches, True)", "!= list: self.jira_args['components'] = [{'name': self.components}] else: self.jira_args['components'] = [{'name':", "required_options = frozenset(['ms_teams_webhook_url', 'ms_teams_alert_summary']) def __init__(self, rule): super(MsTeamsAlerter, self).__init__(rule) 
self.ms_teams_webhook_url", "= account_conf['password'] class StompAlerter(Alerter): \"\"\" The stomp alerter publishes alerts", "frozenset(['jira_server', 'jira_account_file', 'jira_project', 'jira_issuetype']) # Maintain a static set of", "def get_account(self, account_file): \"\"\" Gets the username and password from", "we couldn't find that type? # OR raise and fail", "string types if array_items in ['string', 'date', 'datetime']: # Special", "unicode(JiraFormattedMatchString(self.rule, match)) if len(matches) > 1: body += '\\n----------------------------------------\\n' return", "[recipient + self.rule['email_add_domain']] elif isinstance(recipient, list): to_addr = recipient if", "\"\"\" required_options = frozenset(['slack_webhook_url']) def __init__(self, rule): super(SlackAlerter, self).__init__(rule) self.slack_webhook_url", "'&gt;') return body def alert(self, matches): body = self.create_alert_body(matches) body", "alert_text_values[i] = alert_value alert_text_values = [missing if val is None", "in self.rule and 'summary_table_fields' in self.rule: summary_table_fields = self.rule['summary_table_fields'] if", "self).__init__(rule) self.exotel_account_sid = self.rule['exotel_account_sid'] self.exotel_auth_token = self.rule['exotel_auth_token'] self.exotel_to_number = self.rule['exotel_to_number']", "MsTeamsAlerter(Alerter): \"\"\" Creates a Microsoft Teams Conversation Message for each", "= self.rule['slack_webhook_url'] if isinstance(self.slack_webhook_url, basestring): self.slack_webhook_url = [self.slack_webhook_url] self.slack_proxy =", "the rule has a query_key, add that value plus timestamp", "of alerts. :param rule: The rule configuration. 
\"\"\" required_options =", "for i in xrange(len(alert_subject_values)): if alert_subject_values[i] is None: alert_value =", "This blob contains non-unicode, so lets pretend it's Latin-1 to", "= summary_table_fields + ['count'] text += \"Aggregation resulted in the", "if not x.startswith('top_events_')]) json_blob = self._pretty_print_as_json(match_items) preformatted_text = u'{{code:json}}{0}{{code}}'.format(json_blob) self.text", "top-level rule property with the same name # as an", "basis custom_string_types_with_special_handling = [ 'com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes', 'com.atlassian.jira.plugin.system.customfieldtypes:multiselect', 'com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons', ] def __init__(self,", "'/queue/ALERT') conn = stomp.Connection([(self.stomp_hostname, self.stomp_hostport)]) conn.start() conn.connect(self.stomp_login, self.stomp_password) conn.send(self.stomp_destination, json.dumps(fullmessage))", "of relevant information to the alert. \"\"\" raise NotImplementedError() def", "A dictionary of relevant information to the alert. 
\"\"\" raise", "elif array_items == 'option': self.jira_args[arg_name] = [{'value': v} for v", "ensure_ascii=False) except UnicodeDecodeError: # This blob contains non-unicode, so lets", "logging.exception(\"Error while searching for JIRA ticket using jql '%s': %s\"", "body, 'message_format': self.hipchat_message_format, 'notify': self.hipchat_notify, 'from': self.hipchat_from } try: if", "datetime import json import logging import subprocess import sys import", "alert(self, matches): alerts = [] qk = self.rule.get('query_key', None) fullmessage", "sent to MS Teams\") def get_info(self): return {'type': 'ms_teams', 'ms_teams_webhook_url':", "- datetime.timedelta(days=self.max_age)).strftime('%Y-%m-%d') jql = 'project=%s AND summary~\"%s\" and created >=", "if isinstance(recipient, basestring): if '@' in recipient: to_addr = [recipient]", "lookup_es_key(match, self.rule['timestamp_field']))) alerts.append( '1)Alert for %s, %s at %s:' %", "self.rule.get('ms_teams_proxy', None) self.ms_teams_alert_summary = self.rule.get('ms_teams_alert_summary', 'ElastAlert Message') self.ms_teams_alert_fixed_width = self.rule.get('ms_teams_alert_fixed_width',", "exotel import Exotel from jira.client import JIRA from jira.exceptions import", "priority index to id. 
\"\"\" priorities = self.client.priorities() self.priority_ids =", "item in enumerate(copy.copy(root)): if type(item) == dict or type(item) ==", "self.telegram_api_url = self.rule.get('telegram_api_url', 'api.telegram.org') self.url = 'https://%s/bot%s/%s' % (self.telegram_api_url, self.telegram_bot_token,", "via bot api for each alert \"\"\" required_options = frozenset(['telegram_bot_token',", "= formatdate() if self.rule.get('cc'): email_msg['CC'] = ','.join(self.rule['cc']) to_addr = to_addr", "pretty_ts from util import ts_now from util import ts_to_dt class", "if self.components: # Support single component or list if type(self.components)", "action if self.watchers: for watcher in self.watchers: try: self.client.add_watcher(self.issue.key, watcher)", "= body.replace('>', '&gt;') return body def alert(self, matches): body =", "return {'type': 'jira'} class CommandAlerter(Alerter): required_options = set(['command']) def __init__(self,", "self.rule['servicenow_rest_url'] self.servicenow_proxy = self.rule.get('servicenow_proxy', None) def alert(self, matches): for match", "%s: %s\" % (' '.join(command), e)) def get_info(self): return {'type':", "EmailAlerter(Alerter): \"\"\" Sends an email alert \"\"\" required_options = frozenset(['email'])", "set # For anything else, we will do best-effort and", "self.sns_topic_arn = self.rule.get('sns_topic_arn', '') self.aws_access_key_id = self.rule.get('aws_access_key_id') self.aws_secret_access_key = self.rule.get('aws_secret_access_key')", "self.priority_ids = {} for x in range(len(priorities)): self.priority_ids[x] = priorities[x].id", "matches): client = Exotel(self.exotel_account_sid, self.exotel_auth_token) try: message_body = self.rule['name'] +", "assignee): self.assignee = assignee if assignee: self.jira_args['assignee'] = {'name': assignee}", "field or 'type' in field['schema']): raise Exception(\"Could not determine schema", "count for spikes count = matches[0].get('spike_count') if count: title +=", "= 
[{'name': self.components}] else: self.jira_args['components'] = [{'name': component} for component", "alert_text.format(*alert_text_values) elif 'alert_text_kw' in self.rule: kw = {} for name,", "title = self.create_default_title(matches, True) else: title = self.create_title(matches) if 'jira_ignore_in_title'", "False) self.ms_teams_theme_color = self.rule.get('ms_teams_theme_color', '') def format_body(self, body): body =", "if 'email_add_domain' in self.rule: to_addr = [name + self.rule['email_add_domain'] for", "\"\"\" def alert(self, matches): qk = self.rule.get('query_key', None) for match", "is None else val for val in alert_subject_values] return alert_subject.format(*alert_subject_values)", "# Number type elif arg_type == 'number': self.jira_args[arg_name] = int(value)", "and attached to each alerters used by a rule before", "to be ok title = title.replace(' - ', ' ')", "self.ms_teams_proxy = self.rule.get('ms_teams_proxy', None) self.ms_teams_alert_summary = self.rule.get('ms_teams_alert_summary', 'ElastAlert Message') self.ms_teams_alert_fixed_width", "self.match = match def _ensure_new_line(self): while self.text[-2:] != '\\n\\n': self.text", "post to pagerduty headers = {'content-type': 'application/json'} payload = {", "self.rule.get('http_post_url') if isinstance(post_url, basestring): post_url = [post_url] self.post_url = post_url", "'array': # As a convenience, support the scenario wherein the", "%s\" % e) elastalert_logger.info(\"Alert sent to Gitter\") def get_info(self): return", "%s:' % (self.rule['name'], lookup_es_key(match, self.rule['timestamp_field']))) elastalert_logger.info(unicode(BasicMatchString(self.rule, match))) def get_info(self): return", "self.resolve_rule_references(self.rule) def resolve_rule_references(self, root): # Support referencing other top-level rule", "rule: The rule configuration. \"\"\" required_options = frozenset([]) def __init__(self,", "JIRA issue summary. 
:param matches: A list of dictionaries of", "for v in value] else: # Try setting it as", "= self.rule.get('hipchat_notify', True) self.hipchat_from = self.rule.get('hipchat_from', '') self.url = 'https://%s/v2/room/%s/notification?auth_token=%s'", "self.rule: summary_table_fields = self.rule['summary_table_fields'] if not isinstance(summary_table_fields, list): summary_table_fields =", "field['schema']['items'] # Simple string types if array_items in ['string', 'date',", "serializable object, fallback to str pass self.text += '%s: %s\\n'", "in self.rule: to_addr = [name + self.rule['email_add_domain'] for name in", "\"\"\" required_options = frozenset(['stomp_hostname', 'stomp_hostport', 'stomp_login', 'stomp_password']) def alert(self, matches):", "we find a field that is not covered by the", "elastalert_logger.info('Commenting on existing ticket %s' % (ticket.key)) for match in", "raise Exception( \"Exception encountered when trying to add '{0}' as", "pass along only first 1024 chars raise EAException(\"Error connecting to", "summary~\"%s\" and created >= \"%s\"' % (self.project, title, date) if", "there is a bcc then also convert it to a", "# Support single label or list if type(self.labels) != list:", "{'name': self.assignee} try: self.client = JIRA(self.server, basic_auth=(self.user, self.password)) self.get_priorities() self.get_arbitrary_fields()", "'twilio_client_name': self.twilio_from_number} class VictorOpsAlerter(Alerter): \"\"\" Creates a VictorOps Incident for", "None payload = { 'chat_id': self.telegram_room_id, 'text': body, 'parse_mode': 'markdown',", "self.victorops_proxy else None payload = { \"message_type\": self.victorops_message_type, \"entity_display_name\": self.victorops_entity_display_name,", "jql = '%s and status not in (%s)' % (jql,", "= u'{{code:json}}{0}{{code}}'.format(json_blob) self.text += preformatted_text class Alerter(object): \"\"\" Base class", "self.rule['email_add_domain'] for name in to_addr] email_msg = 
MIMEText(body.encode('UTF-8'), _charset='UTF-8') email_msg['Subject']", "if it was provided proxies = {'https': self.telegram_proxy} if self.telegram_proxy", "= self.create_alert_body(matches) try: self.issue = self.client.create_issue(**self.jira_args) # You can not", "= self.client.create_issue(**self.jira_args) # You can not add watchers on initial", "\"custom\":\"com.atlassian.jira.plugin.system.customfieldtypes:multiselect\",\"customId\":12807}} # There are likely others that will need to", "we explicitly know how to set # For anything else,", "self.find_existing_ticket(matches) if ticket: inactivity_datetime = ts_now() - datetime.timedelta(days=self.bump_after_inactivity) if ts_to_dt(ticket.fields.updated)", "TwilioAlerter(Alerter): required_options = frozenset(['twilio_account_sid', 'twilio_auth_token', 'twilio_to_number', 'twilio_from_number']) def __init__(self, rule):", "sent to ServiceNow\") def get_info(self): return {'type': 'ServiceNow', 'self.servicenow_rest_url': self.servicenow_rest_url}", "watcher failed to be added raise Exception( \"Exception encountered when", "configured if jira_field.startswith('jira_') and jira_field not in self.known_field_list: # Remove", "= pretty_ts(lookup_es_key(match, self.rule['timestamp_field'])) comment = \"This alert was triggered again", "!= list: value = [value] array_items = field['schema']['items'] # Simple", "self.slack_webhook_url} class PagerDutyAlerter(Alerter): \"\"\" Create an incident on PagerDuty for", "get_info(self): return {'type': 'hipchat', 'hipchat_room_id': self.hipchat_room_id} class MsTeamsAlerter(Alerter): \"\"\" Creates", "frozenset(['stomp_hostname', 'stomp_hostport', 'stomp_login', 'stomp_password']) def alert(self, matches): alerts = []", "Creates a mapping of priority index to id. 
\"\"\" priorities", "'text': self.slack_text_string, 'attachments': [ { 'color': self.slack_msg_color, 'title': self.create_title(matches), 'text':", "provided proxies = {'https': self.gitter_proxy} if self.gitter_proxy else None payload", "self.servicenow_proxy} if self.servicenow_proxy else None payload = { \"description\": description,", "from smtplib import SMTPException from socket import error import boto3", "list if type(self.labels) != list: self.labels = [self.labels] self.jira_args['labels'] =", "we can see at a glance how many of each", "self.labels = [self.labels] self.jira_args['labels'] = self.labels if self.watchers: # Support", "set(['command']) def __init__(self, *args): super(CommandAlerter, self).__init__(*args) self.last_command = [] self.shell", "[] self.shell = False if isinstance(self.rule['command'], basestring): self.shell = True", "'text': body, 'parse_mode': 'markdown', 'disable_web_page_preview': True } try: response =", "in xrange(len(alert_subject_values)): if alert_subject_values[i] is None: alert_value = self.rule.get(alert_subject_args[i]) if", "subject or JIRA issue summary. :param matches: A list of", "isn't cc = self.rule.get('cc') if cc and isinstance(cc, basestring): self.rule['cc']", "actually be 'key', 'id', 'value', or something else # If", "= self.client.priorities() self.priority_ids = {} for x in range(len(priorities)): self.priority_ids[x]", "can see at a glance how many of each aggregation_key", "raise EAException(\"Error while running command %s: %s\" % (' '.join(command),", "a list if it isn't bcc = self.rule.get('bcc') if bcc", "shell=self.shell) if self.rule.get('pipe_match_json'): match_json = json.dumps(matches, cls=DateTimeEncoder) + '\\n' stdout,", "[missing if val is None else val for val in", "email_msg['CC'] = ','.join(self.rule['cc']) to_addr = to_addr + self.rule['cc'] if self.rule.get('bcc'):", "information. 
\"\"\" account_conf = yaml_loader(account_file) if 'user' not in account_conf", "if ticket: inactivity_datetime = ts_now() - datetime.timedelta(days=self.bump_after_inactivity) if ts_to_dt(ticket.fields.updated) >=", "and arguments try: if self.new_style_string_format: command = [command_arg.format(match=matches[0]) for command_arg", "self.rule.get('hipchat_msg_color', 'red') self.hipchat_message_format = self.rule.get('hipchat_message_format', 'html') self.hipchat_auth_token = self.rule['hipchat_auth_token'] self.hipchat_room_id", "NotImplementedError() def get_info(self): \"\"\" Returns a dictionary of data related", "from jira.exceptions import JIRAError from requests.exceptions import RequestException from staticconf.loader", "referencing other top-level rule properties # This technically may not", "get_info(self): \"\"\" Returns a dictionary of data related to this", "return self.create_default_title(matches) def create_custom_title(self, matches): alert_subject = unicode(self.rule['alert_subject']) if 'alert_subject_args'", "it isn't cc = self.rule.get('cc') if cc and isinstance(cc, basestring):", "%s\" % e) elastalert_logger.info(\"HTTP Post alert sent.\") def get_info(self): return", "import RequestException from staticconf.loader import yaml_loader from texttable import Texttable", "self.bump_in_statuses: jql = '%s and status in (%s)' % (jql,", "as e: raise EAException(\"Error formatting command: %s\" % (e)) #", "self).__init__(rule) self.pagerduty_service_key = self.rule['pagerduty_service_key'] self.pagerduty_client_name = self.rule['pagerduty_client_name'] self.pagerduty_incident_key = self.rule.get('pagerduty_incident_key',", "'') def alert(self, matches): client = Exotel(self.exotel_account_sid, self.exotel_auth_token) try: message_body", "aggregation so that we can see at a glance how", "to ms teams: %s\" % e) elastalert_logger.info(\"Alert sent to MS", "'gitter_webhook_url': self.gitter_webhook_url} class ServiceNowAlerter(Alerter): \"\"\" Creates a 
ServiceNow alert \"\"\"", "warnings from email.mime.text import MIMEText from email.utils import formatdate from", "list): to_addr = recipient if 'email_add_domain' in self.rule: to_addr =", "def get_priorities(self): \"\"\" Creates a mapping of priority index to", "might actually be 'key', 'id', 'value', or something else #", "raise Exception(\"Could not determine schema information for the jira field", "__init__(self, rule): super(MsTeamsAlerter, self).__init__(rule) self.ms_teams_webhook_url = self.rule['ms_teams_webhook_url'] if isinstance(self.ms_teams_webhook_url, basestring):", "self.twilio_account_sid = self.rule['twilio_account_sid'] self.twilio_auth_token = self.rule['twilio_auth_token'] self.twilio_to_number = self.rule['twilio_to_number'] self.twilio_from_number", "super(JiraAlerter, self).__init__(rule) self.server = self.rule['jira_server'] self.get_account(self.rule['jira_account_file']) self.project = self.rule['jira_project'] self.issue_type", "types like strings or numbers if arg_type == 'array': #", "from twilio.rest import Client as TwilioClient from util import EAException", "type(root) == dict: # Make a copy since we may", "self.victorops_entity_display_name, \"monitoring_tool\": \"ElastAlert\", \"state_message\": body } try: response = requests.post(self.url,", "if (len(body) > 9999): body = body[:9980] + '..(truncated)' #", "self).__init__(rule) self.victorops_api_key = self.rule['victorops_api_key'] self.victorops_routing_key = self.rule['victorops_routing_key'] self.victorops_message_type = self.rule['victorops_message_type']", "to Gitter\") def get_info(self): return {'type': 'gitter', 'gitter_webhook_url': self.gitter_webhook_url} class", "None payload = { \"message_type\": self.victorops_message_type, \"entity_display_name\": self.victorops_entity_display_name, \"monitoring_tool\": \"ElastAlert\",", "walking for key, value in root.copy().iteritems(): if type(value) == dict", "self.rule.get('stomp_hostport', '61613') self.stomp_login = 
self.rule.get('stomp_login', 'admin') self.stomp_password = self.rule.get('stomp_password', '<PASSWORD>')", "rule): super(PagerDutyAlerter, self).__init__(rule) self.pagerduty_service_key = self.rule['pagerduty_service_key'] self.pagerduty_client_name = self.rule['pagerduty_client_name'] self.pagerduty_incident_key", "self.rule['pagerduty_service_key'] self.pagerduty_client_name = self.rule['pagerduty_client_name'] self.pagerduty_incident_key = self.rule.get('pagerduty_incident_key', '') self.pagerduty_incident_key_args =", "attempt to handle arrays of complex types that have to", "says that these are strings, but # in reality, they", "a field type corresponding to the type of Alerter. \"\"\"", "self.watchers = self.rule.get('jira_watchers') if self.bump_in_statuses and self.bump_not_in_statuses: msg = 'Both", "= frozenset(['telegram_bot_token', 'telegram_room_id']) def __init__(self, rule): super(TelegramAlerter, self).__init__(rule) self.telegram_bot_token =", "def alert(self, matches): body = self.create_alert_body(matches) # post to Gitter", "if self.pipeline is not None and 'jira_ticket' in self.pipeline: url", "= self._pretty_print_as_json(value) except TypeError: # Non serializable object, fallback to", "in field['schema'] and field['schema']['custom'] in self.custom_string_types_with_special_handling: self.jira_args[arg_name] = [{'value': v}", "in recipient: to_addr = [recipient] elif 'email_add_domain' in self.rule: to_addr", "None self.pipeline['jira_server'] = self.server return None elastalert_logger.info('Commenting on existing ticket", "(e)) # Run command and pipe data try: subp =", "set of built-in fields that we explicitly know how to", "name self.labels = self.rule.get('jira_labels', self.rule.get('jira_label')) self.description = self.rule.get('jira_description', '') self.assignee", "= boto3.Session( aws_access_key_id=self.aws_access_key_id, aws_secret_access_key=self.aws_secret_access_key, region_name=self.aws_region, profile_name=self.profile ) 
sns_client = session.client('sns')", "key_tuple not in match_aggregation: match_aggregation[key_tuple] = 1 else: match_aggregation[key_tuple] =", "required_options = frozenset(['stomp_hostname', 'stomp_hostport', 'stomp_login', 'stomp_password']) def alert(self, matches): alerts", "self.rule: self.smtp.login(self.user, self.password) except (SMTPException, error) as e: raise EAException(\"Error", "'1)Alert for %s, %s at %s:' % (self.rule['name'], match[qk], lookup_es_key(match,", "%s not found. Valid priorities are %s\" % (self.priority, self.priority_ids.keys()))", "def resolve_rule_references(self, root): # Support referencing other top-level rule properties", "\"'\") body = \"```{0}```\".format('```\\n\\n```'.join(x for x in body.split('\\n'))).replace('\\n``````', '') return", "+= alert_text def _add_rule_text(self): self.text += self.rule['type'].get_match_str(self.match) def _add_top_counts(self): for", "single label or list if type(self.labels) != list: self.labels =", "# This API returns metadata about all the fields defined", "if it was provided proxies = {'https': self.hipchat_proxy} if self.hipchat_proxy", "so lets pretend it's Latin-1 to show something return json.dumps(blob,", "a static set of built-in fields that we explicitly know", "priorities are %s\" % (self.priority, self.priority_ids.keys())) def get_arbitrary_fields(self): # This", "= self.create_alert_body(matches) body = self.format_body(body) # post to slack headers", "or list if type(self.labels) != list: self.labels = [self.labels] self.jira_args['labels']", "'.join(command))) except OSError as e: raise EAException(\"Error while running command", "to pagerduty: %s\" % e) elastalert_logger.info(\"Trigger sent to PagerDuty\") def", "except RequestException as e: raise EAException(\"Error posting to HipChat: %s\"", "have been matched in the lookup_es_key call above if val", "def __init__(self, rule): super(ExotelAlerter, self).__init__(rule) self.exotel_account_sid = 
self.rule['exotel_account_sid'] self.exotel_auth_token =", "# \"clauseNames\":[\"cf[12807]\",\"My Custom Field\"],\"schema\":{\"type\":\"array\",\"items\":\"string\", # \"custom\":\"com.atlassian.jira.plugin.system.customfieldtypes:multiselect\",\"customId\":12807}} # There are likely", "ticket %s: %s\" % (ticket, e)) if self.pipeline is not", "> 1: body += '\\n----------------------------------------\\n' return body def get_aggregation_summary_text(self, matches):", "fullmessage = {} for match in matches: if qk in", "list: self.jira_args['components'] = [{'name': self.components}] else: self.jira_args['components'] = [{'name': component}", "!= list: self.watchers = [self.watchers] if self.assignee: self.jira_args['assignee'] = {'name':", "if self.bump_tickets: ticket = self.find_existing_ticket(matches) if ticket: inactivity_datetime = ts_now()", "self.rule['command']] self.last_command = command except KeyError as e: raise EAException(\"Error", "pagerduty headers = {'content-type': 'application/json'} payload = { 'service_key': self.pagerduty_service_key,", "+= '%s: %s\\n' % (key, value_str) def _pretty_print_as_json(self, blob): try:", "to be added raise Exception( \"Exception encountered when trying to", "component in self.components] if self.labels: # Support single label or", "Telegram message via bot api for each alert \"\"\" required_options", "PagerDutyAlerter(Alerter): \"\"\" Create an incident on PagerDuty for each alert", "We used to support only a single label. This allows", "to maintain backwards compatibility # while also giving the user-facing", "= self.rule.get(self.pagerduty_incident_key_args[i]) if key_value: incident_key_values[i] = key_value incident_key_values = ['<MISSING", "single component. 
This allows us to maintain backwards compatibility #", "self.hipchat_domain, self.hipchat_room_id, self.hipchat_auth_token) self.hipchat_proxy = self.rule.get('hipchat_proxy', None) def alert(self, matches):", "alert \"\"\" required_options = set([ 'username', 'password', 'servicenow_rest_url', 'short_description', 'comments',", "set([ 'username', 'password', 'servicenow_rest_url', 'short_description', 'comments', 'assignment_group', 'category', 'subcategory', 'cmdb_ci',", "self.rule and self.rule['new_style_string_format']: self.new_style_string_format = True def alert(self, matches): #", "1 else: match_aggregation[key_tuple] = match_aggregation[key_tuple] + 1 for keys, count", "command and pipe data try: subp = subprocess.Popen(command, stdin=subprocess.PIPE, shell=self.shell)", "= to_addr + self.rule['cc'] if self.rule.get('bcc'): to_addr = to_addr +", "def get_info(self): \"\"\" Returns a dictionary of data related to", "maintain backwards compatibility # while also giving the user-facing API", "with the same name # as an es result key,", "proxy, if it was provided proxies = {'https': self.hipchat_proxy} if", "in alert_subject_args] # Support referencing other top-level rule properties #", "keys, count in match_aggregation.iteritems(): text_table.add_row([key for key in keys] +", "array_items = field['schema']['items'] # Simple string types if array_items in", "= body.replace('\\n', '<br />') # Post to HipChat headers =", "a Slack room message for each alert \"\"\" required_options =", "in self.custom_string_types_with_special_handling: self.jira_args[arg_name] = [{'value': v} for v in value]", "comment) def alert(self, matches): title = self.create_title(matches) if self.bump_tickets: ticket", "type(self.watchers) != list: self.watchers = [self.watchers] if self.assignee: self.jira_args['assignee'] =", "requests.post(url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies) response.raise_for_status() except RequestException as e:", 
"\"\"\" Creates a VictorOps Incident for each alert \"\"\" required_options", "except RequestException as e: raise EAException(\"Error posting to Telegram: %s\"", "def create_default_title(self, matches): return self.rule['name'] def get_account(self, account_file): \"\"\" Gets", "self.smtp_host = self.rule.get('smtp_host', 'localhost') self.smtp_ssl = self.rule.get('smtp_ssl', False) self.from_addr =", "is None else val for val in alert_text_values] alert_text =", "if type(value) in [list, dict]: try: value_str = self._pretty_print_as_json(value) except", "+ self.rule['email_add_domain']] elif isinstance(recipient, list): to_addr = recipient if 'email_add_domain'", "or something else # If it works, great! If not,", "None) def alert(self, matches): body = self.create_alert_body(matches) # HipChat sends", "matches): text = super(JiraAlerter, self).get_aggregation_summary_text(matches) if text: text = u'{{noformat}}{0}{{noformat}}'.format(text)", "%s\" % e) elastalert_logger.info( \"Alert sent to Telegram room %s\"", "if array_items in ['string', 'date', 'datetime']: # Special case for", "} if self.slack_icon_url_override != '': payload['icon_url'] = self.slack_icon_url_override else: payload['icon_emoji']", "JIRAError as e: logging.exception(\"Error while searching for JIRA ticket using", "= set(['command']) def __init__(self, *args): super(CommandAlerter, self).__init__(*args) self.last_command = []", "def _pretty_print_as_json(self, blob): try: return json.dumps(blob, cls=DateTimeEncoder, sort_keys=True, indent=4, ensure_ascii=False)", "display name') self.url = 'https://alert.victorops.com/integrations/generic/20131114/alert/%s/%s' % ( self.victorops_api_key, self.victorops_routing_key) self.victorops_proxy", "on existing ticket %s' % (ticket.key)) for match in matches:", "top_events: self.text += 'No events found.\\n' else: top_events.sort(key=lambda x: x[1],", "rule): super(HipChatAlerter, self).__init__(rule) self.hipchat_msg_color = 
self.rule.get('hipchat_msg_color', 'red') self.hipchat_message_format = self.rule.get('hipchat_message_format',", "if self.smtp.has_extn('STARTTLS'): self.smtp.starttls(keyfile=self.smtp_key_file, certfile=self.smtp_cert_file) if 'smtp_auth_file' in self.rule: self.smtp.login(self.user, self.password)", "matches): return self.rule['name'] def get_account(self, account_file): \"\"\" Gets the username", "e: raise EAException(\"Error connecting to SMTP host: %s\" % (e))", "VALUE>' alert_text = unicode(self.rule.get('alert_text', '')) if 'alert_text_args' in self.rule: alert_text_args", "\"\"\" required_options = frozenset(['ms_teams_webhook_url', 'ms_teams_alert_summary']) def __init__(self, rule): super(MsTeamsAlerter, self).__init__(rule)", "self.create_title(matches), 'event_type': 'trigger', 'incident_key': self.get_incident_key(matches), 'client': self.pagerduty_client_name, 'details': { \"information\":", "in self.ms_teams_webhook_url: try: response = requests.post(url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies)", "use only one or the other.' 
logging.warning(msg) self.jira_args = {'project':", "= self.rule.get('ms_teams_theme_color', '') def format_body(self, body): body = body.encode('UTF-8') if", "self.pipeline['jira_ticket'] = ticket self.pipeline['jira_server'] = self.server return None self.jira_args['summary'] =", "bcc and isinstance(bcc, basestring): self.rule['bcc'] = [self.rule['bcc']] add_suffix = self.rule.get('email_add_domain')", "to %s\" % (to_addr)) def create_default_title(self, matches): subject = 'ElastAlert:", "'alert_text' not in self.rule: self.text += self.rule['name'] + '\\n\\n' self._add_custom_alert_text()", "self.rule.get('smtp_cert_file') # Convert email to a list if it isn't", "in keys] + [count]) text += text_table.draw() + '\\n\\n' return", "email_msg['To']) email_msg['Date'] = formatdate() if self.rule.get('cc'): email_msg['CC'] = ','.join(self.rule['cc']) to_addr", "= self.rule.get('jira_max_age', 30) self.priority = self.rule.get('jira_priority') self.bump_tickets = self.rule.get('jira_bump_tickets', False)", "'Both jira_bump_in_statuses (%s) and jira_bump_not_in_statuses (%s) are set.' 
% \\", "return body def alert(self, matches): body = self.create_alert_body(matches) body =", "self.ms_teams_proxy else None payload = { '@type': 'MessageCard', '@context': 'http://schema.org/extensions',", "self.victorops_api_key = self.rule['victorops_api_key'] self.victorops_routing_key = self.rule['victorops_routing_key'] self.victorops_message_type = self.rule['victorops_message_type'] self.victorops_entity_display_name", "= stomp.Connection([(self.stomp_hostname, self.stomp_hostport)]) conn.start() conn.connect(self.stomp_login, self.stomp_password) conn.send(self.stomp_destination, json.dumps(fullmessage)) conn.disconnect() def", "TwilioRestException from twilio.rest import Client as TwilioClient from util import", "e: raise EAException(\"Error posting to HipChat: %s\" % e) elastalert_logger.info(\"Alert", "alert_subject def create_alert_body(self, matches): body = self.get_aggregation_summary_text(matches) for match in", "def format_body(self, body): body = body.encode('UTF-8') if self.ms_teams_alert_fixed_width: body =", "self.rule['telegram_room_id'] self.telegram_api_url = self.rule.get('telegram_api_url', 'api.telegram.org') self.url = 'https://%s/bot%s/%s' % (self.telegram_api_url,", "{}) self.post_all_values = self.rule.get('http_post_all_values', not self.post_payload) def alert(self, matches): \"\"\"", "set that we are aware of, it means it is", "es result key, since it would have been matched in", "self.rule.get('ms_teams_alert_fixed_width', False) self.ms_teams_theme_color = self.rule.get('ms_teams_theme_color', '') def format_body(self, body): body", "def create_title(self, matches): \"\"\" Creates custom alert title to be", "using jql '%s': %s\" % (jql, e)) return None if", "matched in the lookup_es_key call above for i in xrange(len(alert_text_values)):", "Field\"],\"schema\":{\"type\":\"array\",\"items\":\"string\", # \"custom\":\"com.atlassian.jira.plugin.system.customfieldtypes:multiselect\",\"customId\":12807}} # There are likely others 
that will", "dict([(x, y) for x, y in self.match.items() if not x.startswith('top_events_')])", "body, 'mrkdwn_in': ['text', 'pretext'], 'fields': [] } ] } if", ") fullmessage['match'] = lookup_es_key(match, self.rule['timestamp_field']) elastalert_logger.info(unicode(BasicMatchString(self.rule, match))) fullmessage['alerts'] = alerts", "{} for match in matches: if qk in match: elastalert_logger.info(", "giving the user-facing API a more representative name self.components =", "self.description = self.rule.get('jira_description', '') self.assignee = self.rule.get('jira_assignee') self.max_age = self.rule.get('jira_max_age',", "val in incident_key_values] return self.pagerduty_incident_key.format(*incident_key_values) else: return self.pagerduty_incident_key def get_info(self):", "not in self.known_field_list: # Remove the jira_ part. Convert underscores", "exit code while running command %s\" % (' '.join(command))) except", "Exception(\"Could not determine schema information for the jira field '{0}'\".format(normalized_jira_field))", "CommandAlerter(Alerter): required_options = set(['command']) def __init__(self, *args): super(CommandAlerter, self).__init__(*args) self.last_command", "response.raise_for_status() except RequestException as e: raise EAException(\"Error posting to ms", "% (ticket.key)) for match in matches: try: self.comment_on_ticket(ticket, match) except", "name, kw_name in self.rule.get('alert_text_kw').items(): val = lookup_es_key(self.match, name) # Support", "The rule configuration. \"\"\" required_options = frozenset([]) def __init__(self, rule):", "one or the other.' logging.warning(msg) self.jira_args = {'project': {'key': self.project},", "on initial creation. 
Only as a follow-up action if self.watchers:", "self.pagerduty_proxy else None try: response = requests.post( self.url, data=json.dumps(payload, cls=DateTimeEncoder,", "required_options = frozenset(['twilio_account_sid', 'twilio_auth_token', 'twilio_to_number', 'twilio_from_number']) def __init__(self, rule): super(TwilioAlerter,", "else None payload = { 'message': body, 'level': self.gitter_msg_level }", "return {'type': 'ms_teams', 'ms_teams_webhook_url': self.ms_teams_webhook_url} class SlackAlerter(Alerter): \"\"\" Creates a", "field '{0}'\".format(normalized_jira_field)) arg_name = field['id'] # Check the schema information", "JiraFormattedMatchString(BasicMatchString): def _add_match_items(self): match_items = dict([(x, y) for x, y", "return {'type': 'ServiceNow', 'self.servicenow_rest_url': self.servicenow_rest_url} class HTTPPostAlerter(Alerter): \"\"\" Requested elasticsearch", "KeyError: logging.error(\"Priority %s not found. Valid priorities are %s\" %", "body.replace('`', \"'\") body = \"```{0}```\".format('```\\n\\n```'.join(x for x in body.split('\\n'))).replace('\\n``````', '')", "import error import boto3 import requests import stomp from exotel", "also convert it a list if it isn't cc =", "which watcher failed to be added raise Exception( \"Exception encountered", "'localhost') self.smtp_ssl = self.rule.get('smtp_ssl', False) self.from_addr = self.rule.get('from_addr', 'ElastAlert') self.smtp_port", "= self.rule.get('slack_channel_override', '') self.slack_emoji_override = self.rule.get('slack_emoji_override', ':ghost:') self.slack_icon_url_override = self.rule.get('slack_icon_url_override',", "'fields': [] } ] } if self.slack_icon_url_override != '': payload['icon_url']", "if it was provided proxies = {'https': self.ms_teams_proxy} if self.ms_teams_proxy", "payload.update(self.post_static_payload) for post_key, es_key in self.post_payload.items(): payload[post_key] = lookup_es_key(match, es_key)", "assignee if assignee: self.jira_args['assignee'] 
= {'name': assignee} elif 'assignee' in", "if not isinstance(summary_table_fields, list): summary_table_fields = [summary_table_fields] # Include a", "self.slack_username_override = self.rule.get('slack_username_override', 'elastalert') self.slack_channel_override = self.rule.get('slack_channel_override', '') self.slack_emoji_override =", "for multi-select custom types (the JIRA metadata says that these", "None) def alert(self, matches): body = self.create_alert_body(matches) # post to", "and field['schema']['custom'] in self.custom_string_types_with_special_handling: self.jira_args[arg_name] = {'value': value} else: self.jira_args[arg_name]", "% ( msg, ','.join(intersection)) msg += ' This should be", "key # This may not work, as the key might", "in [list, dict]: try: value_str = self._pretty_print_as_json(value) except TypeError: #", "self.rule.get('slack_parse_override', 'none') self.slack_text_string = self.rule.get('slack_text_string', '') def format_body(self, body): #", "} proxies = {'https': self.post_proxy} if self.post_proxy else None for", "frozenset(['hipchat_auth_token', 'hipchat_room_id']) def __init__(self, rule): super(HipChatAlerter, self).__init__(rule) self.hipchat_msg_color = self.rule.get('hipchat_msg_color',", "certfile=self.smtp_cert_file) else: self.smtp = SMTP_SSL(self.smtp_host, keyfile=self.smtp_key_file, certfile=self.smtp_cert_file) else: if self.smtp_port:", "RequestException as e: raise EAException(\"Error posting to Gitter: %s\" %", "self.rule['bcc'] = [self.rule['bcc']] add_suffix = self.rule.get('email_add_domain') if add_suffix and not", "= \"```{0}```\".format('```\\n\\n```'.join(x for x in body.split('\\n'))).replace('\\n``````', '') return body def", "if 'email_from_field' in self.rule: recipient = lookup_es_key(matches[0], self.rule['email_from_field']) if isinstance(recipient,", "the fields defined on the jira server (built-ins and custom", "[command_arg.format(match=matches[0]) for command_arg in self.rule['command']] else: 
command = [command_arg %", "command = [command_arg % matches[0] for command_arg in self.rule['command']] self.last_command", "field['schema']['custom'] in self.custom_string_types_with_special_handling: self.jira_args[arg_name] = [{'value': v} for v in", "to slack: %s\" % e) elastalert_logger.info(\"Alert sent to Slack\") def", "% \\ (','.join(self.bump_in_statuses), ','.join(self.bump_not_in_statuses)) intersection = list(set(self.bump_in_statuses) & set(self.bump_in_statuses)) if", "inactivity_datetime = ts_now() - datetime.timedelta(days=self.bump_after_inactivity) if ts_to_dt(ticket.fields.updated) >= inactivity_datetime: if", "Support single component or list if type(self.components) != list: self.jira_args['components']", "posting HTTP Post alert: %s\" % e) elastalert_logger.info(\"HTTP Post alert", "'channel': self.slack_channel_override, 'parse': self.slack_parse_override, 'text': self.slack_text_string, 'attachments': [ { 'color':", "smtplib import SMTPException from socket import error import boto3 import", "KeyError as e: raise EAException(\"Error formatting command: %s\" % (e))", "a case-by-case basis custom_string_types_with_special_handling = [ 'com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes', 'com.atlassian.jira.plugin.system.customfieldtypes:multiselect', 'com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons', ]", "something else # If it works, great! 
If not, it", "title if 'query_key' in self.rule and lookup_es_key(matches[0], self.rule['query_key']): title =", "self.post_payload) def alert(self, matches): \"\"\" Each match will trigger a", "% (self.rule['name'], lookup_es_key(match, self.rule['timestamp_field'])) ) fullmessage['match'] = lookup_es_key(match, self.rule['timestamp_field']) elastalert_logger.info(unicode(BasicMatchString(self.rule,", "that we are aware of, it means it is either:", "{'value': value} else: self.jira_args[arg_name] = value # Number type elif", "if self.pipeline is not None: self.pipeline['jira_ticket'] = self.issue self.pipeline['jira_server'] =", "self.rule['victorops_message_type'] self.victorops_entity_display_name = self.rule.get('victorops_entity_display_name', 'no entity display name') self.url =", "if for_search: return title title += ' - %s' %", "alert. \"\"\" if 'alert_subject' in self.rule: return self.create_custom_title(matches) return self.create_default_title(matches)", "the lookup_es_key call above if val is None: val =", "== 'number': self.jira_args[arg_name] = int(value) elif arg_type == 'option': self.jira_args[arg_name]", "return {'type': 'gitter', 'gitter_webhook_url': self.gitter_webhook_url} class ServiceNowAlerter(Alerter): \"\"\" Creates a", "(datetime.datetime.now() - datetime.timedelta(days=self.max_age)).strftime('%Y-%m-%d') jql = 'project=%s AND summary~\"%s\" and created", "payload = { 'chat_id': self.telegram_room_id, 'text': body, 'parse_mode': 'markdown', 'disable_web_page_preview':", "*args): super(EmailAlerter, self).__init__(*args) self.smtp_host = self.rule.get('smtp_host', 'localhost') self.smtp_ssl = self.rule.get('smtp_ssl',", "properties # This technically may not work if there is", "field: break if not field: # Log a warning to", "field '{0}'\".format(normalized_jira_field)) arg_type = field['schema']['type'] # Handle arrays of simple", "create_default_title(self, matches): return self.rule['name'] def get_account(self, account_file): \"\"\" 
Gets the", "+= '%s: %s\\n' % (term, count) self.text += '\\n' def", "self.server = self.rule['jira_server'] self.get_account(self.rule['jira_account_file']) self.project = self.rule['jira_project'] self.issue_type = self.rule['jira_issuetype']", "Special case for multi-select custom types (the JIRA metadata says", "was provided proxies = {'https': self.victorops_proxy} if self.victorops_proxy else None", "required_options = frozenset(['telegram_bot_token', 'telegram_room_id']) def __init__(self, rule): super(TelegramAlerter, self).__init__(rule) self.telegram_bot_token", "will trigger a POST to the specified endpoint(s). \"\"\" for", "return {'type': 'hipchat', 'hipchat_room_id': self.hipchat_room_id} class MsTeamsAlerter(Alerter): \"\"\" Creates a", "= self.match.items() match_items.sort(key=lambda x: x[0]) for key, value in match_items:", "%s\" % response) except: raise EAException(\"Error posting to Exotel\"), None,", "self.rule['twilio_account_sid'] self.twilio_auth_token = self.rule['twilio_auth_token'] self.twilio_to_number = self.rule['twilio_to_number'] self.twilio_from_number = self.rule['twilio_from_number']", "built-in fields that we explicitly know how to set #", "set a string value known_field_list = [ 'jira_account_file', 'jira_assignee', 'jira_bump_after_inactivity',", "== list: self.resolve_rule_references(root[i]) else: root[i] = self.resolve_rule_reference(item) elif type(root) ==", "strValue[1:-1] in self.rule: if type(value) == int: return int(self.rule[strValue[1:-1]]) else:", "Handle arrays of simple types like strings or numbers if", "'key', 'id', 'value', or something else # If it works,", "self.issue self.pipeline['jira_server'] = self.server def create_alert_body(self, matches): body = self.description", "response.raise_for_status() except RequestException as e: raise EAException(\"Error posting to slack:", "Base class for types of alerts. 
:param rule: The rule", "e: raise EAException(\"Error posting to Telegram: %s\" % e) elastalert_logger.info(", "(self.rule['name']) if for_search: return title title += ' - %s'", "def __init__(self, rule): self.rule = rule # pipeline object is", "= command except KeyError as e: raise EAException(\"Error formatting command:", "fullmessage['matching'] = unicode(BasicMatchString(self.rule, match)) fullmessage['alertDate'] = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\") fullmessage['body'] =", "one of them: # {\"id\":\"customfield_12807\",\"name\":\"My Custom Field\",\"custom\":true,\"orderable\":true,\"navigable\":true,\"searchable\":true, # \"clauseNames\":[\"cf[12807]\",\"My Custom", "None) if field: break if not field: # Log a", "ticket = self.find_existing_ticket(matches) if ticket: inactivity_datetime = ts_now() - datetime.timedelta(days=self.bump_after_inactivity)", "as e: raise EAException(\"Error posting to ms teams: %s\" %", "(%s): %s\" % (self.jira_args, e)) elastalert_logger.info(\"Opened Jira ticket: %s\" %", "match_aggregation.iteritems(): text_table.add_row([key for key in keys] + [count]) text +=", "Microsoft Teams Conversation Message for each alert \"\"\" required_options =", "basestring): post_url = [post_url] self.post_url = post_url self.post_proxy = self.rule.get('http_post_proxy')", "lookup_es_key(match, es_key) headers = { \"Content-Type\": \"application/json\", \"Accept\": \"application/json;charset=utf-8\" }", "def resolve_rule_reference(self, value): strValue = unicode(value) if strValue.startswith('$') and strValue.endswith('$')", "jira types that can be used as custom fields require", "\"\"\" Creates a ServiceNow alert \"\"\" required_options = set([ 'username',", "key encountered in the aggregation period for match in matches:", "% e) elastalert_logger.info( \"Alert sent to Telegram room %s\" %", "same name # as an es result key, since it", "json.JSONEncoder.default(self, obj) class BasicMatchString(object): \"\"\" Creates a 
string containing fields", "obj): if hasattr(obj, 'isoformat'): return obj.isoformat() else: return json.JSONEncoder.default(self, obj)", "following data for summary_table_fields ==> {0}:\\n\\n\".format( summary_table_fields_with_count ) text_table =", "self.priority = self.rule.get('jira_priority') self.bump_tickets = self.rule.get('jira_bump_tickets', False) self.bump_not_in_statuses = self.rule.get('jira_bump_not_in_statuses')", "payload['themeColor'] = self.ms_teams_theme_color for url in self.ms_teams_webhook_url: try: response =", "def get_info(self): return {'type': 'ms_teams', 'ms_teams_webhook_url': self.ms_teams_webhook_url} class SlackAlerter(Alerter): \"\"\"", "['<MISSING VALUE>' if val is None else val for val", "information is not available, raise an exception since we don't", "if self.bump_in_statuses: jql = '%s and status in (%s)' %", "__init__(self, rule): super(PagerDutyAlerter, self).__init__(rule) self.pagerduty_service_key = self.rule['pagerduty_service_key'] self.pagerduty_client_name = self.rule['pagerduty_client_name']", "self).__init__(rule) self.servicenow_rest_url = self.rule['servicenow_rest_url'] self.servicenow_proxy = self.rule.get('servicenow_proxy', None) def alert(self,", "Some built-in jira types that can be used as custom", "= self.rule['jira_issuetype'] # We used to support only a single", "required_options = frozenset(['sns_topic_arn']) def __init__(self, *args): super(SnsAlerter, self).__init__(*args) self.sns_topic_arn =", "'\\n') if type(value) in [list, dict]: try: value_str = self._pretty_print_as_json(value)", "= [self.rule['cc']] # If there is a bcc then also", "if 'alert_text_args' in self.rule: alert_text_args = self.rule.get('alert_text_args') alert_text_values = [lookup_es_key(self.match,", "schema information to decide how to set the value correctly", "= {'value': value} # Complex type else: self.jira_args[arg_name] = {'name':", "alert_subject_values] return alert_subject.format(*alert_subject_values) return 
alert_subject def create_alert_body(self, matches): body =", "alert_text def _add_rule_text(self): self.text += self.rule['type'].get_match_str(self.match) def _add_top_counts(self): for key,", "%s' % (self.rule['name']) return subject def alert(self, matches): body =", "preserve the stack-trace, and give some # context as to", "self.new_style_string_format = False if 'new_style_string_format' in self.rule and self.rule['new_style_string_format']: self.new_style_string_format", "def get_info(self): return {'type': 'debug'} class EmailAlerter(Alerter): \"\"\" Sends an", "'description': self.create_title(matches), 'event_type': 'trigger', 'incident_key': self.get_incident_key(matches), 'client': self.pagerduty_client_name, 'details': {", "and pipe data try: subp = subprocess.Popen(command, stdin=subprocess.PIPE, shell=self.shell) if", "util import lookup_es_key from util import pretty_ts from util import", "{'type': 'pagerduty', 'pagerduty_client_name': self.pagerduty_client_name} class ExotelAlerter(Alerter): required_options = frozenset(['exotel_account_sid', 'exotel_auth_token',", "'\\n\\n' self._add_custom_alert_text() self._ensure_new_line() if self.rule.get('alert_text_type') != 'alert_text_only': self._add_rule_text() self._ensure_new_line() if", "payload = { '@type': 'MessageCard', '@context': 'http://schema.org/extensions', 'summary': self.ms_teams_alert_summary, 'title':", "= frozenset(['sns_topic_arn']) def __init__(self, *args): super(SnsAlerter, self).__init__(*args) self.sns_topic_arn = self.rule.get('sns_topic_arn',", "command except KeyError as e: raise EAException(\"Error formatting command: %s\"", "of, it means it is either: # 1. A built-in", "# OR raise and fail to load the alert entirely?", "of the file which contains user and password information. 
\"\"\"", "try: if self.hipchat_ignore_ssl_errors: requests.packages.urllib3.disable_warnings() response = requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers,", "'61613') self.stomp_login = self.rule.get('stomp_login', 'admin') self.stomp_password = self.rule.get('stomp_password', '<PASSWORD>') self.stomp_destination", "==> {0}:\\n\\n\".format( summary_table_fields_with_count ) text_table = Texttable() text_table.header(summary_table_fields_with_count) match_aggregation =", "in match_aggregation: match_aggregation[key_tuple] = 1 else: match_aggregation[key_tuple] = match_aggregation[key_tuple] +", "from_=self.twilio_from_number) except TwilioRestException as e: raise EAException(\"Error posting to twilio:", "built-in types, id: issuekey and id: thumbnail if not ('schema'", "dict or type(item) == list: self.resolve_rule_references(root[i]) else: root[i] = self.resolve_rule_reference(item)", "'ms_teams_webhook_url': self.ms_teams_webhook_url} class SlackAlerter(Alerter): \"\"\" Creates a Slack room message", "'%s/browse/%s' % (self.pipeline['jira_server'], self.pipeline['jira_ticket']) body += '\\nJIRA ticket: %s' %", "the 'name' field. Therefore, try both just in case for", "self.jira_args['priority'] = {'id': self.priority_ids[self.priority]} except KeyError: logging.error(\"Priority %s not found.", "self.labels: # Support single label or list if type(self.labels) !=", "# Simple string types if array_items in ['string', 'date', 'datetime']:", "self.rule['timestamp_field'])) ) fullmessage['match'] = lookup_es_key(match, self.rule['timestamp_field']) elastalert_logger.info(unicode(BasicMatchString(self.rule, match))) fullmessage['alerts'] =", "Python logger (by default, alerting to terminal). 
\"\"\" def alert(self,", "https proxy, if it was provided proxies = {'https': self.ms_teams_proxy}", "is a sample of one of them: # {\"id\":\"customfield_12807\",\"name\":\"My Custom", "self.ms_teams_theme_color != '': payload['themeColor'] = self.ms_teams_theme_color for url in self.ms_teams_webhook_url:", "inactivity_datetime: if self.pipeline is not None: self.pipeline['jira_ticket'] = None self.pipeline['jira_server']", "'query_key' in self.rule and lookup_es_key(matches[0], self.rule['query_key']): title = 'ElastAlert: %s", "'jira_issuetype']) # Maintain a static set of built-in fields that", "if self.slack_icon_url_override != '': payload['icon_url'] = self.slack_icon_url_override else: payload['icon_emoji'] =", "return {'type': 'exotel', 'exotel_account': self.exotel_account_sid} class TwilioAlerter(Alerter): required_options = frozenset(['twilio_account_sid',", "['count'] text += \"Aggregation resulted in the following data for", "__init__(self, rule): super(TwilioAlerter, self).__init__(rule) self.twilio_account_sid = self.rule['twilio_account_sid'] self.twilio_auth_token = self.rule['twilio_auth_token']", "posting to Exotel, response code is %s\" % response) except:", "body.replace('>', '&gt;') return body def alert(self, matches): body = self.create_alert_body(matches)", "elastalert_logger.info(\"Alert sent to MS Teams\") def get_info(self): return {'type': 'ms_teams',", "arg in self.pagerduty_incident_key_args] # Populate values with rule level properties", "Send a Telegram message via bot api for each alert", "in ['string', 'date', 'datetime']: # Special case for multi-select custom", "may contain HTML, pass along only first 1024 chars raise", "'message': body, 'level': self.gitter_msg_level } try: response = requests.post(self.gitter_webhook_url, json.dumps(payload,", "%s' % (self.rule['name']) # If the rule has a query_key,", "while commenting on ticket %s: %s\" % (ticket, e)) if", "'issuetype': {'name': self.issue_type}} if self.components: # 
Support single component or", "matches): alert_subject = unicode(self.rule['alert_subject']) if 'alert_subject_args' in self.rule: alert_subject_args =", "= self.client.search_issues(jql) except JIRAError as e: logging.exception(\"Error while searching for", "if 'alert_subject_args' in self.rule: alert_subject_args = self.rule['alert_subject_args'] alert_subject_values = [lookup_es_key(matches[0],", "%s\" % (jql, e)) return None if len(issues): return issues[0]", "longer than 10000 characters if (len(body) > 9999): body =", "self.create_custom_title(matches) return self.create_default_title(matches) def create_custom_title(self, matches): alert_subject = unicode(self.rule['alert_subject']) if", "and self.rule['new_style_string_format']: self.new_style_string_format = True def alert(self, matches): # Format", "else: # Simple string types if arg_type in ['string', 'date',", "case for multi-select custom types (the JIRA metadata says that", "try: response = requests.post( self.servicenow_rest_url, auth=(self.rule['username'], self.rule['password']), headers=headers, data=json.dumps(payload, cls=DateTimeEncoder),", "ticket using jira_args (%s): %s\" % (self.jira_args, e)) elastalert_logger.info(\"Opened Jira", "= self.rule.get('slack_username_override', 'elastalert') self.slack_channel_override = self.rule.get('slack_channel_override', '') self.slack_emoji_override = self.rule.get('slack_emoji_override',", "self.slack_text_string, 'attachments': [ { 'color': self.slack_msg_color, 'title': self.create_title(matches), 'text': body,", "file. 
:param account_file: Name of the file which contains user", "self.rule.get('jira_bump_not_in_statuses') self.bump_in_statuses = self.rule.get('jira_bump_in_statuses') self.bump_after_inactivity = self.rule.get('jira_bump_after_inactivity', self.max_age) self.watchers =", "# As a convenience, support the scenario wherein the user", "level properties too for i in range(len(incident_key_values)): if incident_key_values[i] is", "properties too for i in range(len(incident_key_values)): if incident_key_values[i] is None:", "return {'type': 'twilio', 'twilio_client_name': self.twilio_from_number} class VictorOpsAlerter(Alerter): \"\"\" Creates a", "for match in matches: # Parse everything into description. description", "matches): alerts = [] qk = self.rule.get('query_key', None) fullmessage =", "work, as the key might actually be 'key', 'id', 'value',", "may not work if there is a top-level rule property", "title = title.replace(matches[0].get(self.rule['jira_ignore_in_title'], ''), '') # This is necessary for", "was provided proxies = {'https': self.slack_proxy} if self.slack_proxy else None", "to ServiceNow\") def get_info(self): return {'type': 'ServiceNow', 'self.servicenow_rest_url': self.servicenow_rest_url} class", "self.rule.get('jira_labels', self.rule.get('jira_label')) self.description = self.rule.get('jira_description', '') self.assignee = self.rule.get('jira_assignee') self.max_age", "import stomp from exotel import Exotel from jira.client import JIRA", "'disable_web_page_preview': True } try: response = requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers,", "ex )), None, sys.exc_info()[2] except JIRAError as e: raise EAException(\"Error", "Field\",\"custom\":true,\"orderable\":true,\"navigable\":true,\"searchable\":true, # \"clauseNames\":[\"cf[12807]\",\"My Custom Field\"],\"schema\":{\"type\":\"array\",\"items\":\"string\", # 
\"custom\":\"com.atlassian.jira.plugin.system.customfieldtypes:multiselect\",\"customId\":12807}} # There are", "value_str = self._pretty_print_as_json(value) except TypeError: # Non serializable object, fallback", "explicitly know how to set # For anything else, we", "= body.encode('UTF-8') body = body.replace('&', '&amp;') body = body.replace('<', '&lt;')", "= u'{{noformat}}{0}{{noformat}}'.format(text) return text def create_default_title(self, matches, for_search=False): # If", "Create an incident on PagerDuty for each alert \"\"\" required_options", "or JIRA issue summary. :param matches: A list of dictionaries", "information to the alert. \"\"\" if 'alert_subject' in self.rule: return", "command %s\" % (' '.join(command))) except OSError as e: raise", "referencing other top-level rule properties to avoid redundant copy/paste if", "JIRA ticket if it exists if self.pipeline is not None", "email_msg['Reply-To'] = self.rule.get('email_reply_to', email_msg['To']) email_msg['Date'] = formatdate() if self.rule.get('cc'): email_msg['CC']", "'message': body, 'message_format': self.hipchat_message_format, 'notify': self.hipchat_notify, 'from': self.hipchat_from } try:", "self.match.items(): if key.startswith('top_events_'): self.text += '%s:\\n' % (key[11:]) top_events =", "\"\"\" account_conf = yaml_loader(account_file) if 'user' not in account_conf or", "'@' in recipient: to_addr = [recipient] elif 'email_add_domain' in self.rule:", "if self.assignee: self.jira_args['assignee'] = {'name': self.assignee} try: self.client = JIRA(self.server,", "return issues[0] def comment_on_ticket(self, ticket, match): text = unicode(JiraFormattedMatchString(self.rule, match))", "a Microsoft Teams Conversation Message for each alert \"\"\" required_options", "account_conf['user'] self.password = account_conf['password'] class StompAlerter(Alerter): \"\"\" The stomp alerter", "property with the same name # as an es result", "%s\" % (e)) self.smtp.sendmail(self.from_addr, to_addr, 
email_msg.as_string()) self.smtp.close() elastalert_logger.info(\"Sent email to", "for text/html if self.hipchat_message_format == 'html': body = body.replace('\\n', '<br", "Message for each alert \"\"\" required_options = frozenset(['ms_teams_webhook_url', 'ms_teams_alert_summary']) def", "self.rule.get('exotel_message_body', '') def alert(self, matches): client = Exotel(self.exotel_account_sid, self.exotel_auth_token) try:", "ServiceNow\") def get_info(self): return {'type': 'ServiceNow', 'self.servicenow_rest_url': self.servicenow_rest_url} class HTTPPostAlerter(Alerter):", "matches): body = self.create_alert_body(matches) session = boto3.Session( aws_access_key_id=self.aws_access_key_id, aws_secret_access_key=self.aws_secret_access_key, region_name=self.aws_region,", "to MS Teams\") def get_info(self): return {'type': 'ms_teams', 'ms_teams_webhook_url': self.ms_teams_webhook_url}", "self.bump_in_statuses = self.rule.get('jira_bump_in_statuses') self.bump_after_inactivity = self.rule.get('jira_bump_after_inactivity', self.max_age) self.watchers = self.rule.get('jira_watchers')", "self.slack_text_string = self.rule.get('slack_text_string', '') def format_body(self, body): # https://api.slack.com/docs/formatting body", "if type(item) == dict or type(item) == list: self.resolve_rule_references(root[i]) else:", "created >= \"%s\"' % (self.project, title, date) if self.bump_in_statuses: jql", "matches): subject = 'ElastAlert: %s' % (self.rule['name']) return subject def", "As such, no tickets will ever be found.' 
% (", "the jira field '{0}'\".format(normalized_jira_field)) arg_name = field['id'] # Check the", "body.split('\\n'))).replace('\\n``````', '') return body def alert(self, matches): body = self.create_alert_body(matches)", "elastalert_logger.info(\"Sent email to %s\" % (to_addr)) def create_default_title(self, matches): subject", "self.jira_args[arg_name] = [int(v) for v in value] # Also attempt", "self.ms_teams_webhook_url: try: response = requests.post(url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies) response.raise_for_status()", "'pretext'], 'fields': [] } ] } if self.slack_icon_url_override != '':", "twilio.base.exceptions import TwilioRestException from twilio.rest import Client as TwilioClient from", "sns notification to %s\" % (self.sns_topic_arn)) class HipChatAlerter(Alerter): \"\"\" Creates", "self.from_addr email_msg['Reply-To'] = self.rule.get('email_reply_to', email_msg['To']) email_msg['Date'] = formatdate() if self.rule.get('cc'):", "= 'https://%s/v2/room/%s/notification?auth_token=%s' % ( self.hipchat_domain, self.hipchat_room_id, self.hipchat_auth_token) self.hipchat_proxy = self.rule.get('hipchat_proxy',", "\"\"\" Each match will trigger a POST to the specified", "- ', ' ') title = title.replace('\\\\', '\\\\\\\\') date =", "return {'type': 'Unknown'} def create_title(self, matches): \"\"\" Creates custom alert", "= self.resolve_rule_reference(item) elif type(root) == dict: # Make a copy", "ts_now from util import ts_to_dt class DateTimeEncoder(json.JSONEncoder): def default(self, obj):", "something return json.dumps(blob, cls=DateTimeEncoder, sort_keys=True, indent=4, encoding='Latin-1', ensure_ascii=False) def __str__(self):", "self.rule['victorops_routing_key'] self.victorops_message_type = self.rule['victorops_message_type'] self.victorops_entity_display_name = self.rule.get('victorops_entity_display_name', 'no entity display", "resolve_rule_references(self, root): # Support referencing other top-level rule 
properties to", "self).__init__(rule) post_url = self.rule.get('http_post_url') if isinstance(post_url, basestring): post_url = [post_url]", "_add_top_counts(self): for key, counts in self.match.items(): if key.startswith('top_events_'): self.text +=", "'chat_id': self.telegram_room_id, 'text': body, 'parse_mode': 'markdown', 'disable_web_page_preview': True } try:", "self.post_all_values else {} payload.update(self.post_static_payload) for post_key, es_key in self.post_payload.items(): payload[post_key]", "_pretty_print_as_json(self, blob): try: return json.dumps(blob, cls=DateTimeEncoder, sort_keys=True, indent=4, ensure_ascii=False) except", "in ['string', 'date', 'datetime']: # Special case for custom types", "EAException(\"Error posting to twilio: %s\" % e) elastalert_logger.info(\"Trigger sent to", "if self.labels: # Support single label or list if type(self.labels)", "email.mime.text import MIMEText from email.utils import formatdate from smtplib import", "by the set that we are aware of, it means", "EAException(\"Error while running command %s: %s\" % (' '.join(command), e))", "False) self.hipchat_notify = self.rule.get('hipchat_notify', True) self.hipchat_from = self.rule.get('hipchat_from', '') self.url", "if key.startswith('top_events_'): self.text += '%s:\\n' % (key[11:]) top_events = counts.items()", "JIRA: %s\" % (str(e)[:1024])) try: if self.priority is not None:", "alert \"\"\" required_options = frozenset(['slack_webhook_url']) def __init__(self, rule): super(SlackAlerter, self).__init__(rule)", "'user' not in account_conf or 'password' not in account_conf: raise", "HTTPPostAlerter(Alerter): \"\"\" Requested elasticsearch indices are sent by HTTP POST.", "self.rule.get('jira_bump_in_statuses') self.bump_after_inactivity = self.rule.get('jira_bump_after_inactivity', self.max_age) self.watchers = self.rule.get('jira_watchers') if self.bump_in_statuses", "self.create_alert_body(matches) self.stomp_hostname = self.rule.get('stomp_hostname', 
'localhost') self.stomp_hostport = self.rule.get('stomp_hostport', '61613') self.stomp_login", "{ 'service_key': self.pagerduty_service_key, 'description': self.create_title(matches), 'event_type': 'trigger', 'incident_key': self.get_incident_key(matches), 'client':", "(self.rule['name'], match[qk], lookup_es_key(match, self.rule['timestamp_field'])) ) fullmessage['match'] = match[qk] else: elastalert_logger.info('Alert", "','.join(intersection)) msg += ' This should be simplified to use", "%s\" % e) elastalert_logger.info(\"Alert sent to Slack\") def get_info(self): return", "if self.smtp_port: self.smtp = SMTP_SSL(self.smtp_host, self.smtp_port, keyfile=self.smtp_key_file, certfile=self.smtp_cert_file) else: self.smtp", "','.join(self.bump_not_in_statuses)) intersection = list(set(self.bump_in_statuses) & set(self.bump_in_statuses)) if intersection: msg =", "in field['schema'] and field['schema']['custom'] in self.custom_string_types_with_special_handling: self.jira_args[arg_name] = {'value': value}", "\"This alert was triggered again at %s\\n%s\" % (timestamp, text)", "SnsAlerter(Alerter): \"\"\" Send alert using AWS SNS service \"\"\" required_options", "attached to each alerters used by a rule before calling", "\"\"\" required_options = frozenset(['email']) def __init__(self, *args): super(EmailAlerter, self).__init__(*args) self.smtp_host", "not None and 'jira_ticket' in self.pipeline: url = '%s/browse/%s' %", "import SMTP from smtplib import SMTP_SSL from smtplib import SMTPAuthenticationError", "be simplified to use only one or the other.' logging.warning(msg)", "custom fields require special handling # Here is a sample", "= self.rule.get('hipchat_proxy', None) def alert(self, matches): body = self.create_alert_body(matches) #", "type(value) in [list, dict]: try: value_str = self._pretty_print_as_json(value) except TypeError:", "{'id': self.priority_ids[self.priority]} except KeyError: logging.error(\"Priority %s not found. 
Valid priorities", "description, \"short_description\": self.rule['short_description'], \"comments\": self.rule['comments'], \"assignment_group\": self.rule['assignment_group'], \"category\": self.rule['category'], \"subcategory\":", "None: self.pipeline['jira_ticket'] = ticket self.pipeline['jira_server'] = self.server return None self.jira_args['summary']", "= self.rule.get(name) kw[kw_name] = missing if val is None else", "self.telegram_proxy = self.rule.get('telegram_proxy', None) def alert(self, matches): body = u'⚠", "key_tuple = tuple([unicode(lookup_es_key(match, key)) for key in summary_table_fields]) if key_tuple", "# post to Gitter headers = {'content-type': 'application/json'} # set", "list: self.labels = [self.labels] self.jira_args['labels'] = self.labels if self.watchers: #", "chars raise EAException(\"Error connecting to JIRA: %s\" % (str(e)[:1024])) try:", "from an account file. :param account_file: Name of the file", "if len(matches) > 1: body += '\\n----------------------------------------\\n' body += u'", "+= ' - %s' % (qk) return subject def get_info(self):", "match def _ensure_new_line(self): while self.text[-2:] != '\\n\\n': self.text += '\\n'", "alert_text_args] # Support referencing other top-level rule properties # This", "Maintain a static set of built-in fields that we explicitly", "could be vulnerable to shell injection!') self.rule['command'] = [self.rule['command']] self.new_style_string_format", "self.hipchat_ignore_ssl_errors: requests.packages.urllib3.disable_warnings() response = requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, verify=not self.hipchat_ignore_ssl_errors,", "= self.rule['name'] + self.sms_body response = client.sms(self.rule['exotel_from_number'], self.rule['exotel_to_number'], message_body) if", "issue summary. 
:param matches: A list of dictionaries of relevant", "+ '\\n\\n' return unicode(text) def create_default_title(self, matches): return self.rule['name'] def", "strValue.startswith('$') and strValue.endswith('$') and strValue[1:-1] in self.rule: if type(value) ==", "# HipChat sends 400 bad request on messages longer than", "!= 'alert_text_only': self._add_rule_text() self._ensure_new_line() if self.rule.get('top_count_keys'): self._add_top_counts() if self.rule.get('alert_text_type') !=", "= None self.resolve_rule_references(self.rule) def resolve_rule_references(self, root): # Support referencing other", "%s\\n%s\" % (timestamp, text) self.client.add_comment(ticket, comment) def alert(self, matches): title", "running command %s\" % (' '.join(command))) except OSError as e:", "if add_suffix and not add_suffix.startswith('@'): self.rule['email_add_domain'] = '@' + add_suffix", "= self.find_existing_ticket(matches) if ticket: inactivity_datetime = ts_now() - datetime.timedelta(days=self.bump_after_inactivity) if", "with an identifier 'key' elif array_items == 'option': self.jira_args[arg_name] =", "RequestException as e: raise EAException(\"Error posting to VictorOps: %s\" %", "self.create_title(matches) if self.bump_tickets: ticket = self.find_existing_ticket(matches) if ticket: inactivity_datetime =", "self.rule.get('email_add_domain') if add_suffix and not add_suffix.startswith('@'): self.rule['email_add_domain'] = '@' +", "Parse everything into description. 
description = str(BasicMatchString(self.rule, match)) # Set", "are strings, but # in reality, they are required to", "(self.rule['name'], lookup_es_key(match, self.rule['timestamp_field']))) elastalert_logger.info(unicode(BasicMatchString(self.rule, match))) def get_info(self): return {'type': 'debug'}", "elastalert_logger.info(unicode(BasicMatchString(self.rule, match))) fullmessage['alerts'] = alerts fullmessage['rule'] = self.rule['name'] fullmessage['matching'] =", "%s\" % (str(e)[:1024])) try: if self.priority is not None: self.jira_args['priority']", "return self.text class JiraFormattedMatchString(BasicMatchString): def _add_match_items(self): match_items = dict([(x, y)", "self.rule.get('gitter_msg_level', 'error') def alert(self, matches): body = self.create_alert_body(matches) # post", "requests import stomp from exotel import Exotel from jira.client import", "%s\" % (' '.join(command))) except OSError as e: raise EAException(\"Error", "\"\"\" for match in matches: payload = match if self.post_all_values", "isinstance(bcc, basestring): self.rule['bcc'] = [self.rule['bcc']] add_suffix = self.rule.get('email_add_domain') if add_suffix", "[lookup_es_key(self.match, arg) for arg in alert_text_args] # Support referencing other", "lookup_es_key(match, self.rule['timestamp_field']))) elastalert_logger.info(unicode(BasicMatchString(self.rule, match))) def get_info(self): return {'type': 'debug'} class", "to_addr = recipient if 'email_add_domain' in self.rule: to_addr = [name", "since it would have been matched in the lookup_es_key call", "from util import pretty_ts from util import ts_now from util", "watcher or list if type(self.watchers) != list: self.watchers = [self.watchers]", "'datetime']: # Special case for multi-select custom types (the JIRA", "self.hipchat_msg_color, 'message': body, 'message_format': self.hipchat_message_format, 'notify': self.hipchat_notify, 'from': self.hipchat_from }", "it was provided proxies = {'https': self.pagerduty_proxy} if 
self.pagerduty_proxy else", "except: raise EAException(\"Error posting to Exotel\"), None, sys.exc_info()[2] elastalert_logger.info(\"Trigger sent", "we're walking for key, value in root.copy().iteritems(): if type(value) ==", "import logging import subprocess import sys import warnings from email.mime.text", "','.join(self.bump_in_statuses)) if self.bump_not_in_statuses: jql = '%s and status not in", "SMTP_SSL(self.smtp_host, keyfile=self.smtp_key_file, certfile=self.smtp_cert_file) else: if self.smtp_port: self.smtp = SMTP(self.smtp_host, self.smtp_port)", "case for two built-in types, id: issuekey and id: thumbnail", "} ] } if self.slack_icon_url_override != '': payload['icon_url'] = self.slack_icon_url_override", "= self.create_alert_body(matches) self.stomp_hostname = self.rule.get('stomp_hostname', 'localhost') self.stomp_hostport = self.rule.get('stomp_hostport', '61613')", "None: key_value = self.rule.get(self.pagerduty_incident_key_args[i]) if key_value: incident_key_values[i] = key_value incident_key_values", "alert_text = unicode(self.rule.get('alert_text', '')) if 'alert_text_args' in self.rule: alert_text_args =", "frozenset(['victorops_api_key', 'victorops_routing_key', 'victorops_message_type']) def __init__(self, rule): super(VictorOpsAlerter, self).__init__(rule) self.victorops_api_key =", "this alert. At minimum, this should contain a field type", "self.stomp_hostport = self.rule.get('stomp_hostport', '61613') self.stomp_login = self.rule.get('stomp_login', 'admin') self.stomp_password =", "self.pipeline['jira_server'] = self.server def create_alert_body(self, matches): body = self.description +", "%s\\n' % (term, count) self.text += '\\n' def _add_match_items(self): match_items", "not top_events: self.text += 'No events found.\\n' else: top_events.sort(key=lambda x:", "= rule # pipeline object is created by ElastAlerter.send_alert() #", "to the type of Alerter. 
\"\"\" return {'type': 'Unknown'} def", "text def create_default_title(self, matches, for_search=False): # If there is a", "Exception( \"Exception encountered when trying to add '{0}' as a", "= self.rule['pagerduty_service_key'] self.pagerduty_client_name = self.rule['pagerduty_client_name'] self.pagerduty_incident_key = self.rule.get('pagerduty_incident_key', '') self.pagerduty_incident_key_args", "= self.from_addr email_msg['Reply-To'] = self.rule.get('email_reply_to', email_msg['To']) email_msg['Date'] = formatdate() if", "= '<MISSING VALUE>' alert_text = unicode(self.rule.get('alert_text', '')) if 'alert_text_args' in", "id. \"\"\" priorities = self.client.priorities() self.priority_ids = {} for x", "sort_keys=True, indent=4, ensure_ascii=False) except UnicodeDecodeError: # This blob contains non-unicode,", "# Non serializable object, fallback to str pass self.text +=", "get_aggregation_summary_text(self, matches): text = super(JiraAlerter, self).get_aggregation_summary_text(matches) if text: text =", "' '.join(self.last_command)} class SnsAlerter(Alerter): \"\"\" Send alert using AWS SNS", "# Separate text of aggregated alerts with dashes if len(matches)", "and subp.wait(): raise EAException(\"Non-zero exit code while running command %s\"", "def get_info(self): return {'type': 'slack', 'slack_username_override': self.slack_username_override, 'slack_webhook_url': self.slack_webhook_url} class", "+= ' This should be simplified to use only one", "{'type': 'email', 'recipients': self.rule['email']} class JiraAlerter(Alerter): \"\"\" Creates a Jira", "value): strValue = unicode(value) if strValue.startswith('$') and strValue.endswith('$') and strValue[1:-1]", "frozenset(['email']) def __init__(self, *args): super(EmailAlerter, self).__init__(*args) self.smtp_host = self.rule.get('smtp_host', 'localhost')", "self.create_alert_body(matches) # Add JIRA ticket if it exists if self.pipeline", "body.replace('<', '&lt;') body = body.replace('>', '&gt;') return body def 
alert(self,", "If there is a bcc then also convert it to", "alert(self, matches): body = self.create_alert_body(matches) # HipChat sends 400 bad", "(%s)' % (jql, ','.join(self.bump_in_statuses)) if self.bump_not_in_statuses: jql = '%s and", "arrays of simple types like strings or numbers if arg_type", "rule): super(ServiceNowAlerter, self).__init__(rule) self.servicenow_rest_url = self.rule['servicenow_rest_url'] self.servicenow_proxy = self.rule.get('servicenow_proxy', None)", "ensure_ascii=False), headers=headers, proxies=proxies ) response.raise_for_status() except RequestException as e: raise", "file must have user and password fields') self.user = account_conf['user']", "type(value) == int: return int(self.rule[strValue[1:-1]]) else: return self.rule[strValue[1:-1]] else: return", "%s, %s at %s:' % (self.rule['name'], match[qk], lookup_es_key(match, self.rule['timestamp_field']))) alerts.append(", "'danger') self.slack_parse_override = self.rule.get('slack_parse_override', 'none') self.slack_text_string = self.rule.get('slack_text_string', '') def", "+ '..(truncated)' # Use appropriate line ending for text/html if", "modifying the contents of the structure we're walking for key,", "Match is a dictionary of information about the alert. :param", "{} for name, kw_name in self.rule.get('alert_text_kw').items(): val = lookup_es_key(self.match, name)", "# Convert email to a list if it isn't already", "not None: self.pipeline['jira_ticket'] = ticket self.pipeline['jira_server'] = self.server return None", "room %s\" % self.hipchat_room_id) def get_info(self): return {'type': 'hipchat', 'hipchat_room_id':", "\"\"\" Creates custom alert title to be used, e.g. 
as", "and give some # context as to which watcher failed", "subprocess import sys import warnings from email.mime.text import MIMEText from", "+= '\\nJIRA ticket: %s' % (url) to_addr = self.rule['email'] if", "body.encode('UTF-8') body = body.replace('&', '&amp;') body = body.replace('<', '&lt;') body", "type(item) == list: self.resolve_rule_references(root[i]) else: root[i] = self.resolve_rule_reference(item) elif type(root)", "alerts via stomp to a broker. \"\"\" required_options = frozenset(['stomp_hostname',", "[post_url] self.post_url = post_url self.post_proxy = self.rule.get('http_post_proxy') self.post_payload = self.rule.get('http_post_payload',", "we will do best-effort and try to set a string", "an email alert \"\"\" required_options = frozenset(['email']) def __init__(self, *args):", "in the title if 'query_key' in self.rule and lookup_es_key(matches[0], self.rule['query_key']):", "'jira_components', 'jira_description', 'jira_ignore_in_title', 'jira_issuetype', 'jira_label', 'jira_labels', 'jira_max_age', 'jira_priority', 'jira_project', 'jira_server',", "value_str = unicode(value) value_str.replace('\\\\n', '\\n') if type(value) in [list, dict]:", "containing fields in match for the given rule. 
\"\"\" def", "JIRAError from requests.exceptions import RequestException from staticconf.loader import yaml_loader from", "email_msg['Date'] = formatdate() if self.rule.get('cc'): email_msg['CC'] = ','.join(self.rule['cc']) to_addr =", "qk: subject += ' - %s' % (qk) return subject", "'') self.aws_access_key_id = self.rule.get('aws_access_key_id') self.aws_secret_access_key = self.rule.get('aws_secret_access_key') self.aws_region = self.rule.get('aws_region',", "sent to Twilio\") def get_info(self): return {'type': 'twilio', 'twilio_client_name': self.twilio_from_number}", "= self.rule['victorops_api_key'] self.victorops_routing_key = self.rule['victorops_routing_key'] self.victorops_message_type = self.rule['victorops_message_type'] self.victorops_entity_display_name =", "relevant information to the alert. \"\"\" raise NotImplementedError() def get_info(self):", ":param account_file: Name of the file which contains user and", "the specified endpoint(s). \"\"\" for match in matches: payload =", "# and attached to each alerters used by a rule", "!= list: self.labels = [self.labels] self.jira_args['labels'] = self.labels if self.watchers:", "underscores to spaces normalized_jira_field = jira_field[5:].replace('_', ' ').lower() # All", "'ms_teams_alert_summary']) def __init__(self, rule): super(MsTeamsAlerter, self).__init__(rule) self.ms_teams_webhook_url = self.rule['ms_teams_webhook_url'] if", "class EmailAlerter(Alerter): \"\"\" Sends an email alert \"\"\" required_options =", "elastalert_logger.info(\"Alert sent to Gitter\") def get_info(self): return {'type': 'gitter', 'gitter_webhook_url':", "val is None else val for val in alert_subject_values] return", "self.rule.get('servicenow_proxy', None) def alert(self, matches): for match in matches: #", "'subcategory', 'cmdb_ci', 'caller_id' ]) def __init__(self, rule): super(ServiceNowAlerter, self).__init__(rule) self.servicenow_rest_url", "self.smtp.close() elastalert_logger.info(\"Sent email to %s\" % (to_addr)) def 
create_default_title(self, matches):", "self.slack_username_override, 'channel': self.slack_channel_override, 'parse': self.slack_parse_override, 'text': self.slack_text_string, 'attachments': [ {", "is None: key_value = self.rule.get(self.pagerduty_incident_key_args[i]) if key_value: incident_key_values[i] = key_value", "self.rule.get('jira_components', self.rule.get('jira_component')) # We used to support only a single", "in self.known_field_list: # Remove the jira_ part. Convert underscores to", "unicode(BasicMatchString(self.rule, match)) fullmessage['alertDate'] = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\") fullmessage['body'] = self.create_alert_body(matches) self.stomp_hostname", "'service_key': self.pagerduty_service_key, 'description': self.create_title(matches), 'event_type': 'trigger', 'incident_key': self.get_incident_key(matches), 'client': self.pagerduty_client_name,", "we may be modifying the contents of the structure we're", "rule): super(HTTPPostAlerter, self).__init__(rule) post_url = self.rule.get('http_post_url') if isinstance(post_url, basestring): post_url", "match in matches: body += unicode(BasicMatchString(self.rule, match)) # Separate text", "has configured if jira_field.startswith('jira_') and jira_field not in self.known_field_list: #", "self.bump_tickets = self.rule.get('jira_bump_tickets', False) self.bump_not_in_statuses = self.rule.get('jira_bump_not_in_statuses') self.bump_in_statuses = self.rule.get('jira_bump_in_statuses')", "rule level properties too for i in range(len(incident_key_values)): if incident_key_values[i]", "= frozenset(['pagerduty_service_key', 'pagerduty_client_name']) def __init__(self, rule): super(PagerDutyAlerter, self).__init__(rule) self.pagerduty_service_key =", "timestamp to subject if 'query_key' in self.rule: qk = matches[0].get(self.rule['query_key'])", "if not field: # Log a warning to ElastAlert saying", "None) def create_default_title(self, matches): subject = 'ElastAlert: %s' % 
(self.rule['name'])", "self.telegram_proxy} if self.telegram_proxy else None payload = { 'chat_id': self.telegram_room_id,", "from util import EAException from util import elastalert_logger from util", "self.smtp_port) else: self.smtp = SMTP(self.smtp_host) self.smtp.ehlo() if self.smtp.has_extn('STARTTLS'): self.smtp.starttls(keyfile=self.smtp_key_file, certfile=self.smtp_cert_file)", "self.rule: qk = matches[0].get(self.rule['query_key']) if qk: subject += ' -", "sns_client.publish( TopicArn=self.sns_topic_arn, Message=body, Subject=self.create_title(matches) ) elastalert_logger.info(\"Sent sns notification to %s\"", "self.rule.get('telegram_proxy', None) def alert(self, matches): body = u'⚠ *%s* ⚠", "contain a field type corresponding to the type of Alerter.", "else: return self.rule[strValue[1:-1]] else: return value def alert(self, match): \"\"\"", "for key, value in root.copy().iteritems(): if type(value) == dict or", "debug alerter uses a Python logger (by default, alerting to", "since we may be modifying the contents of the structure", "ones) fields = self.client.fields() for jira_field, value in self.rule.iteritems(): #", "a query_key, use that in the title if 'query_key' in", "in self.rule.get('alert_text_kw').items(): val = lookup_es_key(self.match, name) # Support referencing other", "TopicArn=self.sns_topic_arn, Message=body, Subject=self.create_title(matches) ) elastalert_logger.info(\"Sent sns notification to %s\" %", "proxies = {'https': self.pagerduty_proxy} if self.pagerduty_proxy else None try: response", "'elastalert') self.slack_channel_override = self.rule.get('slack_channel_override', '') self.slack_emoji_override = self.rule.get('slack_emoji_override', ':ghost:') self.slack_icon_url_override", "\"category\": self.rule['category'], \"subcategory\": self.rule['subcategory'], \"cmdb_ci\": self.rule['cmdb_ci'], \"caller_id\": self.rule[\"caller_id\"] } try:", "context as to which watcher failed to be added raise", 
"unicode(BasicMatchString(self.rule, match)) # Separate text of aggregated alerts with dashes", "boto3 import requests import stomp from exotel import Exotel from", "each alert \"\"\" required_options = frozenset(['jira_server', 'jira_account_file', 'jira_project', 'jira_issuetype']) #", "Therefore, try both just in case for identifier in ['name',", "if self.smtp_port: self.smtp = SMTP(self.smtp_host, self.smtp_port) else: self.smtp = SMTP(self.smtp_host)", "this is only the case for two built-in types, id:", "self.rule.get('slack_emoji_override', ':ghost:') self.slack_icon_url_override = self.rule.get('slack_icon_url_override', '') self.slack_msg_color = self.rule.get('slack_msg_color', 'danger')", "This is necessary for search to work. Other special characters", ") text_table = Texttable() text_table.header(summary_table_fields_with_count) match_aggregation = {} # Maintain", "self.smtp.ehlo() if self.smtp.has_extn('STARTTLS'): self.smtp.starttls(keyfile=self.smtp_key_file, certfile=self.smtp_cert_file) if 'smtp_auth_file' in self.rule: self.smtp.login(self.user,", "\"entity_display_name\": self.victorops_entity_display_name, \"monitoring_tool\": \"ElastAlert\", \"state_message\": body } try: response =", "match): text = unicode(JiraFormattedMatchString(self.rule, match)) timestamp = pretty_ts(lookup_es_key(match, self.rule['timestamp_field'])) comment", "2. A custom field that a JIRA admin has configured", "to subject if 'query_key' in self.rule: qk = matches[0].get(self.rule['query_key']) if", "match for the given rule. 
\"\"\" def __init__(self, rule, match):", "convert it to a list if it isn't bcc =", "False) self.bump_not_in_statuses = self.rule.get('jira_bump_not_in_statuses') self.bump_in_statuses = self.rule.get('jira_bump_in_statuses') self.bump_after_inactivity = self.rule.get('jira_bump_after_inactivity',", "conn.connect(self.stomp_login, self.stomp_password) conn.send(self.stomp_destination, json.dumps(fullmessage)) conn.disconnect() def get_info(self): return {'type': 'stomp'}", "raise EAException(\"Error posting to Gitter: %s\" % e) elastalert_logger.info(\"Alert sent", "range(len(priorities)): self.priority_ids[x] = priorities[x].id def set_assignee(self, assignee): self.assignee = assignee", "response != 200: raise EAException(\"Error posting to Exotel, response code", "e) elastalert_logger.info(\"Alert sent to Slack\") def get_info(self): return {'type': 'slack',", "None else val for val in incident_key_values] return self.pagerduty_incident_key.format(*incident_key_values) else:", "for arg in alert_subject_args] # Support referencing other top-level rule", "at %s:' % (self.rule['name'], match[qk], lookup_es_key(match, self.rule['timestamp_field'])) ) fullmessage['match'] =", "= Exotel(self.exotel_account_sid, self.exotel_auth_token) try: message_body = self.rule['name'] + self.sms_body response", "raise EAException(\"Error connecting to SMTP host: %s\" % (e)) except", "for command_arg in self.rule['command']] else: command = [command_arg % matches[0]", "if not top_events: self.text += 'No events found.\\n' else: top_events.sort(key=lambda", ") fullmessage['match'] = match[qk] else: elastalert_logger.info('Alert for %s at %s:'", "key, since it would have been matched in the lookup_es_key", "text += \"Aggregation resulted in the following data for summary_table_fields", "if self.bump_not_in_statuses: jql = '%s and status not in (%s)'", "self.post_proxy = self.rule.get('http_post_proxy') self.post_payload = self.rule.get('http_post_payload', {}) 
self.post_static_payload = self.rule.get('http_post_static_payload',", "raise EAException(\"Error posting to slack: %s\" % e) elastalert_logger.info(\"Alert sent", "self.jira_args[arg_name] = {'value': value} # Complex type else: self.jira_args[arg_name] =", "rule): super(TelegramAlerter, self).__init__(rule) self.telegram_bot_token = self.rule['telegram_bot_token'] self.telegram_room_id = self.rule['telegram_room_id'] self.telegram_api_url", "'red') self.hipchat_message_format = self.rule.get('hipchat_message_format', 'html') self.hipchat_auth_token = self.rule['hipchat_auth_token'] self.hipchat_room_id =", "raise EAException(\"Error posting to ServiceNow: %s\" % e) elastalert_logger.info(\"Alert sent", "for url in self.ms_teams_webhook_url: try: response = requests.post(url, data=json.dumps(payload, cls=DateTimeEncoder),", "self.create_alert_body(matches) # post to victorops headers = {'content-type': 'application/json'} #", "self.gitter_msg_level = self.rule.get('gitter_msg_level', 'error') def alert(self, matches): body = self.create_alert_body(matches)", "text = '' if 'aggregation' in self.rule and 'summary_table_fields' in", "EAException(\"Error creating JIRA ticket using jira_args (%s): %s\" % (self.jira_args,", "{'type': 'ServiceNow', 'self.servicenow_rest_url': self.servicenow_rest_url} class HTTPPostAlerter(Alerter): \"\"\" Requested elasticsearch indices", "alert_text_args = self.rule.get('alert_text_args') alert_text_values = [lookup_es_key(self.match, arg) for arg in", "= self.rule.get('alert_text_args') alert_text_values = [lookup_es_key(self.match, arg) for arg in alert_text_args]", "self.victorops_proxy = self.rule.get('victorops_proxy', None) def alert(self, matches): body = self.create_alert_body(matches)", "self.hipchat_notify = self.rule.get('hipchat_notify', True) self.hipchat_from = self.rule.get('hipchat_from', '') self.url =", "= self.rule.get('ms_teams_proxy', None) self.ms_teams_alert_summary = self.rule.get('ms_teams_alert_summary', 
'ElastAlert Message') self.ms_teams_alert_fixed_width =", ") elastalert_logger.info(\"Sent sns notification to %s\" % (self.sns_topic_arn)) class HipChatAlerter(Alerter):", "create_alert_body(self, matches): body = self.get_aggregation_summary_text(matches) for match in matches: body", "root): # Support referencing other top-level rule properties to avoid", "{} for x in range(len(priorities)): self.priority_ids[x] = priorities[x].id def set_assignee(self,", "response.raise_for_status() except RequestException as e: raise EAException(\"Error posting to VictorOps:", "contains non-unicode, so lets pretend it's Latin-1 to show something", "matches[0] for command_arg in self.rule['command']] self.last_command = command except KeyError", "'application/json'} # set https proxy, if it was provided proxies", "staticconf.loader import yaml_loader from texttable import Texttable from twilio.base.exceptions import", "and 'summary_table_fields' in self.rule: summary_table_fields = self.rule['summary_table_fields'] if not isinstance(summary_table_fields,", "lookup_es_key from util import pretty_ts from util import ts_now from", "and strValue.endswith('$') and strValue[1:-1] in self.rule: if type(value) == int:", "if cc and isinstance(cc, basestring): self.rule['cc'] = [self.rule['cc']] # If", "email_msg.as_string()) self.smtp.close() elastalert_logger.info(\"Sent email to %s\" % (to_addr)) def create_default_title(self,", "= self.format_body(body) # post to slack headers = {'content-type': 'application/json'}", "# Also attempt to handle arrays of complex types that", "value for a multi-value field e.g. jira_labels: Only_One_Label if type(value)", "body = body.replace('&', '&amp;') body = body.replace('<', '&lt;') body =", "in self.jira_args: self.jira_args.pop('assignee') def find_existing_ticket(self, matches): # Default title, get", "description = str(BasicMatchString(self.rule, match)) # Set proper headers headers =", "in matches: # Parse everything into description. 
description = str(BasicMatchString(self.rule,", "types, id: issuekey and id: thumbnail if not ('schema' in", "self.match.items() match_items.sort(key=lambda x: x[0]) for key, value in match_items: if", "self.rule['hipchat_auth_token'] self.hipchat_room_id = self.rule['hipchat_room_id'] self.hipchat_domain = self.rule.get('hipchat_domain', 'api.hipchat.com') self.hipchat_ignore_ssl_errors =", "None self.jira_args['summary'] = title self.jira_args['description'] = self.create_alert_body(matches) try: self.issue =", "directly adjacent to words appear to be ok title =", "= self.rule['twilio_to_number'] self.twilio_from_number = self.rule['twilio_from_number'] def alert(self, matches): client =", "self.known_field_list: # Remove the jira_ part. Convert underscores to spaces", "verify=not self.hipchat_ignore_ssl_errors, proxies=proxies) warnings.resetwarnings() response.raise_for_status() except RequestException as e: raise", "\"clauseNames\":[\"cf[12807]\",\"My Custom Field\"],\"schema\":{\"type\":\"array\",\"items\":\"string\", # \"custom\":\"com.atlassian.jira.plugin.system.customfieldtypes:multiselect\",\"customId\":12807}} # There are likely others", "e: raise EAException(\"Error while running command %s: %s\" % ('", "unicode(text) def create_default_title(self, matches): return self.rule['name'] def get_account(self, account_file): \"\"\"", "= [{'name': component} for component in self.components] if self.labels: #", "None for url in self.post_url: try: response = requests.post(url, data=json.dumps(payload,", "been matched in the lookup_es_key call above for i in", "while running command %s: %s\" % (' '.join(command), e)) def", "as e: raise EAException(\"Error creating JIRA ticket using jira_args (%s):", "None) def alert(self, matches): for match in matches: # Parse", "= self.issue self.pipeline['jira_server'] = self.server def create_alert_body(self, matches): body =", "__init__(self, *args): super(SnsAlerter, self).__init__(*args) self.sns_topic_arn = 
self.rule.get('sns_topic_arn', '') self.aws_access_key_id =", "self.text += '\\n' def _add_match_items(self): match_items = self.match.items() match_items.sort(key=lambda x:", "in matches: try: self.comment_on_ticket(ticket, match) except JIRAError as e: logging.exception(\"Error", "def __init__(self, rule): super(TelegramAlerter, self).__init__(rule) self.telegram_bot_token = self.rule['telegram_bot_token'] self.telegram_room_id =", "a watcher. Does the user exist?\\n{1}\" .format( watcher, ex )),", "elif 'assignee' in self.jira_args: self.jira_args.pop('assignee') def find_existing_ticket(self, matches): # Default", "matches): # Format the command and arguments try: if self.new_style_string_format:", "= self.rule.get('smtp_key_file') self.smtp_cert_file = self.rule.get('smtp_cert_file') # Convert email to a", "'https://%s/v2/room/%s/notification?auth_token=%s' % ( self.hipchat_domain, self.hipchat_room_id, self.hipchat_auth_token) self.hipchat_proxy = self.rule.get('hipchat_proxy', None)", "email_msg['To'] = ', '.join(to_addr) email_msg['From'] = self.from_addr email_msg['Reply-To'] = self.rule.get('email_reply_to',", "compatibility # while also giving the user-facing API a more", "if intersection: msg = '%s Both have common statuses of", "# All jira fields should be found in the 'id'", "val for val in alert_subject_values] return alert_subject.format(*alert_subject_values) return alert_subject def", "\"ElastAlert\", \"state_message\": body } try: response = requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder),", "def alert(self, matches): body = self.create_alert_body(matches) # Add JIRA ticket", "of relevant information to the alert. 
\"\"\" if 'alert_subject' in", ".format( watcher, ex )), None, sys.exc_info()[2] except JIRAError as e:", "= self.server def create_alert_body(self, matches): body = self.description + '\\n'", "= self.rule.get('smtp_ssl', False) self.from_addr = self.rule.get('from_addr', 'ElastAlert') self.smtp_port = self.rule.get('smtp_port')", "\"assignment_group\": self.rule['assignment_group'], \"category\": self.rule['category'], \"subcategory\": self.rule['subcategory'], \"cmdb_ci\": self.rule['cmdb_ci'], \"caller_id\": self.rule[\"caller_id\"]", "post_key, es_key in self.post_payload.items(): payload[post_key] = lookup_es_key(match, es_key) headers =", "self.comment_on_ticket(ticket, match) except JIRAError as e: logging.exception(\"Error while commenting on", "complex types that have to be passed as objects with", "priorities[x].id def set_assignee(self, assignee): self.assignee = assignee if assignee: self.jira_args['assignee']", "self.rule.get('ms_teams_theme_color', '') def format_body(self, body): body = body.encode('UTF-8') if self.ms_teams_alert_fixed_width:", "\"\"\" required_options = frozenset(['gitter_webhook_url']) def __init__(self, rule): super(GitterAlerter, self).__init__(rule) self.gitter_webhook_url", "EAException(\"Error posting to pagerduty: %s\" % e) elastalert_logger.info(\"Trigger sent to", "-*- import copy import datetime import json import logging import", "'assignee' in self.jira_args: self.jira_args.pop('assignee') def find_existing_ticket(self, matches): # Default title,", "get_info(self): return {'type': 'gitter', 'gitter_webhook_url': self.gitter_webhook_url} class ServiceNowAlerter(Alerter): \"\"\" Creates", ":param matches: A list of dictionaries of relevant information to", "\"\"\" required_options = frozenset(['hipchat_auth_token', 'hipchat_room_id']) def __init__(self, rule): super(HipChatAlerter, self).__init__(rule)", "= lookup_es_key(self.match, name) # Support referencing other top-level rule properties", "type(value) == dict or 
type(value) == list: self.resolve_rule_references(root[key]) else: root[key]", "x.startswith('top_events_')]) json_blob = self._pretty_print_as_json(match_items) preformatted_text = u'{{code:json}}{0}{{code}}'.format(json_blob) self.text += preformatted_text", "that is not covered by the set that we are", "arg) for arg in alert_text_args] # Support referencing other top-level", "%s at %s:' % (self.rule['name'], lookup_es_key(match, self.rule['timestamp_field'])) ) fullmessage['match'] =", "a warning to ElastAlert saying that we couldn't find that", "'hipchat_room_id': self.hipchat_room_id} class MsTeamsAlerter(Alerter): \"\"\" Creates a Microsoft Teams Conversation", "matches): body = self.create_alert_body(matches) # post to pagerduty headers =", "be provided as an object. if 'custom' in field['schema'] and", "logging.exception(\"Error while commenting on ticket %s: %s\" % (ticket, e))", "= self.rule.get('ms_teams_alert_summary', 'ElastAlert Message') self.ms_teams_alert_fixed_width = self.rule.get('ms_teams_alert_fixed_width', False) self.ms_teams_theme_color =", "= [] qk = self.rule.get('query_key', None) fullmessage = {} for", "'query_key' in self.rule: qk = matches[0].get(self.rule['query_key']) if qk: subject +=", "= self.rule.get(alert_subject_args[i]) if alert_value: alert_subject_values[i] = alert_value alert_subject_values = ['<MISSING", ") sns_client = session.client('sns') sns_client.publish( TopicArn=self.sns_topic_arn, Message=body, Subject=self.create_title(matches) ) elastalert_logger.info(\"Sent", "that we don't have on our radar # 2. 
A", "alert_text = alert_text.format(*alert_text_values) elif 'alert_text_kw' in self.rule: kw = {}", "RequestException as e: raise EAException(\"Error posting to HipChat: %s\" %", "None payload = { \"description\": description, \"short_description\": self.rule['short_description'], \"comments\": self.rule['comments'],", "an identifier 'key' elif array_items == 'option': self.jira_args[arg_name] = [{'value':", "self.slack_webhook_url = self.rule['slack_webhook_url'] if isinstance(self.slack_webhook_url, basestring): self.slack_webhook_url = [self.slack_webhook_url] self.slack_proxy", "= self.rule['victorops_routing_key'] self.victorops_message_type = self.rule['victorops_message_type'] self.victorops_entity_display_name = self.rule.get('victorops_entity_display_name', 'no entity", "encoding='Latin-1', ensure_ascii=False) def __str__(self): self.text = '' if 'alert_text' not", "self.hipchat_from = self.rule.get('hipchat_from', '') self.url = 'https://%s/v2/room/%s/notification?auth_token=%s' % ( self.hipchat_domain,", "'<br />') # Post to HipChat headers = {'content-type': 'application/json'}", "jira_ part. Convert underscores to spaces normalized_jira_field = jira_field[5:].replace('_', '", "= [self.rule['email']] # If there is a cc then also", "a multi-value field e.g. jira_labels: Only_One_Label if type(value) != list:", "self.client.add_watcher(self.issue.key, watcher) except Exception as ex: # Re-raise the exception,", "body): body = body.encode('UTF-8') if self.ms_teams_alert_fixed_width: body = body.replace('`', \"'\")", "200: raise EAException(\"Error posting to Exotel, response code is %s\"", "e) elastalert_logger.info(\"Trigger sent to Twilio\") def get_info(self): return {'type': 'twilio',", "# Support single component or list if type(self.components) != list:", "smtplib import SMTP from smtplib import SMTP_SSL from smtplib import", ":param rule: The rule configuration. 
\"\"\" required_options = frozenset([]) def", "= set([ 'username', 'password', 'servicenow_rest_url', 'short_description', 'comments', 'assignment_group', 'category', 'subcategory',", "_ensure_new_line(self): while self.text[-2:] != '\\n\\n': self.text += '\\n' def _add_custom_alert_text(self):", "self.rule[strValue[1:-1]] else: return value def alert(self, match): \"\"\" Send an", "'self.servicenow_rest_url': self.servicenow_rest_url} class HTTPPostAlerter(Alerter): \"\"\" Requested elasticsearch indices are sent", "self.watchers: for watcher in self.watchers: try: self.client.add_watcher(self.issue.key, watcher) except Exception", "# Run command and pipe data try: subp = subprocess.Popen(command,", "url in self.post_url: try: response = requests.post(url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers,", "set_assignee(self, assignee): self.assignee = assignee if assignee: self.jira_args['assignee'] = {'name':", "self.rule['type'].get_match_str(self.match) def _add_top_counts(self): for key, counts in self.match.items(): if key.startswith('top_events_'):", "super(TwilioAlerter, self).__init__(rule) self.twilio_account_sid = self.rule['twilio_account_sid'] self.twilio_auth_token = self.rule['twilio_auth_token'] self.twilio_to_number =", "and created >= \"%s\"' % (self.project, title, date) if self.bump_in_statuses:", "lookup_es_key(matches[0], self.rule['email_from_field']) if isinstance(recipient, basestring): if '@' in recipient: to_addr", "val in alert_text_values] alert_text = alert_text.format(*alert_text_values) elif 'alert_text_kw' in self.rule:", "return json.JSONEncoder.default(self, obj) class BasicMatchString(object): \"\"\" Creates a string containing", "self.ms_teams_alert_fixed_width = self.rule.get('ms_teams_alert_fixed_width', False) self.ms_teams_theme_color = self.rule.get('ms_teams_theme_color', '') def format_body(self,", "self.jira_args[arg_name] = value # Number type elif arg_type == 'number':", "\"\"\" Requested elasticsearch 
indices are sent by HTTP POST. Encoded", "isinstance(self.rule['command'], basestring): self.shell = True if '%' in self.rule['command']: logging.warning('Warning!", "% e) elastalert_logger.info(\"HTTP Post alert sent.\") def get_info(self): return {'type':", "only first 1024 chars raise EAException(\"Error connecting to JIRA: %s\"", "Send an alert. Match is a dictionary of information about", "jira_bump_in_statuses (%s) and jira_bump_not_in_statuses (%s) are set.' % \\ (','.join(self.bump_in_statuses),", "return subject def get_info(self): return {'type': 'email', 'recipients': self.rule['email']} class", "the alert entirely? Probably the latter... raise Exception(\"Could not find", "in self.rule and self.rule['new_style_string_format']: self.new_style_string_format = True def alert(self, matches):", "self.telegram_room_id} class GitterAlerter(Alerter): \"\"\" Creates a Gitter activity message for", "ticket %s' % (ticket.key)) for match in matches: try: self.comment_on_ticket(ticket,", "types that have to be passed as objects with an", "if it was provided proxies = {'https': self.victorops_proxy} if self.victorops_proxy", "(self.create_title(matches)) for match in matches: body += unicode(BasicMatchString(self.rule, match)) #", "body += '\\n----------------------------------------\\n' body += u' ```' headers = {'content-type':", "= self.rule.get('bcc') if bcc and isinstance(bcc, basestring): self.rule['bcc'] = [self.rule['bcc']]", "%s' % (url) to_addr = self.rule['email'] if 'email_from_field' in self.rule:", "msg, ','.join(intersection)) msg += ' This should be simplified to", "logger (by default, alerting to terminal). 
\"\"\" def alert(self, matches):", "self.assignee = assignee if assignee: self.jira_args['assignee'] = {'name': assignee} elif", "self.bump_not_in_statuses: jql = '%s and status not in (%s)' %", "= self.create_alert_body(matches) session = boto3.Session( aws_access_key_id=self.aws_access_key_id, aws_secret_access_key=self.aws_secret_access_key, region_name=self.aws_region, profile_name=self.profile )", "matches: # Parse everything into description. description = str(BasicMatchString(self.rule, match))", "was provided proxies = {'https': self.hipchat_proxy} if self.hipchat_proxy else None", "% (term, count) self.text += '\\n' def _add_match_items(self): match_items =", "types else: # Simple string types if arg_type in ['string',", "in the lookup_es_key call above for i in xrange(len(alert_subject_values)): if", "= self.slack_emoji_override for url in self.slack_webhook_url: try: response = requests.post(url,", "match[qk], lookup_es_key(match, self.rule['timestamp_field'])) ) fullmessage['match'] = match[qk] else: elastalert_logger.info('Alert for", "1: body += '\\n----------------------------------------\\n' body += u' ```' headers =", "return {'type': 'debug'} class EmailAlerter(Alerter): \"\"\" Sends an email alert", "watcher, ex )), None, sys.exc_info()[2] except JIRAError as e: raise", "profile_name=self.profile ) sns_client = session.client('sns') sns_client.publish( TopicArn=self.sns_topic_arn, Message=body, Subject=self.create_title(matches) )", "a ServiceNow alert \"\"\" required_options = set([ 'username', 'password', 'servicenow_rest_url',", "else: self.jira_args[arg_name] = value # Number type elif arg_type ==", "+= preformatted_text class Alerter(object): \"\"\" Base class for types of", "for %s at %s:' % (self.rule['name'], lookup_es_key(match, self.rule['timestamp_field'])) ) fullmessage['match']", "requests.post( self.servicenow_rest_url, auth=(self.rule['username'], self.rule['password']), headers=headers, data=json.dumps(payload, 
cls=DateTimeEncoder), proxies=proxies ) response.raise_for_status()", "the structure we're walking for i, item in enumerate(copy.copy(root)): if", "Note this is only the case for two built-in types,", "e)) elastalert_logger.info(\"Opened Jira ticket: %s\" % (self.issue)) if self.pipeline is", "basestring): self.shell = True if '%' in self.rule['command']: logging.warning('Warning! You", "> 9999): body = body[:9980] + '..(truncated)' # Use appropriate", "corresponding to the type of Alerter. \"\"\" return {'type': 'Unknown'}", "match in matches: body += unicode(JiraFormattedMatchString(self.rule, match)) if len(matches) >", "{'type': 'slack', 'slack_username_override': self.slack_username_override, 'slack_webhook_url': self.slack_webhook_url} class PagerDutyAlerter(Alerter): \"\"\" Create", ")), None, sys.exc_info()[2] except JIRAError as e: raise EAException(\"Error creating", "raise Exception(\"Could not find a definition for the jira field", "'incident_key': self.get_incident_key(matches), 'client': self.pagerduty_client_name, 'details': { \"information\": body.encode('UTF-8'), }, }", "= self.rule['twilio_from_number'] def alert(self, matches): client = TwilioClient(self.twilio_account_sid, self.twilio_auth_token) try:", "Post alert sent.\") def get_info(self): return {'type': 'http_post', 'http_post_webhook_url': self.post_url}", "basestring): self.rule['bcc'] = [self.rule['bcc']] add_suffix = self.rule.get('email_add_domain') if add_suffix and", "self.smtp = SMTP_SSL(self.smtp_host, keyfile=self.smtp_key_file, certfile=self.smtp_cert_file) else: if self.smtp_port: self.smtp =", "cls=DateTimeEncoder) + '\\n' stdout, stderr = subp.communicate(input=match_json) if self.rule.get(\"fail_on_non_zero_exit\", False)", "' ').lower()), None) if field: break if not field: #", "'color': self.slack_msg_color, 'title': self.create_title(matches), 'text': body, 'mrkdwn_in': ['text', 'pretext'], 'fields':", "%s:' % (self.rule['name'], lookup_es_key(match, 
self.rule['timestamp_field'])) ) fullmessage['match'] = lookup_es_key(match, self.rule['timestamp_field'])", "type(value) == list: self.resolve_rule_references(root[key]) else: root[key] = self.resolve_rule_reference(value) def resolve_rule_reference(self,", "reverse=True) for term, count in top_events: self.text += '%s: %s\\n'", "= [self.ms_teams_webhook_url] self.ms_teams_proxy = self.rule.get('ms_teams_proxy', None) self.ms_teams_alert_summary = self.rule.get('ms_teams_alert_summary', 'ElastAlert", "self.stomp_login = self.rule.get('stomp_login', 'admin') self.stomp_password = self.rule.get('stomp_password', '<PASSWORD>') self.stomp_destination =", ">= \"%s\"' % (self.project, title, date) if self.bump_in_statuses: jql =", "self.telegram_bot_token, \"sendMessage\") self.telegram_proxy = self.rule.get('telegram_proxy', None) def alert(self, matches): body", "fields if normalized_jira_field == f[identifier].replace('_', ' ').lower()), None) if field:", "= self.rule.get('hipchat_message_format', 'html') self.hipchat_auth_token = self.rule['hipchat_auth_token'] self.hipchat_room_id = self.rule['hipchat_room_id'] self.hipchat_domain", "import SMTPAuthenticationError from smtplib import SMTPException from socket import error", "get_info(self): return {'type': 'email', 'recipients': self.rule['email']} class JiraAlerter(Alerter): \"\"\" Creates", "if type(self.components) != list: self.jira_args['components'] = [{'name': self.components}] else: self.jira_args['components']", "= assignee if assignee: self.jira_args['assignee'] = {'name': assignee} elif 'assignee'", "the given rule. 
\"\"\" def __init__(self, rule, match): self.rule =", "return title def get_info(self): return {'type': 'jira'} class CommandAlerter(Alerter): required_options", "except UnicodeDecodeError: # This blob contains non-unicode, so lets pretend", "body.replace('&', '&amp;') body = body.replace('<', '&lt;') body = body.replace('>', '&gt;')", "if self.rule.get('smtp_auth_file'): self.get_account(self.rule['smtp_auth_file']) self.smtp_key_file = self.rule.get('smtp_key_file') self.smtp_cert_file = self.rule.get('smtp_cert_file') #", "self.create_title(matches), 'text': body } if self.ms_teams_theme_color != '': payload['themeColor'] =", "incident_key_values[i] = key_value incident_key_values = ['<MISSING VALUE>' if val is", "self.telegram_bot_token = self.rule['telegram_bot_token'] self.telegram_room_id = self.rule['telegram_room_id'] self.telegram_api_url = self.rule.get('telegram_api_url', 'api.telegram.org')", "[ 'jira_account_file', 'jira_assignee', 'jira_bump_after_inactivity', 'jira_bump_in_statuses', 'jira_bump_not_in_statuses', 'jira_bump_tickets', 'jira_component', 'jira_components', 'jira_description',", "redundant copy/paste if type(root) == list: # Make a copy", "a more representative name self.components = self.rule.get('jira_components', self.rule.get('jira_component')) # We", "As a convenience, support the scenario wherein the user only", "value} # Complex type else: self.jira_args[arg_name] = {'name': value} def", "self.text += '%s:\\n' % (key[11:]) top_events = counts.items() if not", "Texttable() text_table.header(summary_table_fields_with_count) match_aggregation = {} # Maintain an aggregate count", "If not, it will manifest itself as an API error", "super(TelegramAlerter, self).__init__(rule) self.telegram_bot_token = self.rule['telegram_bot_token'] self.telegram_room_id = self.rule['telegram_room_id'] self.telegram_api_url =", "# \"custom\":\"com.atlassian.jira.plugin.system.customfieldtypes:multiselect\",\"customId\":12807}} # There are likely others 
that will need", "', ' ') title = title.replace('\\\\', '\\\\\\\\') date = (datetime.datetime.now()", "(key[11:]) top_events = counts.items() if not top_events: self.text += 'No", "is either: # 1. A built-in supported field in JIRA", "'email', 'recipients': self.rule['email']} class JiraAlerter(Alerter): \"\"\" Creates a Jira ticket", "self.exotel_from_number = self.rule['exotel_from_number'] self.sms_body = self.rule.get('exotel_message_body', '') def alert(self, matches):", "self.rule.get('smtp_host', 'localhost') self.smtp_ssl = self.rule.get('smtp_ssl', False) self.from_addr = self.rule.get('from_addr', 'ElastAlert')", "[{'name': self.components}] else: self.jira_args['components'] = [{'name': component} for component in", "as to which watcher failed to be added raise Exception(", "for %s at %s:' % (self.rule['name'], lookup_es_key(match, self.rule['timestamp_field']))) elastalert_logger.info(unicode(BasicMatchString(self.rule, match)))", "self.rule.get('bcc') if bcc and isinstance(bcc, basestring): self.rule['bcc'] = [self.rule['bcc']] add_suffix", "as e: raise EAException(\"Error posting to twilio: %s\" % e)", "if alert_text_values[i] is None: alert_value = self.rule.get(alert_text_args[i]) if alert_value: alert_text_values[i]", "Creates a ServiceNow alert \"\"\" required_options = set([ 'username', 'password',", "count in match_aggregation.iteritems(): text_table.add_row([key for key in keys] + [count])", "= body.replace('&', '&amp;') body = body.replace('<', '&lt;') body = body.replace('>',", "'ElastAlert: %s' % (self.rule['name']) # If the rule has a", "identifier in ['name', 'id']: field = next((f for f in", "representative name self.labels = self.rule.get('jira_labels', self.rule.get('jira_label')) self.description = self.rule.get('jira_description', '')", "to load the alert entirely? Probably the latter... 
raise Exception(\"Could", "% (self.rule['name'], match[qk], lookup_es_key(match, self.rule['timestamp_field']))) alerts.append( '1)Alert for %s, %s", "creation. Only as a follow-up action if self.watchers: for watcher", "(e)) self.smtp.sendmail(self.from_addr, to_addr, email_msg.as_string()) self.smtp.close() elastalert_logger.info(\"Sent email to %s\" %", "set https proxy, if it was provided proxies = {'https':", "self.rule: if type(value) == int: return int(self.rule[strValue[1:-1]]) else: return self.rule[strValue[1:-1]]", "EAException(\"Error posting to ServiceNow: %s\" % e) elastalert_logger.info(\"Alert sent to", "types of alerts. :param rule: The rule configuration. \"\"\" required_options", "initial creation. Only as a follow-up action if self.watchers: for", "in self.match.items(): if key.startswith('top_events_'): self.text += '%s:\\n' % (key[11:]) top_events", "= TwilioClient(self.twilio_account_sid, self.twilio_auth_token) try: client.messages.create(body=self.rule['name'], to=self.twilio_to_number, from_=self.twilio_from_number) except TwilioRestException as", "not x.startswith('top_events_')]) json_blob = self._pretty_print_as_json(match_items) preformatted_text = u'{{code:json}}{0}{{code}}'.format(json_blob) self.text +=", "in account_conf or 'password' not in account_conf: raise EAException('Account file", "[{'value': v} for v in value] else: # Try setting", "list if it isn't already if isinstance(self.rule['email'], basestring): self.rule['email'] =", "raise an exception since we don't know how to set", "session.client('sns') sns_client.publish( TopicArn=self.sns_topic_arn, Message=body, Subject=self.create_title(matches) ) elastalert_logger.info(\"Sent sns notification to", "'new_style_string_format' in self.rule and self.rule['new_style_string_format']: self.new_style_string_format = True def alert(self,", "e) elastalert_logger.info(\"Alert sent to HipChat room %s\" % self.hipchat_room_id) def", "elif isinstance(recipient, list): to_addr = 
recipient if 'email_add_domain' in self.rule:", "if self.pipeline is not None: self.pipeline['jira_ticket'] = None self.pipeline['jira_server'] =", "either: # 1. A built-in supported field in JIRA that", "self.rule.get('aws_region', 'us-east-1') self.profile = self.rule.get('boto_profile', None) # Deprecated self.profile =", "field['id'] # Check the schema information to decide how to", "\"message_type\": self.victorops_message_type, \"entity_display_name\": self.victorops_entity_display_name, \"monitoring_tool\": \"ElastAlert\", \"state_message\": body } try:", "RequestException as e: raise EAException(\"Error posting to Telegram: %s\" %", "raise EAException(\"Error connecting to JIRA: %s\" % (str(e)[:1024])) try: if", "as e: raise EAException(\"Error posting to Gitter: %s\" % e)", "= requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies) warnings.resetwarnings() response.raise_for_status() except RequestException", "EAException(\"Error posting to Exotel, response code is %s\" % response)", "proxies = {'https': self.gitter_proxy} if self.gitter_proxy else None payload =", "'alert_text_only': self._add_rule_text() self._ensure_new_line() if self.rule.get('top_count_keys'): self._add_top_counts() if self.rule.get('alert_text_type') != 'exclude_fields':", "matches: body += unicode(JiraFormattedMatchString(self.rule, match)) if len(matches) > 1: body", "sys.exc_info()[2] except JIRAError as e: raise EAException(\"Error creating JIRA ticket", "class MsTeamsAlerter(Alerter): \"\"\" Creates a Microsoft Teams Conversation Message for", "from texttable import Texttable from twilio.base.exceptions import TwilioRestException from twilio.rest", "'alert_text_kw' in self.rule: kw = {} for name, kw_name in", "the command and arguments try: if self.new_style_string_format: command = [command_arg.format(match=matches[0])", "xrange(len(alert_text_values)): if alert_text_values[i] is None: alert_value = 
self.rule.get(alert_text_args[i]) if alert_value:", "for i in range(len(incident_key_values)): if incident_key_values[i] is None: key_value =", "= self.rule.get('aws_access_key_id') self.aws_secret_access_key = self.rule.get('aws_secret_access_key') self.aws_region = self.rule.get('aws_region', 'us-east-1') self.profile", "'exotel_auth_token', 'exotel_to_number', 'exotel_from_number']) def __init__(self, rule): super(ExotelAlerter, self).__init__(rule) self.exotel_account_sid =", "if self.rule.get('top_count_keys'): self._add_top_counts() if self.rule.get('alert_text_type') != 'exclude_fields': self._add_match_items() return self.text", "').lower()), None) if field: break if not field: # Log", "'ElastAlert') self.smtp_port = self.rule.get('smtp_port') if self.rule.get('smtp_auth_file'): self.get_account(self.rule['smtp_auth_file']) self.smtp_key_file = self.rule.get('smtp_key_file')", "be used as custom fields require special handling # Here", "Does the user exist?\\n{1}\" .format( watcher, ex )), None, sys.exc_info()[2]", "in self.rule: kw = {} for name, kw_name in self.rule.get('alert_text_kw').items():", "isinstance(summary_table_fields, list): summary_table_fields = [summary_table_fields] # Include a count aggregation", "None) fullmessage = {} for match in matches: if qk", "more representative name self.labels = self.rule.get('jira_labels', self.rule.get('jira_label')) self.description = self.rule.get('jira_description',", "information to decide how to set the value correctly #", "cls=DateTimeEncoder, sort_keys=True, indent=4, encoding='Latin-1', ensure_ascii=False) def __str__(self): self.text = ''", "= self.rule.get('cc') if cc and isinstance(cc, basestring): self.rule['cc'] = [self.rule['cc']]", "to a list if it isn't bcc = self.rule.get('bcc') if", "required_options = frozenset(['pagerduty_service_key', 'pagerduty_client_name']) def __init__(self, rule): super(PagerDutyAlerter, self).__init__(rule) self.pagerduty_service_key", "\"monitoring_tool\": 
\"ElastAlert\", \"state_message\": body } try: response = requests.post(self.url, data=json.dumps(payload,", "alert_subject_values = ['<MISSING VALUE>' if val is None else val", "that a JIRA admin has configured if jira_field.startswith('jira_') and jira_field", "for %s, %s at %s:' % (self.rule['name'], match[qk], lookup_es_key(match, self.rule['timestamp_field'])))", "be used, e.g. as an e-mail subject or JIRA issue", "top_events.sort(key=lambda x: x[1], reverse=True) for term, count in top_events: self.text", "{'type': 'gitter', 'gitter_webhook_url': self.gitter_webhook_url} class ServiceNowAlerter(Alerter): \"\"\" Creates a ServiceNow", "'date', 'datetime']: # Special case for multi-select custom types (the", "'\\n' body += self.get_aggregation_summary_text(matches) for match in matches: body +=", "__init__(self, rule): super(VictorOpsAlerter, self).__init__(rule) self.victorops_api_key = self.rule['victorops_api_key'] self.victorops_routing_key = self.rule['victorops_routing_key']", "HipChat: %s\" % e) elastalert_logger.info(\"Alert sent to HipChat room %s\"", "Teams\") def get_info(self): return {'type': 'ms_teams', 'ms_teams_webhook_url': self.ms_teams_webhook_url} class SlackAlerter(Alerter):", "ticket, match): text = unicode(JiraFormattedMatchString(self.rule, match)) timestamp = pretty_ts(lookup_es_key(match, self.rule['timestamp_field']))", "self.rule['category'], \"subcategory\": self.rule['subcategory'], \"cmdb_ci\": self.rule['cmdb_ci'], \"caller_id\": self.rule[\"caller_id\"] } try: response", "to support only a single component. 
This allows us to", "cc = self.rule.get('cc') if cc and isinstance(cc, basestring): self.rule['cc'] =", "user and password fields') self.user = account_conf['user'] self.password = account_conf['password']", "for key, value in match_items: if key.startswith('top_events_'): continue value_str =", "self.rule = rule self.match = match def _ensure_new_line(self): while self.text[-2:]", "= self.create_alert_body(matches) # Add JIRA ticket if it exists if", "room message for each alert \"\"\" required_options = frozenset(['slack_webhook_url']) def", "command: %s\" % (e)) # Run command and pipe data", "elastalert_logger.info(unicode(BasicMatchString(self.rule, match))) def get_info(self): return {'type': 'debug'} class EmailAlerter(Alerter): \"\"\"", "host: %s\" % (e)) except SMTPAuthenticationError as e: raise EAException(\"SMTP", "= self.create_title(matches) if self.bump_tickets: ticket = self.find_existing_ticket(matches) if ticket: inactivity_datetime", "'pagerduty_client_name': self.pagerduty_client_name} class ExotelAlerter(Alerter): required_options = frozenset(['exotel_account_sid', 'exotel_auth_token', 'exotel_to_number', 'exotel_from_number'])", "body += u' ```' headers = {'content-type': 'application/json'} # set", "frozenset(['telegram_bot_token', 'telegram_room_id']) def __init__(self, rule): super(TelegramAlerter, self).__init__(rule) self.telegram_bot_token = self.rule['telegram_bot_token']", "first 1024 chars raise EAException(\"Error connecting to JIRA: %s\" %", "msg += ' This should be simplified to use only", "if normalized_jira_field == f[identifier].replace('_', ' ').lower()), None) if field: break", "body += self.get_aggregation_summary_text(matches) for match in matches: body += unicode(JiraFormattedMatchString(self.rule,", "if alert_value: alert_text_values[i] = alert_value alert_text_values = [missing if val", "'alert_subject' not in self.rule: title = self.create_default_title(matches, True) else: title", "as e: raise EAException(\"Error posting 
to HipChat: %s\" % e)", "Jira ticket for each alert \"\"\" required_options = frozenset(['jira_server', 'jira_account_file',", "just in case for identifier in ['name', 'id']: field =", "else None payload = { '@type': 'MessageCard', '@context': 'http://schema.org/extensions', 'summary':", "to support only a single label. This allows us to", "['name', 'id']: field = next((f for f in fields if", "VictorOps Incident for each alert \"\"\" required_options = frozenset(['victorops_api_key', 'victorops_routing_key',", "only a single label. This allows us to maintain backwards", "if val is None else val for val in incident_key_values]", "str pass self.text += '%s: %s\\n' % (key, value_str) def", "then also convert it a list if it isn't cc", "'name' field. Therefore, try both just in case for identifier", "using 'name' as the key # This may not work,", "try: self.comment_on_ticket(ticket, match) except JIRAError as e: logging.exception(\"Error while commenting", "% (count) return title def get_info(self): return {'type': 'jira'} class", "it isn't already if isinstance(self.rule['email'], basestring): self.rule['email'] = [self.rule['email']] #", "EAException(\"Error posting to ms teams: %s\" % e) elastalert_logger.info(\"Alert sent", "return None self.jira_args['summary'] = title self.jira_args['description'] = self.create_alert_body(matches) try: self.issue", "\"\"\" required_options = frozenset(['telegram_bot_token', 'telegram_room_id']) def __init__(self, rule): super(TelegramAlerter, self).__init__(rule)", "[self.rule['cc']] # If there is a bcc then also convert", "arg) for arg in self.pagerduty_incident_key_args] # Populate values with rule", "for url in self.post_url: try: response = requests.post(url, data=json.dumps(payload, cls=DateTimeEncoder),", "+ '\\n\\n' self._add_custom_alert_text() self._ensure_new_line() if self.rule.get('alert_text_type') != 'alert_text_only': self._add_rule_text() self._ensure_new_line()", "have on our radar # 2. 
A custom field that", "alert_value = self.rule.get(alert_subject_args[i]) if alert_value: alert_subject_values[i] = alert_value alert_subject_values =", "self.gitter_webhook_url = self.rule['gitter_webhook_url'] self.gitter_proxy = self.rule.get('gitter_proxy', None) self.gitter_msg_level = self.rule.get('gitter_msg_level',", "response = requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies) warnings.resetwarnings() response.raise_for_status() except", "self.priority_ids[x] = priorities[x].id def set_assignee(self, assignee): self.assignee = assignee if", "\"\"\" Gets the username and password from an account file.", "self._add_top_counts() if self.rule.get('alert_text_type') != 'exclude_fields': self._add_match_items() return self.text class JiraFormattedMatchString(BasicMatchString):", "create_title(self, matches): \"\"\" Creates custom alert title to be used,", "self.rule.get('top_count_keys'): self._add_top_counts() if self.rule.get('alert_text_type') != 'exclude_fields': self._add_match_items() return self.text class", "backwards compatibility # while also giving the user-facing API a", "in self.rule['command']] else: command = [command_arg % matches[0] for command_arg", "each aggregation_key were encountered summary_table_fields_with_count = summary_table_fields + ['count'] text", "aggregation period for match in matches: key_tuple = tuple([unicode(lookup_es_key(match, key))", "'summary': self.ms_teams_alert_summary, 'title': self.create_title(matches), 'text': body } if self.ms_teams_theme_color !=", "self.rule.get('slack_username_override', 'elastalert') self.slack_channel_override = self.rule.get('slack_channel_override', '') self.slack_emoji_override = self.rule.get('slack_emoji_override', ':ghost:')", "__init__(self, rule): super(ServiceNowAlerter, self).__init__(rule) self.servicenow_rest_url = self.rule['servicenow_rest_url'] self.servicenow_proxy = self.rule.get('servicenow_proxy',", "'jira_project', 
'jira_server', 'jira_watchers', ] # Some built-in jira types that", "self.jira_args = {'project': {'key': self.project}, 'issuetype': {'name': self.issue_type}} if self.components:", "Run command and pipe data try: subp = subprocess.Popen(command, stdin=subprocess.PIPE,", "'') def format_body(self, body): # https://api.slack.com/docs/formatting body = body.encode('UTF-8') body", "+ self.rule['bcc'] try: if self.smtp_ssl: if self.smtp_port: self.smtp = SMTP_SSL(self.smtp_host,", "y) for x, y in self.match.items() if not x.startswith('top_events_')]) json_blob", "self.password) except (SMTPException, error) as e: raise EAException(\"Error connecting to", "self.hipchat_message_format == 'html': body = body.replace('\\n', '<br />') # Post", "qk = self.rule.get('query_key', None) fullmessage = {} for match in", "related to this alert. At minimum, this should contain a", "= self.rule['name'] fullmessage['matching'] = unicode(BasicMatchString(self.rule, match)) fullmessage['alertDate'] = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")", "as e: raise EAException(\"Error posting to pagerduty: %s\" % e)", "as e: raise EAException(\"Error posting to VictorOps: %s\" % e)", "%s at %s:' % (self.rule['name'], lookup_es_key(match, self.rule['timestamp_field']))) alerts.append( '2)Alert for", "jira field '{0}'\".format(normalized_jira_field)) arg_name = field['id'] # Check the schema", "list: self.resolve_rule_references(root[key]) else: root[key] = self.resolve_rule_reference(value) def resolve_rule_reference(self, value): strValue", "notification for each alert \"\"\" required_options = frozenset(['hipchat_auth_token', 'hipchat_room_id']) def", "self._add_match_items() return self.text class JiraFormattedMatchString(BasicMatchString): def _add_match_items(self): match_items = dict([(x,", "alert was triggered again at %s\\n%s\" % (timestamp, text) self.client.add_comment(ticket,", "for %s at %s:' % (self.rule['name'], lookup_es_key(match, self.rule['timestamp_field']))) 
alerts.append( '2)Alert", "is not covered by the set that we are aware", "%s, %s at %s:' % (self.rule['name'], match[qk], lookup_es_key(match, self.rule['timestamp_field'])) )", "ts_now() - datetime.timedelta(days=self.bump_after_inactivity) if ts_to_dt(ticket.fields.updated) >= inactivity_datetime: if self.pipeline is", "if self.hipchat_ignore_ssl_errors: requests.packages.urllib3.disable_warnings() response = requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, verify=not", "= self.rule['ms_teams_webhook_url'] if isinstance(self.ms_teams_webhook_url, basestring): self.ms_teams_webhook_url = [self.ms_teams_webhook_url] self.ms_teams_proxy =", "% (' '.join(command))) except OSError as e: raise EAException(\"Error while", "super(GitterAlerter, self).__init__(rule) self.gitter_webhook_url = self.rule['gitter_webhook_url'] self.gitter_proxy = self.rule.get('gitter_proxy', None) self.gitter_msg_level", "== dict or type(value) == list: self.resolve_rule_references(root[key]) else: root[key] =", "as an e-mail subject or JIRA issue summary. :param matches:", "for keys, count in match_aggregation.iteritems(): text_table.add_row([key for key in keys]", "value def alert(self, match): \"\"\" Send an alert. Match is", "[{'name': component} for component in self.components] if self.labels: # Support", "add '{0}' as a watcher. Does the user exist?\\n{1}\" .format(", "keyfile=self.smtp_key_file, certfile=self.smtp_cert_file) else: if self.smtp_port: self.smtp = SMTP(self.smtp_host, self.smtp_port) else:", "jira_bump_not_in_statuses (%s) are set.' 
% \\ (','.join(self.bump_in_statuses), ','.join(self.bump_not_in_statuses)) intersection =", "data try: subp = subprocess.Popen(command, stdin=subprocess.PIPE, shell=self.shell) if self.rule.get('pipe_match_json'): match_json", "body = body.encode('UTF-8') body = body.replace('&', '&amp;') body = body.replace('<',", "self.exotel_auth_token = self.rule['exotel_auth_token'] self.exotel_to_number = self.rule['exotel_to_number'] self.exotel_from_number = self.rule['exotel_from_number'] self.sms_body", "for i in xrange(len(alert_text_values)): if alert_text_values[i] is None: alert_value =", "'.join(self.last_command)} class SnsAlerter(Alerter): \"\"\" Send alert using AWS SNS service", "to set the value correctly # If the schema information", "= 'https://alert.victorops.com/integrations/generic/20131114/alert/%s/%s' % ( self.victorops_api_key, self.victorops_routing_key) self.victorops_proxy = self.rule.get('victorops_proxy', None)", "self.ms_teams_webhook_url = self.rule['ms_teams_webhook_url'] if isinstance(self.ms_teams_webhook_url, basestring): self.ms_teams_webhook_url = [self.ms_teams_webhook_url] self.ms_teams_proxy", "{'name': assignee} elif 'assignee' in self.jira_args: self.jira_args.pop('assignee') def find_existing_ticket(self, matches):", "to_addr, email_msg.as_string()) self.smtp.close() elastalert_logger.info(\"Sent email to %s\" % (to_addr)) def", "subp.wait(): raise EAException(\"Non-zero exit code while running command %s\" %", "a follow-up action if self.watchers: for watcher in self.watchers: try:", "for match in matches: key_tuple = tuple([unicode(lookup_es_key(match, key)) for key", "value] # Also attempt to handle arrays of complex types", "couldn't find that type? 
# OR raise and fail to", "self.servicenow_proxy = self.rule.get('servicenow_proxy', None) def alert(self, matches): for match in", "elastalert_logger.info(\"Sent sns notification to %s\" % (self.sns_topic_arn)) class HipChatAlerter(Alerter): \"\"\"", "since we don't know how to set it # Note", "while also giving the user-facing API a more representative name", "def _add_top_counts(self): for key, counts in self.match.items(): if key.startswith('top_events_'): self.text", "formatdate from smtplib import SMTP from smtplib import SMTP_SSL from", "there is a cc then also convert it a list", "self.slack_parse_override = self.rule.get('slack_parse_override', 'none') self.slack_text_string = self.rule.get('slack_text_string', '') def format_body(self,", "self).get_aggregation_summary_text(matches) if text: text = u'{{noformat}}{0}{{noformat}}'.format(text) return text def create_default_title(self,", "!= 'exclude_fields': self._add_match_items() return self.text class JiraFormattedMatchString(BasicMatchString): def _add_match_items(self): match_items", "stomp to a broker. \"\"\" required_options = frozenset(['stomp_hostname', 'stomp_hostport', 'stomp_login',", "have common statuses of (%s). 
As such, no tickets will", "= self.rule.get('http_post_payload', {}) self.post_static_payload = self.rule.get('http_post_static_payload', {}) self.post_all_values = self.rule.get('http_post_all_values',", "follow-up action if self.watchers: for watcher in self.watchers: try: self.client.add_watcher(self.issue.key,", "\"Alert sent to Telegram room %s\" % self.telegram_room_id) def get_info(self):", "if isinstance(post_url, basestring): post_url = [post_url] self.post_url = post_url self.post_proxy", "( self.hipchat_domain, self.hipchat_room_id, self.hipchat_auth_token) self.hipchat_proxy = self.rule.get('hipchat_proxy', None) def alert(self,", "'api.telegram.org') self.url = 'https://%s/bot%s/%s' % (self.telegram_api_url, self.telegram_bot_token, \"sendMessage\") self.telegram_proxy =", "= [int(v) for v in value] # Also attempt to", "isinstance(self.rule['email'], basestring): self.rule['email'] = [self.rule['email']] # If there is a", "of each aggregation_key were encountered summary_table_fields_with_count = summary_table_fields + ['count']", "isinstance(recipient, list): to_addr = recipient if 'email_add_domain' in self.rule: to_addr", "a query_key, add that value plus timestamp to subject if", "'%' in self.rule['command']: logging.warning('Warning! You could be vulnerable to shell", "def create_default_title(self, matches): subject = 'ElastAlert: %s' % (self.rule['name']) return", "!= 200: raise EAException(\"Error posting to Exotel, response code is", "self.rule['summary_table_fields'] if not isinstance(summary_table_fields, list): summary_table_fields = [summary_table_fields] # Include", "None: self.pipeline['jira_ticket'] = None self.pipeline['jira_server'] = self.server return None elastalert_logger.info('Commenting", "as an object. 
if 'custom' in field['schema'] and field['schema']['custom'] in", "proxies=proxies) response.raise_for_status() except RequestException as e: raise EAException(\"Error posting to", "from socket import error import boto3 import requests import stomp", "v in value] # Also attempt to handle arrays of", "= lookup_es_key(match, self.rule['timestamp_field']) elastalert_logger.info(unicode(BasicMatchString(self.rule, match))) fullmessage['alerts'] = alerts fullmessage['rule'] =", "self.gitter_proxy = self.rule.get('gitter_proxy', None) self.gitter_msg_level = self.rule.get('gitter_msg_level', 'error') def alert(self,", "list of dictionaries of relevant information to the alert. \"\"\"", "'exotel', 'exotel_account': self.exotel_account_sid} class TwilioAlerter(Alerter): required_options = frozenset(['twilio_account_sid', 'twilio_auth_token', 'twilio_to_number',", "Exotel, response code is %s\" % response) except: raise EAException(\"Error", "elastalert_logger.info('Alert for %s at %s:' % (self.rule['name'], lookup_es_key(match, self.rule['timestamp_field']))) elastalert_logger.info(unicode(BasicMatchString(self.rule,", "self.profile = self.rule.get('boto_profile', None) # Deprecated self.profile = self.rule.get('aws_profile', None)", "proxies=proxies ) response.raise_for_status() except RequestException as e: raise EAException(\"Error posting", "arg_type in ['string', 'date', 'datetime']: # Special case for custom", "= ', '.join(to_addr) email_msg['From'] = self.from_addr email_msg['Reply-To'] = self.rule.get('email_reply_to', email_msg['To'])", "us to maintain backwards compatibility # while also giving the", "def alert(self, matches): # Format the command and arguments try:", "'': payload['themeColor'] = self.ms_teams_theme_color for url in self.ms_teams_webhook_url: try: response", "} # set https proxy, if it was provided proxies", "self.rule['query_key']): title = 'ElastAlert: %s matched %s' % (lookup_es_key(matches[0], self.rule['query_key']),", "== int: return 
int(self.rule[strValue[1:-1]]) else: return self.rule[strValue[1:-1]] else: return value", "a mapping of priority index to id. \"\"\" priorities =", "email_msg['Subject'] = self.create_title(matches) email_msg['To'] = ', '.join(to_addr) email_msg['From'] = self.from_addr", "% (str(e)[:1024])) try: if self.priority is not None: self.jira_args['priority'] =", "if self.pipeline is not None: self.pipeline['jira_ticket'] = ticket self.pipeline['jira_server'] =", "list: value = [value] array_items = field['schema']['items'] # Simple string", "'servicenow_rest_url', 'short_description', 'comments', 'assignment_group', 'category', 'subcategory', 'cmdb_ci', 'caller_id' ]) def", "self.jira_args['components'] = [{'name': component} for component in self.components] if self.labels:", "return self.pagerduty_incident_key def get_info(self): return {'type': 'pagerduty', 'pagerduty_client_name': self.pagerduty_client_name} class", "'%s: %s\\n' % (term, count) self.text += '\\n' def _add_match_items(self):", "it is either: # 1. 
A built-in supported field in", "indent=4, ensure_ascii=False) except UnicodeDecodeError: # This blob contains non-unicode, so", "matches): \"\"\" Each match will trigger a POST to the", "= self.format_body(body) # post to Teams headers = {'content-type': 'application/json'}", "rule properties # This technically may not work if there", "and custom ones) fields = self.client.fields() for jira_field, value in", "ticket: %s\" % (self.issue)) if self.pipeline is not None: self.pipeline['jira_ticket']", "{'name': self.issue_type}} if self.components: # Support single component or list", "= self.rule.get('jira_labels', self.rule.get('jira_label')) self.description = self.rule.get('jira_description', '') self.assignee = self.rule.get('jira_assignee')", "'exclude_fields': self._add_match_items() return self.text class JiraFormattedMatchString(BasicMatchString): def _add_match_items(self): match_items =", "return {'type': 'pagerduty', 'pagerduty_client_name': self.pagerduty_client_name} class ExotelAlerter(Alerter): required_options = frozenset(['exotel_account_sid',", "'datetime']: # Special case for custom types (the JIRA metadata", "rule. 
\"\"\" def __init__(self, rule, match): self.rule = rule self.match", "using jira_args (%s): %s\" % (self.jira_args, e)) elastalert_logger.info(\"Opened Jira ticket:", "self.telegram_room_id) def get_info(self): return {'type': 'telegram', 'telegram_room_id': self.telegram_room_id} class GitterAlerter(Alerter):", "def __init__(self, rule): super(HipChatAlerter, self).__init__(rule) self.hipchat_msg_color = self.rule.get('hipchat_msg_color', 'red') self.hipchat_message_format", "top_events = counts.items() if not top_events: self.text += 'No events", "try to set a string value known_field_list = [ 'jira_account_file',", "val = lookup_es_key(self.match, name) # Support referencing other top-level rule", "Post alert: %s\" % e) elastalert_logger.info(\"HTTP Post alert sent.\") def", "to Teams headers = {'content-type': 'application/json'} # set https proxy,", "jql = '%s and status in (%s)' % (jql, ','.join(self.bump_in_statuses))", "= self.rule.get('from_addr', 'ElastAlert') self.smtp_port = self.rule.get('smtp_port') if self.rule.get('smtp_auth_file'): self.get_account(self.rule['smtp_auth_file']) self.smtp_key_file", "body = \"```{0}```\".format('```\\n\\n```'.join(x for x in body.split('\\n'))).replace('\\n``````', '') return body", "https://api.slack.com/docs/formatting body = body.encode('UTF-8') body = body.replace('&', '&amp;') body =", "self.rule.get('cc') if cc and isinstance(cc, basestring): self.rule['cc'] = [self.rule['cc']] #", "(term, count) self.text += '\\n' def _add_match_items(self): match_items = self.match.items()", "self.text = '' if 'alert_text' not in self.rule: self.text +=", "{ \"Content-Type\": \"application/json\", \"Accept\": \"application/json;charset=utf-8\" } proxies = {'https': self.post_proxy}", "match_aggregation = {} # Maintain an aggregate count for each", "= True def alert(self, matches): # Format the command and", "format_body(self, body): # https://api.slack.com/docs/formatting body = body.encode('UTF-8') body = 
body.replace('&',", "required_options = set([ 'username', 'password', 'servicenow_rest_url', 'short_description', 'comments', 'assignment_group', 'category',", "if key.startswith('top_events_'): continue value_str = unicode(value) value_str.replace('\\\\n', '\\n') if type(value)", "resolve_rule_reference(self, value): strValue = unicode(value) if strValue.startswith('$') and strValue.endswith('$') and", "in self.rule['command']] self.last_command = command except KeyError as e: raise", "%s\" % (e)) # Run command and pipe data try:", "built-in supported field in JIRA that we don't have on", "Support referencing other top-level rule properties # This technically may", "ticket for each alert \"\"\" required_options = frozenset(['jira_server', 'jira_account_file', 'jira_project',", "= self.rule.get('boto_profile', None) # Deprecated self.profile = self.rule.get('aws_profile', None) def", "enumerate(copy.copy(root)): if type(item) == dict or type(item) == list: self.resolve_rule_references(root[i])", "'%s:\\n' % (key[11:]) top_events = counts.items() if not top_events: self.text", "def get_info(self): return {'type': 'stomp'} class DebugAlerter(Alerter): \"\"\" The debug", "self.ms_teams_webhook_url} class SlackAlerter(Alerter): \"\"\" Creates a Slack room message for", "e: raise EAException(\"Error posting to pagerduty: %s\" % e) elastalert_logger.info(\"Trigger", "ms teams: %s\" % e) elastalert_logger.info(\"Alert sent to MS Teams\")", "POST to the specified endpoint(s). 
\"\"\" for match in matches:", "user-facing API a more representative name self.labels = self.rule.get('jira_labels', self.rule.get('jira_label'))", "= self.create_alert_body(matches) # post to victorops headers = {'content-type': 'application/json'}", "alerts.append( '1)Alert for %s, %s at %s:' % (self.rule['name'], match[qk],", "match) except JIRAError as e: logging.exception(\"Error while commenting on ticket", "Exotel\") def get_info(self): return {'type': 'exotel', 'exotel_account': self.exotel_account_sid} class TwilioAlerter(Alerter):", "is not available, raise an exception since we don't know", "self.rule: to_addr = [recipient + self.rule['email_add_domain']] elif isinstance(recipient, list): to_addr", "self.jira_args[arg_name] = {'value': value} else: self.jira_args[arg_name] = value # Number", "= ts_now() - datetime.timedelta(days=self.bump_after_inactivity) if ts_to_dt(ticket.fields.updated) >= inactivity_datetime: if self.pipeline", "a string containing fields in match for the given rule.", "for v in value] else: self.jira_args[arg_name] = value elif array_items", "= alert_text.format(*alert_text_values) elif 'alert_text_kw' in self.rule: kw = {} for", "'pagerduty_client_name']) def __init__(self, rule): super(PagerDutyAlerter, self).__init__(rule) self.pagerduty_service_key = self.rule['pagerduty_service_key'] self.pagerduty_client_name", "schema information is not available, raise an exception since we", "or the other.' 
logging.warning(msg) self.jira_args = {'project': {'key': self.project}, 'issuetype':", "(self.priority, self.priority_ids.keys())) def get_arbitrary_fields(self): # This API returns metadata about", "required_options = frozenset(['exotel_account_sid', 'exotel_auth_token', 'exotel_to_number', 'exotel_from_number']) def __init__(self, rule): super(ExotelAlerter,", "class JiraFormattedMatchString(BasicMatchString): def _add_match_items(self): match_items = dict([(x, y) for x,", "json.dumps(blob, cls=DateTimeEncoder, sort_keys=True, indent=4, encoding='Latin-1', ensure_ascii=False) def __str__(self): self.text =", "else None payload = { \"description\": description, \"short_description\": self.rule['short_description'], \"comments\":", "are required to be provided as an object. if 'custom'", "= self.rule.get('sns_topic_arn', '') self.aws_access_key_id = self.rule.get('aws_access_key_id') self.aws_secret_access_key = self.rule.get('aws_secret_access_key') self.aws_region", "type corresponding to the type of Alerter. \"\"\" return {'type':", "frozenset(['ms_teams_webhook_url', 'ms_teams_alert_summary']) def __init__(self, rule): super(MsTeamsAlerter, self).__init__(rule) self.ms_teams_webhook_url = self.rule['ms_teams_webhook_url']", "%s\" % (self.issue)) if self.pipeline is not None: self.pipeline['jira_ticket'] =", "e.g. as an e-mail subject or JIRA issue summary. 
:param", "alert using AWS SNS service \"\"\" required_options = frozenset(['sns_topic_arn']) def", "required_options = frozenset(['hipchat_auth_token', 'hipchat_room_id']) def __init__(self, rule): super(HipChatAlerter, self).__init__(rule) self.hipchat_msg_color", "each alert \"\"\" required_options = frozenset(['victorops_api_key', 'victorops_routing_key', 'victorops_message_type']) def __init__(self,", "u'{{noformat}}{0}{{noformat}}'.format(text) return text def create_default_title(self, matches, for_search=False): # If there", "match_aggregation[key_tuple] + 1 for keys, count in match_aggregation.iteritems(): text_table.add_row([key for", "self.rule['timestamp_field']) elastalert_logger.info(unicode(BasicMatchString(self.rule, match))) fullmessage['alerts'] = alerts fullmessage['rule'] = self.rule['name'] fullmessage['matching']", "/>') # Post to HipChat headers = {'content-type': 'application/json'} #", "do best-effort and try to set a string value known_field_list", "field e.g. jira_labels: Only_One_Label if type(value) != list: value =", "proxies=proxies) warnings.resetwarnings() response.raise_for_status() except RequestException as e: raise EAException(\"Error posting", "post to slack headers = {'content-type': 'application/json'} # set https", "'command', 'command': ' '.join(self.last_command)} class SnsAlerter(Alerter): \"\"\" Send alert using", "a more representative name self.labels = self.rule.get('jira_labels', self.rule.get('jira_label')) self.description =", "for match in matches: body += unicode(JiraFormattedMatchString(self.rule, match)) if len(matches)", "x[1], reverse=True) for term, count in top_events: self.text += '%s:", "top_events: self.text += '%s: %s\\n' % (term, count) self.text +=", "search to work. 
Other special characters and dashes # directly", "get_info(self): return {'type': 'jira'} class CommandAlerter(Alerter): required_options = set(['command']) def", "= requests.post(url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies) response.raise_for_status() except RequestException as", "a rule before calling alert() self.pipeline = None self.resolve_rule_references(self.rule) def", "= [{'value': v} for v in value] else: # Try", "if self.rule.get('pipe_match_json'): match_json = json.dumps(matches, cls=DateTimeEncoder) + '\\n' stdout, stderr", "except RequestException as e: raise EAException(\"Error posting HTTP Post alert:", "used as custom fields require special handling # Here is", "frozenset([]) def __init__(self, rule): self.rule = rule # pipeline object", "subprocess.Popen(command, stdin=subprocess.PIPE, shell=self.shell) if self.rule.get('pipe_match_json'): match_json = json.dumps(matches, cls=DateTimeEncoder) +", "self.jira_args.pop('assignee') def find_existing_ticket(self, matches): # Default title, get stripped search", "= frozenset(['ms_teams_webhook_url', 'ms_teams_alert_summary']) def __init__(self, rule): super(MsTeamsAlerter, self).__init__(rule) self.ms_teams_webhook_url =", "= requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies) response.raise_for_status() except RequestException as", "return self.rule['name'] def get_account(self, account_file): \"\"\" Gets the username and", "using AWS SNS service \"\"\" required_options = frozenset(['sns_topic_arn']) def __init__(self,", "match will trigger a POST to the specified endpoint(s). 
\"\"\"", "self.rule and lookup_es_key(matches[0], self.rule['query_key']): title = 'ElastAlert: %s matched %s'", "self.jira_args[arg_name] = [{'value': v} for v in value] else: self.jira_args[arg_name]", "to set # For anything else, we will do best-effort", "= unicode(JiraFormattedMatchString(self.rule, match)) timestamp = pretty_ts(lookup_es_key(match, self.rule['timestamp_field'])) comment = \"This", "= [value] array_items = field['schema']['items'] # Simple string types if", "elif arg_type == 'number': self.jira_args[arg_name] = int(value) elif arg_type ==", "to Telegram room %s\" % self.telegram_room_id) def get_info(self): return {'type':", "self.rule.get('bcc'): to_addr = to_addr + self.rule['bcc'] try: if self.smtp_ssl: if", "body = body[:9980] + '..(truncated)' # Use appropriate line ending", "Creates custom alert title to be used, e.g. as an", "} try: if self.hipchat_ignore_ssl_errors: requests.packages.urllib3.disable_warnings() response = requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder),", "period for match in matches: key_tuple = tuple([unicode(lookup_es_key(match, key)) for", "self.ms_teams_theme_color = self.rule.get('ms_teams_theme_color', '') def format_body(self, body): body = body.encode('UTF-8')", "= '%s/browse/%s' % (self.pipeline['jira_server'], self.pipeline['jira_ticket']) body += '\\nJIRA ticket: %s'", "'\\n' stdout, stderr = subp.communicate(input=match_json) if self.rule.get(\"fail_on_non_zero_exit\", False) and subp.wait():", "JIRA that we don't have on our radar # 2.", "raise EAException(\"Error posting to Telegram: %s\" % e) elastalert_logger.info( \"Alert", "self.rule.get('http_post_proxy') self.post_payload = self.rule.get('http_post_payload', {}) self.post_static_payload = self.rule.get('http_post_static_payload', {}) self.post_all_values", "summary_table_fields]) if key_tuple not in match_aggregation: match_aggregation[key_tuple] = 1 else:", "= self.rule.get('jira_bump_tickets', False) self.bump_not_in_statuses 
= self.rule.get('jira_bump_not_in_statuses') self.bump_in_statuses = self.rule.get('jira_bump_in_statuses') self.bump_after_inactivity", "of aggregated alerts with dashes if len(matches) > 1: body", "see at a glance how many of each aggregation_key were", "fields that we explicitly know how to set # For", "in the aggregation period for match in matches: key_tuple =", "would have been matched in the lookup_es_key call above for", "or type(item) == list: self.resolve_rule_references(root[i]) else: root[i] = self.resolve_rule_reference(item) elif", "= { '@type': 'MessageCard', '@context': 'http://schema.org/extensions', 'summary': self.ms_teams_alert_summary, 'title': self.create_title(matches),", "determine schema information for the jira field '{0}'\".format(normalized_jira_field)) arg_type =", "import formatdate from smtplib import SMTP from smtplib import SMTP_SSL", "{ 'message': body, 'level': self.gitter_msg_level } try: response = requests.post(self.gitter_webhook_url,", "self.get_account(self.rule['jira_account_file']) self.project = self.rule['jira_project'] self.issue_type = self.rule['jira_issuetype'] # We used", "= self.rule.get('slack_proxy', None) self.slack_username_override = self.rule.get('slack_username_override', 'elastalert') self.slack_channel_override = self.rule.get('slack_channel_override',", "{'type': 'debug'} class EmailAlerter(Alerter): \"\"\" Sends an email alert \"\"\"", "Gitter activity message for each alert \"\"\" required_options = frozenset(['gitter_webhook_url'])", "account file. 
:param account_file: Name of the file which contains", "def _add_rule_text(self): self.text += self.rule['type'].get_match_str(self.match) def _add_top_counts(self): for key, counts", "PagerDuty\") def get_incident_key(self, matches): if self.pagerduty_incident_key_args: incident_key_values = [lookup_es_key(matches[0], arg)", "socket import error import boto3 import requests import stomp from", "self.resolve_rule_reference(value) def resolve_rule_reference(self, value): strValue = unicode(value) if strValue.startswith('$') and", "in (%s)' % (jql, ','.join(self.bump_not_in_statuses)) try: issues = self.client.search_issues(jql) except", "arguments try: if self.new_style_string_format: command = [command_arg.format(match=matches[0]) for command_arg in", "part. Convert underscores to spaces normalized_jira_field = jira_field[5:].replace('_', ' ').lower()", "' ').lower() # All jira fields should be found in", "body = self.create_alert_body(matches) # Add JIRA ticket if it exists", "add that value plus timestamp to subject if 'query_key' in", "room %s\" % self.telegram_room_id) def get_info(self): return {'type': 'telegram', 'telegram_room_id':", "text/html if self.hipchat_message_format == 'html': body = body.replace('\\n', '<br />')", "def create_default_title(self, matches, for_search=False): # If there is a query_key,", "self.rule.get('from_addr', 'ElastAlert') self.smtp_port = self.rule.get('smtp_port') if self.rule.get('smtp_auth_file'): self.get_account(self.rule['smtp_auth_file']) self.smtp_key_file =", "= requests.post(self.gitter_webhook_url, json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies) response.raise_for_status() except RequestException as", "+= self.get_aggregation_summary_text(matches) for match in matches: body += unicode(JiraFormattedMatchString(self.rule, match))", "adjacent to words appear to be ok title = title.replace('", "i in xrange(len(alert_subject_values)): if alert_subject_values[i] is None: alert_value = 
self.rule.get(alert_subject_args[i])", "kw[kw_name] = missing if val is None else val alert_text", "self.hipchat_ignore_ssl_errors, proxies=proxies) warnings.resetwarnings() response.raise_for_status() except RequestException as e: raise EAException(\"Error", "with JSON. \"\"\" def __init__(self, rule): super(HTTPPostAlerter, self).__init__(rule) post_url =", "key, value in root.copy().iteritems(): if type(value) == dict or type(value)", "from staticconf.loader import yaml_loader from texttable import Texttable from twilio.base.exceptions", "non-unicode, so lets pretend it's Latin-1 to show something return", "for component in self.components] if self.labels: # Support single label", "list if type(self.watchers) != list: self.watchers = [self.watchers] if self.assignee:", "# Special case for multi-select custom types (the JIRA metadata", "\"\"\" Creates a Microsoft Teams Conversation Message for each alert", "proxies = {'https': self.post_proxy} if self.post_proxy else None for url", "ex: # Re-raise the exception, preserve the stack-trace, and give", "self.rule.get(name) kw[kw_name] = missing if val is None else val", "= match[qk] else: elastalert_logger.info('Alert for %s at %s:' % (self.rule['name'],", "(','.join(self.bump_in_statuses), ','.join(self.bump_not_in_statuses)) intersection = list(set(self.bump_in_statuses) & set(self.bump_in_statuses)) if intersection: msg", "self.smtp_port = self.rule.get('smtp_port') if self.rule.get('smtp_auth_file'): self.get_account(self.rule['smtp_auth_file']) self.smtp_key_file = self.rule.get('smtp_key_file') self.smtp_cert_file", "self.rule.get('alert_text_args') alert_text_values = [lookup_es_key(self.match, arg) for arg in alert_text_args] #", "we are aware of, it means it is either: #", "was provided proxies = {'https': self.pagerduty_proxy} if self.pagerduty_proxy else None", "A custom field that a JIRA admin has configured if", "in self.rule: self.text += self.rule['name'] + '\\n\\n' self._add_custom_alert_text() 
self._ensure_new_line() if", "priorities = self.client.priorities() self.priority_ids = {} for x in range(len(priorities)):", "!= '': payload['themeColor'] = self.ms_teams_theme_color for url in self.ms_teams_webhook_url: try:", "self.rule['email']} class JiraAlerter(Alerter): \"\"\" Creates a Jira ticket for each", "for a multi-value field e.g. jira_labels: Only_One_Label if type(value) !=", "the lookup_es_key call above for i in xrange(len(alert_subject_values)): if alert_subject_values[i]", "issuekey and id: thumbnail if not ('schema' in field or", "of Alerter. \"\"\" return {'type': 'Unknown'} def create_title(self, matches): \"\"\"", "each unique key encountered in the aggregation period for match", "'victorops_routing_key', 'victorops_message_type']) def __init__(self, rule): super(VictorOpsAlerter, self).__init__(rule) self.victorops_api_key = self.rule['victorops_api_key']", "body def get_aggregation_summary_text(self, matches): text = '' if 'aggregation' in", "We used to support only a single component. 
This allows", "'isoformat'): return obj.isoformat() else: return json.JSONEncoder.default(self, obj) class BasicMatchString(object): \"\"\"", "A built-in supported field in JIRA that we don't have", "+ '\\n' stdout, stderr = subp.communicate(input=match_json) if self.rule.get(\"fail_on_non_zero_exit\", False) and", "class PagerDutyAlerter(Alerter): \"\"\" Create an incident on PagerDuty for each", "= unicode(self.rule.get('alert_text', '')) if 'alert_text_args' in self.rule: alert_text_args = self.rule.get('alert_text_args')", "match)) # Set proper headers headers = { \"Content-Type\": \"application/json\",", "= self.rule.get('smtp_host', 'localhost') self.smtp_ssl = self.rule.get('smtp_ssl', False) self.from_addr = self.rule.get('from_addr',", "(e)) except SMTPAuthenticationError as e: raise EAException(\"SMTP username/password rejected: %s\"", "\"comments\": self.rule['comments'], \"assignment_group\": self.rule['assignment_group'], \"category\": self.rule['category'], \"subcategory\": self.rule['subcategory'], \"cmdb_ci\": self.rule['cmdb_ci'],", "self.jira_args[arg_name] = [{'value': v} for v in value] else: #", "try: self.client.add_watcher(self.issue.key, watcher) except Exception as ex: # Re-raise the", "% (self.rule['name']) return subject def alert(self, matches): body = self.create_alert_body(matches)", "if strValue.startswith('$') and strValue.endswith('$') and strValue[1:-1] in self.rule: if type(value)", "sample of one of them: # {\"id\":\"customfield_12807\",\"name\":\"My Custom Field\",\"custom\":true,\"orderable\":true,\"navigable\":true,\"searchable\":true, #", "summary_table_fields + ['count'] text += \"Aggregation resulted in the following", "as e: raise EAException(\"Error posting to ServiceNow: %s\" % e)", "to Gitter headers = {'content-type': 'application/json'} # set https proxy,", "self._add_rule_text() self._ensure_new_line() if self.rule.get('top_count_keys'): self._add_top_counts() if self.rule.get('alert_text_type') != 'exclude_fields': 
self._add_match_items()", "for the given rule. \"\"\" def __init__(self, rule, match): self.rule", "Format the command and arguments try: if self.new_style_string_format: command =", "json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies) response.raise_for_status() except RequestException as e: raise", "thumbnail if not ('schema' in field or 'type' in field['schema']):", "dictionaries of relevant information to the alert. \"\"\" if 'alert_subject'", "self.rule.get('smtp_key_file') self.smtp_cert_file = self.rule.get('smtp_cert_file') # Convert email to a list", "in matches: if qk in match: elastalert_logger.info( 'Alert for %s,", "'ElastAlert: %s matched %s' % (lookup_es_key(matches[0], self.rule['query_key']), self.rule['name']) else: title", "type(self.components) != list: self.jira_args['components'] = [{'name': self.components}] else: self.jira_args['components'] =", "JIRAError as e: logging.exception(\"Error while commenting on ticket %s: %s\"", "Maintain an aggregate count for each unique key encountered in", "self.rule['jira_server'] self.get_account(self.rule['jira_account_file']) self.project = self.rule['jira_project'] self.issue_type = self.rule['jira_issuetype'] # We", "= self.rule['hipchat_auth_token'] self.hipchat_room_id = self.rule['hipchat_room_id'] self.hipchat_domain = self.rule.get('hipchat_domain', 'api.hipchat.com') self.hipchat_ignore_ssl_errors", "self.text += preformatted_text class Alerter(object): \"\"\" Base class for types", "False if isinstance(self.rule['command'], basestring): self.shell = True if '%' in", "# You can not add watchers on initial creation. 
Only", "= rule self.match = match def _ensure_new_line(self): while self.text[-2:] !=", "self.text += '%s: %s\\n' % (key, value_str) def _pretty_print_as_json(self, blob):", "% (lookup_es_key(matches[0], self.rule['query_key']), self.rule['name']) else: title = 'ElastAlert: %s' %", "# while also giving the user-facing API a more representative", "'MessageCard', '@context': 'http://schema.org/extensions', 'summary': self.ms_teams_alert_summary, 'title': self.create_title(matches), 'text': body }", "= subp.communicate(input=match_json) if self.rule.get(\"fail_on_non_zero_exit\", False) and subp.wait(): raise EAException(\"Non-zero exit", "else None payload = { \"message_type\": self.victorops_message_type, \"entity_display_name\": self.victorops_entity_display_name, \"monitoring_tool\":", "JIRA ticket using jql '%s': %s\" % (jql, e)) return", "aware of, it means it is either: # 1. A", "else: root[key] = self.resolve_rule_reference(value) def resolve_rule_reference(self, value): strValue = unicode(value)", "Here is a sample of one of them: # {\"id\":\"customfield_12807\",\"name\":\"My", "API a more representative name self.labels = self.rule.get('jira_labels', self.rule.get('jira_label')) self.description", "'admin') self.stomp_password = self.rule.get('stomp_password', '<PASSWORD>') self.stomp_destination = self.rule.get('stomp_destination', '/queue/ALERT') conn", "'custom' in field['schema'] and field['schema']['custom'] in self.custom_string_types_with_special_handling: self.jira_args[arg_name] = [{'value':", "'telegram_room_id']) def __init__(self, rule): super(TelegramAlerter, self).__init__(rule) self.telegram_bot_token = self.rule['telegram_bot_token'] self.telegram_room_id", "if isinstance(self.rule['email'], basestring): self.rule['email'] = [self.rule['email']] # If there is", "a POST to the specified endpoint(s). 
\"\"\" for match in", "account_conf: raise EAException('Account file must have user and password fields')", "id: thumbnail if not ('schema' in field or 'type' in", "radar # 2. A custom field that a JIRA admin", "shell injection!') self.rule['command'] = [self.rule['command']] self.new_style_string_format = False if 'new_style_string_format'", "alert \"\"\" required_options = frozenset(['telegram_bot_token', 'telegram_room_id']) def __init__(self, rule): super(TelegramAlerter,", "%s matched %s' % (lookup_es_key(matches[0], self.rule['query_key']), self.rule['name']) else: title =", "self).__init__(*args) self.smtp_host = self.rule.get('smtp_host', 'localhost') self.smtp_ssl = self.rule.get('smtp_ssl', False) self.from_addr", "def _add_match_items(self): match_items = self.match.items() match_items.sort(key=lambda x: x[0]) for key,", "v in value] # Handle non-array types else: # Simple", "stack-trace, and give some # context as to which watcher", "'ElastAlert Message') self.ms_teams_alert_fixed_width = self.rule.get('ms_teams_alert_fixed_width', False) self.ms_teams_theme_color = self.rule.get('ms_teams_theme_color', '')", "self.format_body(body) # post to Teams headers = {'content-type': 'application/json'} #", "= {'https': self.ms_teams_proxy} if self.ms_teams_proxy else None payload = {", "self.rule.get('email_reply_to', email_msg['To']) email_msg['Date'] = formatdate() if self.rule.get('cc'): email_msg['CC'] = ','.join(self.rule['cc'])", "match if self.post_all_values else {} payload.update(self.post_static_payload) for post_key, es_key in", "self.rule['timestamp_field'])) comment = \"This alert was triggered again at %s\\n%s\"", "requests.post( self.url, data=json.dumps(payload, cls=DateTimeEncoder, ensure_ascii=False), headers=headers, proxies=proxies ) response.raise_for_status() except", "Creates a VictorOps Incident for each alert \"\"\" required_options =", "while running command %s\" % (' '.join(command))) except OSError as", 
"elastalert_logger.info(\"Trigger sent to VictorOps\") def get_info(self): return {'type': 'victorops', 'victorops_routing_key':", "rule self.match = match def _ensure_new_line(self): while self.text[-2:] != '\\n\\n':", "Add count for spikes count = matches[0].get('spike_count') if count: title", "Telegram: %s\" % e) elastalert_logger.info( \"Alert sent to Telegram room", "self.rule.get('jira_max_age', 30) self.priority = self.rule.get('jira_priority') self.bump_tickets = self.rule.get('jira_bump_tickets', False) self.bump_not_in_statuses", "will need to be updated on a case-by-case basis custom_string_types_with_special_handling", "subject = 'ElastAlert: %s' % (self.rule['name']) return subject def alert(self,", "email alert \"\"\" required_options = frozenset(['email']) def __init__(self, *args): super(EmailAlerter,", "and id: thumbnail if not ('schema' in field or 'type'", "class Alerter(object): \"\"\" Base class for types of alerts. :param", "on PagerDuty for each alert \"\"\" required_options = frozenset(['pagerduty_service_key', 'pagerduty_client_name'])", "if hasattr(obj, 'isoformat'): return obj.isoformat() else: return json.JSONEncoder.default(self, obj) class", "would have been matched in the lookup_es_key call above if", "\"application/json;charset=utf-8\" } proxies = {'https': self.post_proxy} if self.post_proxy else None", "\"\"\" Creates a string containing fields in match for the", "if self.smtp_ssl: if self.smtp_port: self.smtp = SMTP_SSL(self.smtp_host, self.smtp_port, keyfile=self.smtp_key_file, certfile=self.smtp_cert_file)", "match): self.rule = rule self.match = match def _ensure_new_line(self): while", "= self.rule.get(alert_text_args[i]) if alert_value: alert_text_values[i] = alert_value alert_text_values = [missing", "self.text += 'No events found.\\n' else: top_events.sort(key=lambda x: x[1], reverse=True)", "else: self.smtp = SMTP(self.smtp_host) self.smtp.ehlo() if self.smtp.has_extn('STARTTLS'): 
self.smtp.starttls(keyfile=self.smtp_key_file, certfile=self.smtp_cert_file) if", "__init__(self, rule): super(ExotelAlerter, self).__init__(rule) self.exotel_account_sid = self.rule['exotel_account_sid'] self.exotel_auth_token = self.rule['exotel_auth_token']", "'' if 'alert_text' not in self.rule: self.text += self.rule['name'] +", "incident_key_values = ['<MISSING VALUE>' if val is None else val", "self.create_default_title(matches, True) else: title = self.create_title(matches) if 'jira_ignore_in_title' in self.rule:", "self.rule.get('jira_bump_tickets', False) self.bump_not_in_statuses = self.rule.get('jira_bump_not_in_statuses') self.bump_in_statuses = self.rule.get('jira_bump_in_statuses') self.bump_after_inactivity =", "except JIRAError as e: logging.exception(\"Error while commenting on ticket %s:", "not None: self.pipeline['jira_ticket'] = self.issue self.pipeline['jira_server'] = self.server def create_alert_body(self,", "'%s Both have common statuses of (%s). As such, no", "except RequestException as e: raise EAException(\"Error posting to ms teams:", "from email.mime.text import MIMEText from email.utils import formatdate from smtplib", "summary_table_fields ==> {0}:\\n\\n\".format( summary_table_fields_with_count ) text_table = Texttable() text_table.header(summary_table_fields_with_count) match_aggregation", "self.smtp = SMTP_SSL(self.smtp_host, self.smtp_port, keyfile=self.smtp_key_file, certfile=self.smtp_cert_file) else: self.smtp = SMTP_SSL(self.smtp_host,", "''), '') # This is necessary for search to work.", "on ticket %s: %s\" % (ticket, e)) if self.pipeline is", "api for each alert \"\"\" required_options = frozenset(['telegram_bot_token', 'telegram_room_id']) def", "for x in body.split('\\n'))).replace('\\n``````', '') return body def alert(self, matches):", "self.password = account_conf['password'] class StompAlerter(Alerter): \"\"\" The stomp alerter publishes", "Probably the latter... 
raise Exception(\"Could not find a definition for", "copy import datetime import json import logging import subprocess import", "except RequestException as e: raise EAException(\"Error posting to slack: %s\"", "posting to twilio: %s\" % e) elastalert_logger.info(\"Trigger sent to Twilio\")", "if it was provided proxies = {'https': self.gitter_proxy} if self.gitter_proxy", "elastalert_logger.info( \"Alert sent to Telegram room %s\" % self.telegram_room_id) def", "on a case-by-case basis custom_string_types_with_special_handling = [ 'com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes', 'com.atlassian.jira.plugin.system.customfieldtypes:multiselect', 'com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons',", "fullmessage['rule'] = self.rule['name'] fullmessage['matching'] = unicode(BasicMatchString(self.rule, match)) fullmessage['alertDate'] = datetime.datetime.now().strftime(\"%Y-%m-%d", "response.raise_for_status() except RequestException as e: raise EAException(\"Error posting to HipChat:", "and dashes # directly adjacent to words appear to be", "text = u'{{noformat}}{0}{{noformat}}'.format(text) return text def create_default_title(self, matches, for_search=False): #", "self.get_aggregation_summary_text(matches) for match in matches: body += unicode(BasicMatchString(self.rule, match)) #", "!= '': payload['icon_url'] = self.slack_icon_url_override else: payload['icon_emoji'] = self.slack_emoji_override for", "self.rule.get('jira_description', '') self.assignee = self.rule.get('jira_assignee') self.max_age = self.rule.get('jira_max_age', 30) self.priority", "set(self.bump_in_statuses)) if intersection: msg = '%s Both have common statuses", "= (datetime.datetime.now() - datetime.timedelta(days=self.max_age)).strftime('%Y-%m-%d') jql = 'project=%s AND summary~\"%s\" and", "= self.create_default_title(matches, True) else: title = self.create_title(matches) if 'jira_ignore_in_title' in", "False if 'new_style_string_format' in self.rule and 
self.rule['new_style_string_format']: self.new_style_string_format = True", "(to_addr)) def create_default_title(self, matches): subject = 'ElastAlert: %s' % (self.rule['name'])", "e-mail subject or JIRA issue summary. :param matches: A list", "summary_table_fields = self.rule['summary_table_fields'] if not isinstance(summary_table_fields, list): summary_table_fields = [summary_table_fields]", "self.jira_args: self.jira_args.pop('assignee') def find_existing_ticket(self, matches): # Default title, get stripped", "give some # context as to which watcher failed to", "body def alert(self, matches): body = self.create_alert_body(matches) body = self.format_body(body)", "self.rule.get('jira_assignee') self.max_age = self.rule.get('jira_max_age', 30) self.priority = self.rule.get('jira_priority') self.bump_tickets =", "RequestException as e: raise EAException(\"Error posting to ms teams: %s\"", "values with rule level properties too for i in range(len(incident_key_values)):", "many of each aggregation_key were encountered summary_table_fields_with_count = summary_table_fields +", "load the alert entirely? Probably the latter... raise Exception(\"Could not", "dictionary of information about the alert. 
:param match: A dictionary", "lookup_es_key(match, self.rule['timestamp_field']))) alerts.append( '2)Alert for %s at %s:' % (self.rule['name'],", "value in match_items: if key.startswith('top_events_'): continue value_str = unicode(value) value_str.replace('\\\\n',", "Creates a Jira ticket for each alert \"\"\" required_options =", "identifier 'key' elif array_items == 'option': self.jira_args[arg_name] = [{'value': v}", "self.ms_teams_alert_summary, 'title': self.create_title(matches), 'text': body } if self.ms_teams_theme_color != '':", "[self.labels] self.jira_args['labels'] = self.labels if self.watchers: # Support single watcher", "'') self.slack_emoji_override = self.rule.get('slack_emoji_override', ':ghost:') self.slack_icon_url_override = self.rule.get('slack_icon_url_override', '') self.slack_msg_color", "# Use appropriate line ending for text/html if self.hipchat_message_format ==", "jira field '{0}'\".format(normalized_jira_field)) arg_type = field['schema']['type'] # Handle arrays of", "%s:' % (self.rule['name'], match[qk], lookup_es_key(match, self.rule['timestamp_field'])) ) fullmessage['match'] = match[qk]", "jira.client import JIRA from jira.exceptions import JIRAError from requests.exceptions import", "a list if it isn't already if isinstance(self.rule['email'], basestring): self.rule['email']", "= [recipient + self.rule['email_add_domain']] elif isinstance(recipient, list): to_addr = recipient", "before calling alert() self.pipeline = None self.resolve_rule_references(self.rule) def resolve_rule_references(self, root):", "self.rule.get('aws_profile', None) def create_default_title(self, matches): subject = 'ElastAlert: %s' %", "jira fields should be found in the 'id' or the", "self.rule.get('victorops_entity_display_name', 'no entity display name') self.url = 'https://alert.victorops.com/integrations/generic/20131114/alert/%s/%s' % (", "= self.rule.get('pagerduty_proxy', None) self.url = 
'https://events.pagerduty.com/generic/2010-04-15/create_event.json' def alert(self, matches): body", "= self.rule.get('smtp_cert_file') # Convert email to a list if it", "known_field_list = [ 'jira_account_file', 'jira_assignee', 'jira_bump_after_inactivity', 'jira_bump_in_statuses', 'jira_bump_not_in_statuses', 'jira_bump_tickets', 'jira_component',", "= match def _ensure_new_line(self): while self.text[-2:] != '\\n\\n': self.text +=", "simplified to use only one or the other.' logging.warning(msg) self.jira_args", "{ 'color': self.hipchat_msg_color, 'message': body, 'message_format': self.hipchat_message_format, 'notify': self.hipchat_notify, 'from':", "e: raise EAException(\"Error posting to ms teams: %s\" % e)", "['string', 'date', 'datetime']: # Special case for custom types (the", "for val in alert_text_values] alert_text = alert_text.format(*alert_text_values) elif 'alert_text_kw' in", "There are likely others that will need to be updated", "def get_info(self): return {'type': 'ServiceNow', 'self.servicenow_rest_url': self.servicenow_rest_url} class HTTPPostAlerter(Alerter): \"\"\"", "self.post_static_payload = self.rule.get('http_post_static_payload', {}) self.post_all_values = self.rule.get('http_post_all_values', not self.post_payload) def", "= self.rule.get('stomp_destination', '/queue/ALERT') conn = stomp.Connection([(self.stomp_hostname, self.stomp_hostport)]) conn.start() conn.connect(self.stomp_login, self.stomp_password)", "for post_key, es_key in self.post_payload.items(): payload[post_key] = lookup_es_key(match, es_key) headers", "custom field that a JIRA admin has configured if jira_field.startswith('jira_')", "it # Note this is only the case for two", "SMTP(self.smtp_host) self.smtp.ehlo() if self.smtp.has_extn('STARTTLS'): self.smtp.starttls(keyfile=self.smtp_key_file, certfile=self.smtp_cert_file) if 'smtp_auth_file' in self.rule:", "posting to Telegram: %s\" % e) elastalert_logger.info( \"Alert sent to", "unicode(value) if 
strValue.startswith('$') and strValue.endswith('$') and strValue[1:-1] in self.rule: if", "rule): super(SlackAlerter, self).__init__(rule) self.slack_webhook_url = self.rule['slack_webhook_url'] if isinstance(self.slack_webhook_url, basestring): self.slack_webhook_url", "# post to pagerduty headers = {'content-type': 'application/json'} payload =", "with dashes if len(matches) > 1: body += '\\n----------------------------------------\\n' return", "= next((f for f in fields if normalized_jira_field == f[identifier].replace('_',", "__init__(self, rule): super(JiraAlerter, self).__init__(rule) self.server = self.rule['jira_server'] self.get_account(self.rule['jira_account_file']) self.project =", "body.replace('\\n', '<br />') # Post to HipChat headers = {'content-type':", "body = body.replace('`', \"'\") body = \"```{0}```\".format('```\\n\\n```'.join(x for x in", "name) # Support referencing other top-level rule properties # This", "the user-facing API a more representative name self.components = self.rule.get('jira_components',", "body, 'parse_mode': 'markdown', 'disable_web_page_preview': True } try: response = requests.post(self.url,", "SMTP(self.smtp_host, self.smtp_port) else: self.smtp = SMTP(self.smtp_host) self.smtp.ehlo() if self.smtp.has_extn('STARTTLS'): self.smtp.starttls(keyfile=self.smtp_key_file,", "EAException(\"Error posting to slack: %s\" % e) elastalert_logger.info(\"Alert sent to", "super(VictorOpsAlerter, self).__init__(rule) self.victorops_api_key = self.rule['victorops_api_key'] self.victorops_routing_key = self.rule['victorops_routing_key'] self.victorops_message_type =", "if self.telegram_proxy else None payload = { 'chat_id': self.telegram_room_id, 'text':", "comment = \"This alert was triggered again at %s\\n%s\" %", "'ms_teams', 'ms_teams_webhook_url': self.ms_teams_webhook_url} class SlackAlerter(Alerter): \"\"\" Creates a Slack room", "some # context as to which watcher failed to be", "self.servicenow_rest_url} class 
HTTPPostAlerter(Alerter): \"\"\" Requested elasticsearch indices are sent by", "subject def get_info(self): return {'type': 'email', 'recipients': self.rule['email']} class JiraAlerter(Alerter):", "= [post_url] self.post_url = post_url self.post_proxy = self.rule.get('http_post_proxy') self.post_payload =", "# If there is a bcc then also convert it", "and isinstance(bcc, basestring): self.rule['bcc'] = [self.rule['bcc']] add_suffix = self.rule.get('email_add_domain') if", "self.rule['short_description'], \"comments\": self.rule['comments'], \"assignment_group\": self.rule['assignment_group'], \"category\": self.rule['category'], \"subcategory\": self.rule['subcategory'], \"cmdb_ci\":", "username/password rejected: %s\" % (e)) self.smtp.sendmail(self.from_addr, to_addr, email_msg.as_string()) self.smtp.close() elastalert_logger.info(\"Sent", "at %s:' % (self.rule['name'], lookup_es_key(match, self.rule['timestamp_field']))) elastalert_logger.info(unicode(BasicMatchString(self.rule, match))) def get_info(self):", "def __init__(self, rule, match): self.rule = rule self.match = match", "alert. 
At minimum, this should contain a field type corresponding", "= self.rule['email'] if 'email_from_field' in self.rule: recipient = lookup_es_key(matches[0], self.rule['email_from_field'])", "self.rule.get('alert_text_type') != 'exclude_fields': self._add_match_items() return self.text class JiraFormattedMatchString(BasicMatchString): def _add_match_items(self):", "len(issues): return issues[0] def comment_on_ticket(self, ticket, match): text = unicode(JiraFormattedMatchString(self.rule,", "type else: self.jira_args[arg_name] = {'name': value} def get_priorities(self): \"\"\" Creates", "True) else: title = self.create_title(matches) if 'jira_ignore_in_title' in self.rule: title", "# Set proper headers headers = { \"Content-Type\": \"application/json\", \"Accept\":", "= self.rule['summary_table_fields'] if not isinstance(summary_table_fields, list): summary_table_fields = [summary_table_fields] #", "Simple string types if array_items in ['string', 'date', 'datetime']: #", "self.jira_args['assignee'] = {'name': assignee} elif 'assignee' in self.jira_args: self.jira_args.pop('assignee') def", "self.rule.get('query_key', None) for match in matches: if qk in match:", "get_info(self): return {'type': 'ServiceNow', 'self.servicenow_rest_url': self.servicenow_rest_url} class HTTPPostAlerter(Alerter): \"\"\" Requested", "body += unicode(JiraFormattedMatchString(self.rule, match)) if len(matches) > 1: body +=", "= self.client.fields() for jira_field, value in self.rule.iteritems(): # If we", "self.rule.get('jira_label')) self.description = self.rule.get('jira_description', '') self.assignee = self.rule.get('jira_assignee') self.max_age =", "value known_field_list = [ 'jira_account_file', 'jira_assignee', 'jira_bump_after_inactivity', 'jira_bump_in_statuses', 'jira_bump_not_in_statuses', 'jira_bump_tickets',", "else: return json.JSONEncoder.default(self, obj) class BasicMatchString(object): \"\"\" Creates a string", "if val is None else val alert_text = alert_text.format(**kw) 
self.text", "not in account_conf: raise EAException('Account file must have user and", "%s at %s:' % (self.rule['name'], match[qk], lookup_es_key(match, self.rule['timestamp_field']))) else: elastalert_logger.info('Alert", "% (self.rule['name'], match[qk], lookup_es_key(match, self.rule['timestamp_field']))) else: elastalert_logger.info('Alert for %s at", "found. Valid priorities are %s\" % (self.priority, self.priority_ids.keys())) def get_arbitrary_fields(self):", "a convenience, support the scenario wherein the user only provides", "' ') title = title.replace('\\\\', '\\\\\\\\') date = (datetime.datetime.now() -", "than 10000 characters if (len(body) > 9999): body = body[:9980]", "%s\" % e) elastalert_logger.info(\"Trigger sent to VictorOps\") def get_info(self): return", "{'type': 'telegram', 'telegram_room_id': self.telegram_room_id} class GitterAlerter(Alerter): \"\"\" Creates a Gitter", "it would have been matched in the lookup_es_key call above", "self.rule.get('pagerduty_incident_key', '') self.pagerduty_incident_key_args = self.rule.get('pagerduty_incident_key_args', None) self.pagerduty_proxy = self.rule.get('pagerduty_proxy', None)", "= { \"description\": description, \"short_description\": self.rule['short_description'], \"comments\": self.rule['comments'], \"assignment_group\": self.rule['assignment_group'],", "a definition for the jira field '{0}'\".format(normalized_jira_field)) arg_name = field['id']", "the exception, preserve the stack-trace, and give some # context", "index to id. \"\"\" priorities = self.client.priorities() self.priority_ids = {}", "to add '{0}' as a watcher. 
Does the user exist?\\n{1}\"", "if arg_type in ['string', 'date', 'datetime']: # Special case for", "+ self.rule['email_add_domain'] for name in to_addr] email_msg = MIMEText(body.encode('UTF-8'), _charset='UTF-8')", "dict: # Make a copy since we may be modifying", "except KeyError as e: raise EAException(\"Error formatting command: %s\" %", "(key, value_str) def _pretty_print_as_json(self, blob): try: return json.dumps(blob, cls=DateTimeEncoder, sort_keys=True,", "in self.watchers: try: self.client.add_watcher(self.issue.key, watcher) except Exception as ex: #", "email to %s\" % (to_addr)) def create_default_title(self, matches): subject =", "self.create_title(matches) email_msg['To'] = ', '.join(to_addr) email_msg['From'] = self.from_addr email_msg['Reply-To'] =", "match_aggregation[key_tuple] = match_aggregation[key_tuple] + 1 for keys, count in match_aggregation.iteritems():", "self.rule['email_from_field']) if isinstance(recipient, basestring): if '@' in recipient: to_addr =", "# https://api.slack.com/docs/formatting body = body.encode('UTF-8') body = body.replace('&', '&amp;') body", "not isinstance(summary_table_fields, list): summary_table_fields = [summary_table_fields] # Include a count", "= missing if val is None else val alert_text =", "'': payload['icon_url'] = self.slack_icon_url_override else: payload['icon_emoji'] = self.slack_emoji_override for url", "except RequestException as e: raise EAException(\"Error posting to VictorOps: %s\"", "% ( self.victorops_api_key, self.victorops_routing_key) self.victorops_proxy = self.rule.get('victorops_proxy', None) def alert(self,", "to_addr] email_msg = MIMEText(body.encode('UTF-8'), _charset='UTF-8') email_msg['Subject'] = self.create_title(matches) email_msg['To'] =", "for key, counts in self.match.items(): if key.startswith('top_events_'): self.text += '%s:\\n'", "else: payload['icon_emoji'] = self.slack_emoji_override for url in self.slack_webhook_url: try: response", "object, fallback to str pass self.text 
+= '%s: %s\\n' %", "for arg in alert_text_args] # Support referencing other top-level rule", "'message_format': self.hipchat_message_format, 'notify': self.hipchat_notify, 'from': self.hipchat_from } try: if self.hipchat_ignore_ssl_errors:", "e: raise EAException(\"Error posting to slack: %s\" % e) elastalert_logger.info(\"Alert", "None: self.pipeline['jira_ticket'] = self.issue self.pipeline['jira_server'] = self.server def create_alert_body(self, matches):", "= self.rule['jira_server'] self.get_account(self.rule['jira_account_file']) self.project = self.rule['jira_project'] self.issue_type = self.rule['jira_issuetype'] #", "aggregated alerts with dashes if len(matches) > 1: body +=", "__init__(self, *args): super(EmailAlerter, self).__init__(*args) self.smtp_host = self.rule.get('smtp_host', 'localhost') self.smtp_ssl =", "= self.create_title(matches) if 'jira_ignore_in_title' in self.rule: title = title.replace(matches[0].get(self.rule['jira_ignore_in_title'], ''),", "self.pipeline['jira_ticket']) body += '\\nJIRA ticket: %s' % (url) to_addr =", "= requests.post( self.url, data=json.dumps(payload, cls=DateTimeEncoder, ensure_ascii=False), headers=headers, proxies=proxies ) response.raise_for_status()", "jql = 'project=%s AND summary~\"%s\" and created >= \"%s\"' %", "for match in matches: payload = match if self.post_all_values else", "not in self.rule: self.text += self.rule['name'] + '\\n\\n' self._add_custom_alert_text() self._ensure_new_line()", "these are strings, but # in reality, they are required", "self.aws_region = self.rule.get('aws_region', 'us-east-1') self.profile = self.rule.get('boto_profile', None) # Deprecated", "field['schema']): raise Exception(\"Could not determine schema information for the jira", "frozenset(['slack_webhook_url']) def __init__(self, rule): super(SlackAlerter, self).__init__(rule) self.slack_webhook_url = self.rule['slack_webhook_url'] if", "EAException('Account file must have user and password fields') self.user =", "value 
correctly # If the schema information is not available,", "self.stomp_destination = self.rule.get('stomp_destination', '/queue/ALERT') conn = stomp.Connection([(self.stomp_hostname, self.stomp_hostport)]) conn.start() conn.connect(self.stomp_login,", "def alert(self, matches): body = u'⚠ *%s* ⚠ ```\\n' %", "body = self.get_aggregation_summary_text(matches) for match in matches: body += unicode(BasicMatchString(self.rule,", "try: response = requests.post(url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies) response.raise_for_status() except", "in reality, they are required to be provided as an", "if type(value) == int: return int(self.rule[strValue[1:-1]]) else: return self.rule[strValue[1:-1]] else:", "for summary_table_fields ==> {0}:\\n\\n\".format( summary_table_fields_with_count ) text_table = Texttable() text_table.header(summary_table_fields_with_count)", "= [recipient] elif 'email_add_domain' in self.rule: to_addr = [recipient +", "return None elastalert_logger.info('Commenting on existing ticket %s' % (ticket.key)) for", "'@context': 'http://schema.org/extensions', 'summary': self.ms_teams_alert_summary, 'title': self.create_title(matches), 'text': body } if", "{ \"Content-Type\": \"application/json\", \"Accept\": \"application/json;charset=utf-8\" } proxies = {'https': self.servicenow_proxy}", "be added raise Exception( \"Exception encountered when trying to add", "self.stomp_hostname = self.rule.get('stomp_hostname', 'localhost') self.stomp_hostport = self.rule.get('stomp_hostport', '61613') self.stomp_login =", "type(value) != list: value = [value] array_items = field['schema']['items'] #", "that these are strings, but # in reality, they are", "in to_addr] email_msg = MIMEText(body.encode('UTF-8'), _charset='UTF-8') email_msg['Subject'] = self.create_title(matches) email_msg['To']", "in matches: payload = match if self.post_all_values else {} payload.update(self.post_static_payload)", "value # Number type elif arg_type == 
'number': self.jira_args[arg_name] =", "comment_on_ticket(self, ticket, match): text = unicode(JiraFormattedMatchString(self.rule, match)) timestamp = pretty_ts(lookup_es_key(match,", "such, no tickets will ever be found.' % ( msg,", "self.hipchat_proxy = self.rule.get('hipchat_proxy', None) def alert(self, matches): body = self.create_alert_body(matches)", "import pretty_ts from util import ts_now from util import ts_to_dt", "if it was provided proxies = {'https': self.pagerduty_proxy} if self.pagerduty_proxy", "= self.rule.get('hipchat_from', '') self.url = 'https://%s/v2/room/%s/notification?auth_token=%s' % ( self.hipchat_domain, self.hipchat_room_id,", "if type(root) == list: # Make a copy since we", "in self.rule: title = self.create_default_title(matches, True) else: title = self.create_title(matches)", "def alert(self, matches): body = self.create_alert_body(matches) # HipChat sends 400", "self.new_style_string_format = True def alert(self, matches): # Format the command", "= [self.rule['bcc']] add_suffix = self.rule.get('email_add_domain') if add_suffix and not add_suffix.startswith('@'):", "except RequestException as e: raise EAException(\"Error posting to pagerduty: %s\"", "= [lookup_es_key(matches[0], arg) for arg in self.pagerduty_incident_key_args] # Populate values", "arg_type == 'array': # As a convenience, support the scenario", "== dict or type(item) == list: self.resolve_rule_references(root[i]) else: root[i] =", "smtplib import SMTP_SSL from smtplib import SMTPAuthenticationError from smtplib import", "Convert underscores to spaces normalized_jira_field = jira_field[5:].replace('_', ' ').lower() #", "self.create_default_title(matches) def create_custom_title(self, matches): alert_subject = unicode(self.rule['alert_subject']) if 'alert_subject_args' in", "If there is a query_key, use that in the title", "utf-8 -*- import copy import datetime import json import logging", "self.rule['name'] def get_account(self, account_file): \"\"\" Gets the 
username and password", "'jira_bump_after_inactivity', 'jira_bump_in_statuses', 'jira_bump_not_in_statuses', 'jira_bump_tickets', 'jira_component', 'jira_components', 'jira_description', 'jira_ignore_in_title', 'jira_issuetype', 'jira_label',", "'jira_bump_tickets', 'jira_component', 'jira_components', 'jira_description', 'jira_ignore_in_title', 'jira_issuetype', 'jira_label', 'jira_labels', 'jira_max_age', 'jira_priority',", "HTML, pass along only first 1024 chars raise EAException(\"Error connecting", "[command_arg % matches[0] for command_arg in self.rule['command']] self.last_command = command", "self.url, data=json.dumps(payload, cls=DateTimeEncoder, ensure_ascii=False), headers=headers, proxies=proxies ) response.raise_for_status() except RequestException", "x, y in self.match.items() if not x.startswith('top_events_')]) json_blob = self._pretty_print_as_json(match_items)", "# If the schema information is not available, raise an", "Returns a dictionary of data related to this alert. 
At", "should be simplified to use only one or the other.'", "to avoid redundant copy/paste if type(root) == list: # Make", "BasicMatchString(object): \"\"\" Creates a string containing fields in match for", "Valid priorities are %s\" % (self.priority, self.priority_ids.keys())) def get_arbitrary_fields(self): #", "%s' % (pretty_ts(matches[0][self.rule['timestamp_field']], self.rule.get('use_local_time'))) # Add count for spikes count", "if isinstance(self.slack_webhook_url, basestring): self.slack_webhook_url = [self.slack_webhook_url] self.slack_proxy = self.rule.get('slack_proxy', None)", "# Note this is only the case for two built-in", "+= '\\n----------------------------------------\\n' body += u' ```' headers = {'content-type': 'application/json'}", "'twilio_auth_token', 'twilio_to_number', 'twilio_from_number']) def __init__(self, rule): super(TwilioAlerter, self).__init__(rule) self.twilio_account_sid =", "import Client as TwilioClient from util import EAException from util", "as an es result key, since it would have been", "set it # Note this is only the case for", "isn't already if isinstance(self.rule['email'], basestring): self.rule['email'] = [self.rule['email']] # If", "(' '.join(command), e)) def get_info(self): return {'type': 'command', 'command': '", "def alert(self, matches): qk = self.rule.get('query_key', None) for match in", "in self.rule: to_addr = [recipient + self.rule['email_add_domain']] elif isinstance(recipient, list):", "email_msg = MIMEText(body.encode('UTF-8'), _charset='UTF-8') email_msg['Subject'] = self.create_title(matches) email_msg['To'] = ',", "get_info(self): return {'type': 'debug'} class EmailAlerter(Alerter): \"\"\" Sends an email", "return {'type': 'slack', 'slack_username_override': self.slack_username_override, 'slack_webhook_url': self.slack_webhook_url} class PagerDutyAlerter(Alerter): \"\"\"", "= self.rule['telegram_room_id'] self.telegram_api_url = self.rule.get('telegram_api_url', 'api.telegram.org') self.url = 
'https://%s/bot%s/%s' %", "matches): body = self.get_aggregation_summary_text(matches) for match in matches: body +=", "posting to Exotel\"), None, sys.exc_info()[2] elastalert_logger.info(\"Trigger sent to Exotel\") def", "self.assignee = self.rule.get('jira_assignee') self.max_age = self.rule.get('jira_max_age', 30) self.priority = self.rule.get('jira_priority')", "alert_text_values = [lookup_es_key(self.match, arg) for arg in alert_text_args] # Support", "self.rule['command']: logging.warning('Warning! You could be vulnerable to shell injection!') self.rule['command']", "as an API error that will bubble up self.jira_args[arg_name] =", "%s\" % e) elastalert_logger.info(\"Alert sent to ServiceNow\") def get_info(self): return", "= self.rule.get('victorops_entity_display_name', 'no entity display name') self.url = 'https://alert.victorops.com/integrations/generic/20131114/alert/%s/%s' %", "EAException(\"Error posting to VictorOps: %s\" % e) elastalert_logger.info(\"Trigger sent to", "'password' not in account_conf: raise EAException('Account file must have user", "qk = self.rule.get('query_key', None) for match in matches: if qk", "% (jql, ','.join(self.bump_in_statuses)) if self.bump_not_in_statuses: jql = '%s and status", "(count) return title def get_info(self): return {'type': 'jira'} class CommandAlerter(Alerter):", "[{'value': v} for v in value] else: self.jira_args[arg_name] = value", "in self.rule: self.smtp.login(self.user, self.password) except (SMTPException, error) as e: raise", "except RequestException as e: raise EAException(\"Error posting to Gitter: %s\"", "two built-in types, id: issuekey and id: thumbnail if not", "= self.rule['gitter_webhook_url'] self.gitter_proxy = self.rule.get('gitter_proxy', None) self.gitter_msg_level = self.rule.get('gitter_msg_level', 'error')", "alert. 
Match is a dictionary of information about the alert.", "' - %s' % (qk) return subject def get_info(self): return", "alert(self, matches): # Format the command and arguments try: if", "def __init__(self, rule): super(GitterAlerter, self).__init__(rule) self.gitter_webhook_url = self.rule['gitter_webhook_url'] self.gitter_proxy =", "as objects with an identifier 'key' elif array_items == 'option':", "name in to_addr] email_msg = MIMEText(body.encode('UTF-8'), _charset='UTF-8') email_msg['Subject'] = self.create_title(matches)", "self.post_all_values = self.rule.get('http_post_all_values', not self.post_payload) def alert(self, matches): \"\"\" Each", "match_aggregation: match_aggregation[key_tuple] = 1 else: match_aggregation[key_tuple] = match_aggregation[key_tuple] + 1", "'id' or the 'name' field. Therefore, try both just in", "if '%' in self.rule['command']: logging.warning('Warning! You could be vulnerable to", "for each alert \"\"\" required_options = frozenset(['ms_teams_webhook_url', 'ms_teams_alert_summary']) def __init__(self,", "modifying the contents of the structure we're walking for i,", "& set(self.bump_in_statuses)) if intersection: msg = '%s Both have common", "'command': ' '.join(self.last_command)} class SnsAlerter(Alerter): \"\"\" Send alert using AWS", "value_str.replace('\\\\n', '\\n') if type(value) in [list, dict]: try: value_str =", "title = 'ElastAlert: %s' % (self.rule['name']) if for_search: return title", "self.slack_proxy = self.rule.get('slack_proxy', None) self.slack_username_override = self.rule.get('slack_username_override', 'elastalert') self.slack_channel_override =", "i in range(len(incident_key_values)): if incident_key_values[i] is None: key_value = self.rule.get(self.pagerduty_incident_key_args[i])", "if self.post_all_values else {} payload.update(self.post_static_payload) for post_key, es_key in self.post_payload.items():", "again at %s\\n%s\" % (timestamp, text) self.client.add_comment(ticket, comment) def alert(self,", "= 
yaml_loader(account_file) if 'user' not in account_conf or 'password' not", "self.rule: alert_subject_args = self.rule['alert_subject_args'] alert_subject_values = [lookup_es_key(matches[0], arg) for arg", "body = self.create_alert_body(matches) # post to pagerduty headers = {'content-type':", "else, we will do best-effort and try to set a", "\"\"\" priorities = self.client.priorities() self.priority_ids = {} for x in", "= 'Both jira_bump_in_statuses (%s) and jira_bump_not_in_statuses (%s) are set.' %", "email.utils import formatdate from smtplib import SMTP from smtplib import", "match))) def get_info(self): return {'type': 'debug'} class EmailAlerter(Alerter): \"\"\" Sends", "self.pagerduty_client_name, 'details': { \"information\": body.encode('UTF-8'), }, } # set https", "self.victorops_api_key, self.victorops_routing_key) self.victorops_proxy = self.rule.get('victorops_proxy', None) def alert(self, matches): body", "to decide how to set the value correctly # If", "else: match_aggregation[key_tuple] = match_aggregation[key_tuple] + 1 for keys, count in", "self.hipchat_notify, 'from': self.hipchat_from } try: if self.hipchat_ignore_ssl_errors: requests.packages.urllib3.disable_warnings() response =", "message via bot api for each alert \"\"\" required_options =", "matches): title = self.create_title(matches) if self.bump_tickets: ticket = self.find_existing_ticket(matches) if", "= self.rule['jira_project'] self.issue_type = self.rule['jira_issuetype'] # We used to support", "conn.disconnect() def get_info(self): return {'type': 'stomp'} class DebugAlerter(Alerter): \"\"\" The", "Make a copy since we may be modifying the contents", "'mrkdwn_in': ['text', 'pretext'], 'fields': [] } ] } if self.slack_icon_url_override", "exception since we don't know how to set it #", "basestring): self.ms_teams_webhook_url = [self.ms_teams_webhook_url] self.ms_teams_proxy = self.rule.get('ms_teams_proxy', None) self.ms_teams_alert_summary =", "self.user = account_conf['user'] 
self.password = account_conf['password'] class StompAlerter(Alerter): \"\"\" The", "avoid redundant copy/paste if type(root) == list: # Make a", "= dict([(x, y) for x, y in self.match.items() if not", "# 2. A custom field that a JIRA admin has", "ending for text/html if self.hipchat_message_format == 'html': body = body.replace('\\n',", "If the schema information is not available, raise an exception", "incident_key_values[i] is None: key_value = self.rule.get(self.pagerduty_incident_key_args[i]) if key_value: incident_key_values[i] =", "\"Content-Type\": \"application/json\", \"Accept\": \"application/json;charset=utf-8\" } proxies = {'https': self.servicenow_proxy} if", "'victorops_routing_key': self.victorops_routing_key} class TelegramAlerter(Alerter): \"\"\" Send a Telegram message via", "configuration. \"\"\" required_options = frozenset([]) def __init__(self, rule): self.rule =", "= self.rule['exotel_from_number'] self.sms_body = self.rule.get('exotel_message_body', '') def alert(self, matches): client", "command_arg in self.rule['command']] else: command = [command_arg % matches[0] for", "preformatted_text class Alerter(object): \"\"\" Base class for types of alerts.", "e) elastalert_logger.info(\"Alert sent to ServiceNow\") def get_info(self): return {'type': 'ServiceNow',", "e: logging.exception(\"Error while searching for JIRA ticket using jql '%s':", "to the specified endpoint(s). 
\"\"\" for match in matches: payload", "rule before calling alert() self.pipeline = None self.resolve_rule_references(self.rule) def resolve_rule_references(self,", "url in self.ms_teams_webhook_url: try: response = requests.post(url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers,", "entity display name') self.url = 'https://alert.victorops.com/integrations/generic/20131114/alert/%s/%s' % ( self.victorops_api_key, self.victorops_routing_key)", "is not None: self.jira_args['priority'] = {'id': self.priority_ids[self.priority]} except KeyError: logging.error(\"Priority", "representative name self.components = self.rule.get('jira_components', self.rule.get('jira_component')) # We used to", "title = self.create_title(matches) if self.bump_tickets: ticket = self.find_existing_ticket(matches) if ticket:", "when trying to add '{0}' as a watcher. Does the", "not work if there is a top-level rule property with", "e: raise EAException(\"Error posting to VictorOps: %s\" % e) elastalert_logger.info(\"Trigger", "specified endpoint(s). \"\"\" for match in matches: payload = match", "# Log a warning to ElastAlert saying that we couldn't", "a broker. \"\"\" required_options = frozenset(['stomp_hostname', 'stomp_hostport', 'stomp_login', 'stomp_password']) def", "'https://events.pagerduty.com/generic/2010-04-15/create_event.json' def alert(self, matches): body = self.create_alert_body(matches) # post to", "great! 
If not, it will manifest itself as an API", "'exotel_account': self.exotel_account_sid} class TwilioAlerter(Alerter): required_options = frozenset(['twilio_account_sid', 'twilio_auth_token', 'twilio_to_number', 'twilio_from_number'])", "% (self.rule['name']) if for_search: return title title += ' -", "+= ' - %s+ events' % (count) return title def", "if len(matches) > 1: body += '\\n----------------------------------------\\n' return body def", "= self.rule.get('smtp_port') if self.rule.get('smtp_auth_file'): self.get_account(self.rule['smtp_auth_file']) self.smtp_key_file = self.rule.get('smtp_key_file') self.smtp_cert_file =", "field. Therefore, try both just in case for identifier in", "value in root.copy().iteritems(): if type(value) == dict or type(value) ==", "key.startswith('top_events_'): continue value_str = unicode(value) value_str.replace('\\\\n', '\\n') if type(value) in", "relevant information to the alert. \"\"\" if 'alert_subject' in self.rule:", "in match_aggregation.iteritems(): text_table.add_row([key for key in keys] + [count]) text", "be passed as objects with an identifier 'key' elif array_items", "MIMEText(body.encode('UTF-8'), _charset='UTF-8') email_msg['Subject'] = self.create_title(matches) email_msg['To'] = ', '.join(to_addr) email_msg['From']", "certfile=self.smtp_cert_file) if 'smtp_auth_file' in self.rule: self.smtp.login(self.user, self.password) except (SMTPException, error)", "= body[:9980] + '..(truncated)' # Use appropriate line ending for", "self.rule: title = title.replace(matches[0].get(self.rule['jira_ignore_in_title'], ''), '') # This is necessary", "super(SlackAlerter, self).__init__(rule) self.slack_webhook_url = self.rule['slack_webhook_url'] if isinstance(self.slack_webhook_url, basestring): self.slack_webhook_url =", "Slack\") def get_info(self): return {'type': 'slack', 'slack_username_override': self.slack_username_override, 'slack_webhook_url': self.slack_webhook_url}", "used, e.g. 
as an e-mail subject or JIRA issue summary.", "try: response = requests.post(self.gitter_webhook_url, json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies) response.raise_for_status() except", "= {} for name, kw_name in self.rule.get('alert_text_kw').items(): val = lookup_es_key(self.match,", "to HipChat: %s\" % e) elastalert_logger.info(\"Alert sent to HipChat room", "self.victorops_proxy} if self.victorops_proxy else None payload = { \"message_type\": self.victorops_message_type,", "used by a rule before calling alert() self.pipeline = None", "for match in matches: body += unicode(BasicMatchString(self.rule, match)) # Separate", "of built-in fields that we explicitly know how to set", "of complex types that have to be passed as objects", "in field['schema']): raise Exception(\"Could not determine schema information for the", "self.stomp_password = self.rule.get('stomp_password', '<PASSWORD>') self.stomp_destination = self.rule.get('stomp_destination', '/queue/ALERT') conn =", "\"\"\" Sends an email alert \"\"\" required_options = frozenset(['email']) def", "Include a count aggregation so that we can see at", "+= unicode(BasicMatchString(self.rule, match)) # Separate text of aggregated alerts with", "from util import ts_now from util import ts_to_dt class DateTimeEncoder(json.JSONEncoder):", "posting to pagerduty: %s\" % e) elastalert_logger.info(\"Trigger sent to PagerDuty\")", "== 'array': # As a convenience, support the scenario wherein", "logging import subprocess import sys import warnings from email.mime.text import", "'{0}'\".format(normalized_jira_field)) arg_type = field['schema']['type'] # Handle arrays of simple types", "self.servicenow_rest_url = self.rule['servicenow_rest_url'] self.servicenow_proxy = self.rule.get('servicenow_proxy', None) def alert(self, matches):", "information about the alert. :param match: A dictionary of relevant", "will ever be found.' 
% ( msg, ','.join(intersection)) msg +=", "' This should be simplified to use only one or", "self.rule.get('ms_teams_alert_summary', 'ElastAlert Message') self.ms_teams_alert_fixed_width = self.rule.get('ms_teams_alert_fixed_width', False) self.ms_teams_theme_color = self.rule.get('ms_teams_theme_color',", ":param match: A dictionary of relevant information to the alert.", "text: text = u'{{noformat}}{0}{{noformat}}'.format(text) return text def create_default_title(self, matches, for_search=False):", "https proxy, if it was provided proxies = {'https': self.telegram_proxy}", "Separate text of aggregated alerts with dashes if len(matches) >", "else: self.smtp = SMTP_SSL(self.smtp_host, keyfile=self.smtp_key_file, certfile=self.smtp_cert_file) else: if self.smtp_port: self.smtp", "elif 'email_add_domain' in self.rule: to_addr = [recipient + self.rule['email_add_domain']] elif", "data=json.dumps(payload, cls=DateTimeEncoder, ensure_ascii=False), headers=headers, proxies=proxies ) response.raise_for_status() except RequestException as", ") response.raise_for_status() except RequestException as e: raise EAException(\"Error posting to", "def get_incident_key(self, matches): if self.pagerduty_incident_key_args: incident_key_values = [lookup_es_key(matches[0], arg) for", "return value def alert(self, match): \"\"\" Send an alert. 
Match", "giving the user-facing API a more representative name self.labels =", "as e: raise EAException(\"Error while running command %s: %s\" %", "%s\" % (' '.join(command), e)) def get_info(self): return {'type': 'command',", "slack headers = {'content-type': 'application/json'} # set https proxy, if", "supported field in JIRA that we don't have on our", "SlackAlerter(Alerter): \"\"\" Creates a Slack room message for each alert", "if self.slack_proxy else None payload = { 'username': self.slack_username_override, 'channel':", "to Slack\") def get_info(self): return {'type': 'slack', 'slack_username_override': self.slack_username_override, 'slack_webhook_url':", "auth=(self.rule['username'], self.rule['password']), headers=headers, data=json.dumps(payload, cls=DateTimeEncoder), proxies=proxies ) response.raise_for_status() except RequestException", "= self.rule.get('jira_bump_not_in_statuses') self.bump_in_statuses = self.rule.get('jira_bump_in_statuses') self.bump_after_inactivity = self.rule.get('jira_bump_after_inactivity', self.max_age) self.watchers", "text_table.header(summary_table_fields_with_count) match_aggregation = {} # Maintain an aggregate count for", "response.raise_for_status() except RequestException as e: raise EAException(\"Error posting to pagerduty:", "self.rule['twilio_auth_token'] self.twilio_to_number = self.rule['twilio_to_number'] self.twilio_from_number = self.rule['twilio_from_number'] def alert(self, matches):", "a HipChat room notification for each alert \"\"\" required_options =", "def alert(self, matches): \"\"\" Each match will trigger a POST", "e) elastalert_logger.info(\"Trigger sent to VictorOps\") def get_info(self): return {'type': 'victorops',", "[lookup_es_key(matches[0], arg) for arg in alert_subject_args] # Support referencing other", "not in match_aggregation: match_aggregation[key_tuple] = 1 else: match_aggregation[key_tuple] = match_aggregation[key_tuple]", "get_info(self): return {'type': 'stomp'} class 
DebugAlerter(Alerter): \"\"\" The debug alerter", "if 'aggregation' in self.rule and 'summary_table_fields' in self.rule: summary_table_fields =", "or list if type(self.components) != list: self.jira_args['components'] = [{'name': self.components}]", "username and password from an account file. :param account_file: Name", "try: response = requests.post( self.url, data=json.dumps(payload, cls=DateTimeEncoder, ensure_ascii=False), headers=headers, proxies=proxies", "') title = title.replace('\\\\', '\\\\\\\\') date = (datetime.datetime.now() - datetime.timedelta(days=self.max_age)).strftime('%Y-%m-%d')", "= self.slack_icon_url_override else: payload['icon_emoji'] = self.slack_emoji_override for url in self.slack_webhook_url:", "# {\"id\":\"customfield_12807\",\"name\":\"My Custom Field\",\"custom\":true,\"orderable\":true,\"navigable\":true,\"searchable\":true, # \"clauseNames\":[\"cf[12807]\",\"My Custom Field\"],\"schema\":{\"type\":\"array\",\"items\":\"string\", # \"custom\":\"com.atlassian.jira.plugin.system.customfieldtypes:multiselect\",\"customId\":12807}}", "jira_field, value in self.rule.iteritems(): # If we find a field", "self.bump_not_in_statuses: msg = 'Both jira_bump_in_statuses (%s) and jira_bump_not_in_statuses (%s) are", "matches[0].get('spike_count') if count: title += ' - %s+ events' %", "label. 
This allows us to maintain backwards compatibility # while", "a Gitter activity message for each alert \"\"\" required_options =", "user-facing API a more representative name self.components = self.rule.get('jira_components', self.rule.get('jira_component'))", "JIRA(self.server, basic_auth=(self.user, self.password)) self.get_priorities() self.get_arbitrary_fields() except JIRAError as e: #", "posting to VictorOps: %s\" % e) elastalert_logger.info(\"Trigger sent to VictorOps\")", "'victorops', 'victorops_routing_key': self.victorops_routing_key} class TelegramAlerter(Alerter): \"\"\" Send a Telegram message", "proxies = {'https': self.servicenow_proxy} if self.servicenow_proxy else None payload =", "= unicode(value) value_str.replace('\\\\n', '\\n') if type(value) in [list, dict]: try:", "% e) elastalert_logger.info(\"Alert sent to Slack\") def get_info(self): return {'type':", "twilio.rest import Client as TwilioClient from util import EAException from", "SMTPAuthenticationError as e: raise EAException(\"SMTP username/password rejected: %s\" % (e))", "a VictorOps Incident for each alert \"\"\" required_options = frozenset(['victorops_api_key',", "from requests.exceptions import RequestException from staticconf.loader import yaml_loader from texttable", "fields should be found in the 'id' or the 'name'", "= self.rule['hipchat_room_id'] self.hipchat_domain = self.rule.get('hipchat_domain', 'api.hipchat.com') self.hipchat_ignore_ssl_errors = self.rule.get('hipchat_ignore_ssl_errors', False)", "val for val in incident_key_values] return self.pagerduty_incident_key.format(*incident_key_values) else: return self.pagerduty_incident_key", "also giving the user-facing API a more representative name self.components", "a copy since we may be modifying the contents of", "= super(JiraAlerter, self).get_aggregation_summary_text(matches) if text: text = u'{{noformat}}{0}{{noformat}}'.format(text) return text", "definition for the jira field '{0}'\".format(normalized_jira_field)) 
arg_name = field['id'] #", "import requests import stomp from exotel import Exotel from jira.client", "%s:' % (self.rule['name'], match[qk], lookup_es_key(match, self.rule['timestamp_field']))) alerts.append( '1)Alert for %s,", "def __init__(self, rule): super(SlackAlerter, self).__init__(rule) self.slack_webhook_url = self.rule['slack_webhook_url'] if isinstance(self.slack_webhook_url,", "matches: try: self.comment_on_ticket(ticket, match) except JIRAError as e: logging.exception(\"Error while", "None: alert_value = self.rule.get(alert_subject_args[i]) if alert_value: alert_subject_values[i] = alert_value alert_subject_values", "\"application/json\", \"Accept\": \"application/json;charset=utf-8\" } proxies = {'https': self.post_proxy} if self.post_proxy", "= [lookup_es_key(self.match, arg) for arg in alert_text_args] # Support referencing", "title.replace('\\\\', '\\\\\\\\') date = (datetime.datetime.now() - datetime.timedelta(days=self.max_age)).strftime('%Y-%m-%d') jql = 'project=%s", "teams: %s\" % e) elastalert_logger.info(\"Alert sent to MS Teams\") def", "provided proxies = {'https': self.slack_proxy} if self.slack_proxy else None payload", "self.rule.get(alert_text_args[i]) if alert_value: alert_text_values[i] = alert_value alert_text_values = [missing if", "== dict: # Make a copy since we may be", "self.pagerduty_incident_key.format(*incident_key_values) else: return self.pagerduty_incident_key def get_info(self): return {'type': 'pagerduty', 'pagerduty_client_name':", "above if val is None: val = self.rule.get(name) kw[kw_name] =", "key.startswith('top_events_'): self.text += '%s:\\n' % (key[11:]) top_events = counts.items() if", "for key in summary_table_fields]) if key_tuple not in match_aggregation: match_aggregation[key_tuple]", "= match_aggregation[key_tuple] + 1 for keys, count in match_aggregation.iteritems(): text_table.add_row([key", "= self.rule.get('jira_assignee') self.max_age = self.rule.get('jira_max_age', 30) self.priority = 
self.rule.get('jira_priority') self.bump_tickets", "').lower() # All jira fields should be found in the", "'pagerduty', 'pagerduty_client_name': self.pagerduty_client_name} class ExotelAlerter(Alerter): required_options = frozenset(['exotel_account_sid', 'exotel_auth_token', 'exotel_to_number',", "class JiraAlerter(Alerter): \"\"\" Creates a Jira ticket for each alert", "(ticket.key)) for match in matches: try: self.comment_on_ticket(ticket, match) except JIRAError", "# Simple string types if arg_type in ['string', 'date', 'datetime']:", "count = matches[0].get('spike_count') if count: title += ' - %s+", "[] } ] } if self.slack_icon_url_override != '': payload['icon_url'] =", "an es result key, since it would have been matched", "datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\") fullmessage['body'] = self.create_alert_body(matches) self.stomp_hostname = self.rule.get('stomp_hostname', 'localhost') self.stomp_hostport", "on the jira server (built-ins and custom ones) fields =", "'attachments': [ { 'color': self.slack_msg_color, 'title': self.create_title(matches), 'text': body, 'mrkdwn_in':", "alerter uses a Python logger (by default, alerting to terminal).", "Teams Conversation Message for each alert \"\"\" required_options = frozenset(['ms_teams_webhook_url',", "normalized_jira_field == f[identifier].replace('_', ' ').lower()), None) if field: break if", "words appear to be ok title = title.replace(' - ',", "e: raise EAException(\"Error formatting command: %s\" % (e)) # Run", "RequestException from staticconf.loader import yaml_loader from texttable import Texttable from", "'value', or something else # If it works, great! If", "%s' % (lookup_es_key(matches[0], self.rule['query_key']), self.rule['name']) else: title = 'ElastAlert: %s'", "elastalert_logger.info(\"Trigger sent to Twilio\") def get_info(self): return {'type': 'twilio', 'twilio_client_name':", "the other.' 
logging.warning(msg) self.jira_args = {'project': {'key': self.project}, 'issuetype': {'name':", "%s\" % (self.sns_topic_arn)) class HipChatAlerter(Alerter): \"\"\" Creates a HipChat room", "= frozenset(['slack_webhook_url']) def __init__(self, rule): super(SlackAlerter, self).__init__(rule) self.slack_webhook_url = self.rule['slack_webhook_url']", "def __init__(self, *args): super(EmailAlerter, self).__init__(*args) self.smtp_host = self.rule.get('smtp_host', 'localhost') self.smtp_ssl", "if self.victorops_proxy else None payload = { \"message_type\": self.victorops_message_type, \"entity_display_name\":", "bcc = self.rule.get('bcc') if bcc and isinstance(bcc, basestring): self.rule['bcc'] =", "# Support single watcher or list if type(self.watchers) != list:", "{ 'chat_id': self.telegram_room_id, 'text': body, 'parse_mode': 'markdown', 'disable_web_page_preview': True }", "self.text += self.rule['name'] + '\\n\\n' self._add_custom_alert_text() self._ensure_new_line() if self.rule.get('alert_text_type') !=", "VictorOpsAlerter(Alerter): \"\"\" Creates a VictorOps Incident for each alert \"\"\"", "cc then also convert it a list if it isn't", "self.rule.get('stomp_login', 'admin') self.stomp_password = self.rule.get('stomp_password', '<PASSWORD>') self.stomp_destination = self.rule.get('stomp_destination', '/queue/ALERT')", "single watcher or list if type(self.watchers) != list: self.watchers =", "status in (%s)' % (jql, ','.join(self.bump_in_statuses)) if self.bump_not_in_statuses: jql =", "not field: # Log a warning to ElastAlert saying that", "self.rule.get('slack_msg_color', 'danger') self.slack_parse_override = self.rule.get('slack_parse_override', 'none') self.slack_text_string = self.rule.get('slack_text_string', '')", "as e: raise EAException(\"Error posting to slack: %s\" % e)", "unicode(JiraFormattedMatchString(self.rule, match)) timestamp = pretty_ts(lookup_es_key(match, self.rule['timestamp_field'])) comment = \"This alert", "elastalert_logger from util 
import lookup_es_key from util import pretty_ts from", "' - %s' % (pretty_ts(matches[0][self.rule['timestamp_field']], self.rule.get('use_local_time'))) # Add count for", "alert(self, matches): body = self.create_alert_body(matches) # post to Gitter headers", "= self.rule.get('stomp_hostport', '61613') self.stomp_login = self.rule.get('stomp_login', 'admin') self.stomp_password = self.rule.get('stomp_password',", "how many of each aggregation_key were encountered summary_table_fields_with_count = summary_table_fields", "value} else: self.jira_args[arg_name] = value # Number type elif arg_type", "pipe data try: subp = subprocess.Popen(command, stdin=subprocess.PIPE, shell=self.shell) if self.rule.get('pipe_match_json'):", "try: value_str = self._pretty_print_as_json(value) except TypeError: # Non serializable object,", "or 'password' not in account_conf: raise EAException('Account file must have", "appropriate line ending for text/html if self.hipchat_message_format == 'html': body", "== 'html': body = body.replace('\\n', '<br />') # Post to", "required_options = frozenset([]) def __init__(self, rule): self.rule = rule #", "the username and password from an account file. :param account_file:", "'&lt;') body = body.replace('>', '&gt;') return body def alert(self, matches):", "with rule level properties too for i in range(len(incident_key_values)): if", "% e) elastalert_logger.info(\"Alert sent to Gitter\") def get_info(self): return {'type':", "super(ServiceNowAlerter, self).__init__(rule) self.servicenow_rest_url = self.rule['servicenow_rest_url'] self.servicenow_proxy = self.rule.get('servicenow_proxy', None) def", "Requested elasticsearch indices are sent by HTTP POST. 
Encoded with", "matches): \"\"\" Creates custom alert title to be used, e.g.", "'%s and status not in (%s)' % (jql, ','.join(self.bump_not_in_statuses)) try:", "For anything else, we will do best-effort and try to", "self.pipeline is not None: self.pipeline['jira_ticket'] = ticket self.pipeline['jira_server'] = self.server", "'jira_bump_not_in_statuses', 'jira_bump_tickets', 'jira_component', 'jira_components', 'jira_description', 'jira_ignore_in_title', 'jira_issuetype', 'jira_label', 'jira_labels', 'jira_max_age',", "not add_suffix.startswith('@'): self.rule['email_add_domain'] = '@' + add_suffix def alert(self, matches):", "payload = { \"message_type\": self.victorops_message_type, \"entity_display_name\": self.victorops_entity_display_name, \"monitoring_tool\": \"ElastAlert\", \"state_message\":", "= { 'chat_id': self.telegram_room_id, 'text': body, 'parse_mode': 'markdown', 'disable_web_page_preview': True", "= False if isinstance(self.rule['command'], basestring): self.shell = True if '%'", "have been matched in the lookup_es_key call above for i", "in self.custom_string_types_with_special_handling: self.jira_args[arg_name] = {'value': value} else: self.jira_args[arg_name] = value", "to the alert. \"\"\" raise NotImplementedError() def get_info(self): \"\"\" Returns", "in match_items: if key.startswith('top_events_'): continue value_str = unicode(value) value_str.replace('\\\\n', '\\n')", "import copy import datetime import json import logging import subprocess", "= [self.rule['command']] self.new_style_string_format = False if 'new_style_string_format' in self.rule and", "the alert. \"\"\" raise NotImplementedError() def get_info(self): \"\"\" Returns a", "object, using 'name' as the key # This may not", "def alert(self, matches): client = Exotel(self.exotel_account_sid, self.exotel_auth_token) try: message_body =", "alert(self, matches): qk = self.rule.get('query_key', None) for match in matches:", "and fail to load the alert entirely? 
Probably the latter...", "'markdown', 'disable_web_page_preview': True } try: response = requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder),", "else: self.jira_args['components'] = [{'name': component} for component in self.components] if", "and 'jira_ticket' in self.pipeline: url = '%s/browse/%s' % (self.pipeline['jira_server'], self.pipeline['jira_ticket'])", "show something return json.dumps(blob, cls=DateTimeEncoder, sort_keys=True, indent=4, encoding='Latin-1', ensure_ascii=False) def", "that can be used as custom fields require special handling", "def create_alert_body(self, matches): body = self.description + '\\n' body +=", "to Twilio\") def get_info(self): return {'type': 'twilio', 'twilio_client_name': self.twilio_from_number} class", "raise NotImplementedError() def get_info(self): \"\"\" Returns a dictionary of data", "case for identifier in ['name', 'id']: field = next((f for", "the same name # as an es result key, since", "requests.packages.urllib3.disable_warnings() response = requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, verify=not self.hipchat_ignore_ssl_errors, proxies=proxies)", "= self.rule.get('stomp_password', '<PASSWORD>') self.stomp_destination = self.rule.get('stomp_destination', '/queue/ALERT') conn = stomp.Connection([(self.stomp_hostname,", "types that can be used as custom fields require special", "at %s:' % (self.rule['name'], match[qk], lookup_es_key(match, self.rule['timestamp_field']))) alerts.append( '1)Alert for", "text) self.client.add_comment(ticket, comment) def alert(self, matches): title = self.create_title(matches) if", "publishes alerts via stomp to a broker. 
\"\"\" required_options =", "= frozenset(['jira_server', 'jira_account_file', 'jira_project', 'jira_issuetype']) # Maintain a static set", "self).__init__(rule) self.gitter_webhook_url = self.rule['gitter_webhook_url'] self.gitter_proxy = self.rule.get('gitter_proxy', None) self.gitter_msg_level =", "def get_arbitrary_fields(self): # This API returns metadata about all the", "the user exist?\\n{1}\" .format( watcher, ex )), None, sys.exc_info()[2] except", "self.telegram_proxy else None payload = { 'chat_id': self.telegram_room_id, 'text': body,", "\"application/json\", \"Accept\": \"application/json;charset=utf-8\" } proxies = {'https': self.servicenow_proxy} if self.servicenow_proxy", "in JIRA that we don't have on our radar #", "in the lookup_es_key call above if val is None: val", "class StompAlerter(Alerter): \"\"\" The stomp alerter publishes alerts via stomp", "self.components}] else: self.jira_args['components'] = [{'name': component} for component in self.components]", "array_items in ['string', 'date', 'datetime']: # Special case for multi-select", "required_options = set(['command']) def __init__(self, *args): super(CommandAlerter, self).__init__(*args) self.last_command =", "= key_value incident_key_values = ['<MISSING VALUE>' if val is None", "self.rule.get('hipchat_proxy', None) def alert(self, matches): body = self.create_alert_body(matches) # HipChat", "fullmessage['match'] = match[qk] else: elastalert_logger.info('Alert for %s at %s:' %", "from jira.client import JIRA from jira.exceptions import JIRAError from requests.exceptions", "alert_value alert_subject_values = ['<MISSING VALUE>' if val is None else", "def __init__(self, rule): super(MsTeamsAlerter, self).__init__(rule) self.ms_teams_webhook_url = self.rule['ms_teams_webhook_url'] if isinstance(self.ms_teams_webhook_url,", "= SMTP(self.smtp_host) self.smtp.ehlo() if self.smtp.has_extn('STARTTLS'): self.smtp.starttls(keyfile=self.smtp_key_file, certfile=self.smtp_cert_file) if 'smtp_auth_file' 
in", "matches: key_tuple = tuple([unicode(lookup_es_key(match, key)) for key in summary_table_fields]) if", "'') return body def alert(self, matches): body = self.create_alert_body(matches) body", "10000 characters if (len(body) > 9999): body = body[:9980] +", "\"description\": description, \"short_description\": self.rule['short_description'], \"comments\": self.rule['comments'], \"assignment_group\": self.rule['assignment_group'], \"category\": self.rule['category'],", "key, value in match_items: if key.startswith('top_events_'): continue value_str = unicode(value)", "# We used to support only a single label. This", "date = (datetime.datetime.now() - datetime.timedelta(days=self.max_age)).strftime('%Y-%m-%d') jql = 'project=%s AND summary~\"%s\"", "self.victorops_routing_key} class TelegramAlerter(Alerter): \"\"\" Send a Telegram message via bot", "def get_info(self): return {'type': 'pagerduty', 'pagerduty_client_name': self.pagerduty_client_name} class ExotelAlerter(Alerter): required_options", "not available, raise an exception since we don't know how", "matched in the lookup_es_key call above if val is None:", "__init__(self, rule): super(HTTPPostAlerter, self).__init__(rule) post_url = self.rule.get('http_post_url') if isinstance(post_url, basestring):", "\"\"\" Base class for types of alerts. :param rule: The", "break if not field: # Log a warning to ElastAlert", "rule): self.rule = rule # pipeline object is created by", "= self.rule.get('exotel_message_body', '') def alert(self, matches): client = Exotel(self.exotel_account_sid, self.exotel_auth_token)", "= self.rule.get('servicenow_proxy', None) def alert(self, matches): for match in matches:", "title to be used, e.g. 
as an e-mail subject or", "self.slack_parse_override, 'text': self.slack_text_string, 'attachments': [ { 'color': self.slack_msg_color, 'title': self.create_title(matches),", "matches): client = TwilioClient(self.twilio_account_sid, self.twilio_auth_token) try: client.messages.create(body=self.rule['name'], to=self.twilio_to_number, from_=self.twilio_from_number) except", "text of aggregated alerts with dashes if len(matches) > 1:", "contains user and password information. \"\"\" account_conf = yaml_loader(account_file) if", "(%s)' % (jql, ','.join(self.bump_not_in_statuses)) try: issues = self.client.search_issues(jql) except JIRAError", "{ \"message_type\": self.victorops_message_type, \"entity_display_name\": self.victorops_entity_display_name, \"monitoring_tool\": \"ElastAlert\", \"state_message\": body }", "if response != 200: raise EAException(\"Error posting to Exotel, response", "(self.rule['name'], lookup_es_key(match, self.rule['timestamp_field']))) alerts.append( '2)Alert for %s at %s:' %", "+= '\\n' def _add_match_items(self): match_items = self.match.items() match_items.sort(key=lambda x: x[0])", "title, date) if self.bump_in_statuses: jql = '%s and status in", "client = TwilioClient(self.twilio_account_sid, self.twilio_auth_token) try: client.messages.create(body=self.rule['name'], to=self.twilio_to_number, from_=self.twilio_from_number) except TwilioRestException", "elastalert_logger.info(\"HTTP Post alert sent.\") def get_info(self): return {'type': 'http_post', 'http_post_webhook_url':", "__init__(self, rule): super(TelegramAlerter, self).__init__(rule) self.telegram_bot_token = self.rule['telegram_bot_token'] self.telegram_room_id = self.rule['telegram_room_id']", "as e: raise EAException(\"Error connecting to SMTP host: %s\" %", "def get_info(self): return {'type': 'twilio', 'twilio_client_name': self.twilio_from_number} class VictorOpsAlerter(Alerter): \"\"\"", "= unicode(BasicMatchString(self.rule, match)) fullmessage['alertDate'] = 
datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\") fullmessage['body'] = self.create_alert_body(matches)", "Exception(\"Could not find a definition for the jira field '{0}'\".format(normalized_jira_field))", "Creates a Microsoft Teams Conversation Message for each alert \"\"\"", "timestamp = pretty_ts(lookup_es_key(match, self.rule['timestamp_field'])) comment = \"This alert was triggered", "except JIRAError as e: raise EAException(\"Error creating JIRA ticket using", "= self.rule['alert_subject_args'] alert_subject_values = [lookup_es_key(matches[0], arg) for arg in alert_subject_args]", "a list if it isn't cc = self.rule.get('cc') if cc", "True if '%' in self.rule['command']: logging.warning('Warning! You could be vulnerable", "self.victorops_message_type = self.rule['victorops_message_type'] self.victorops_entity_display_name = self.rule.get('victorops_entity_display_name', 'no entity display name')", "matches: if qk in match: elastalert_logger.info( 'Alert for %s, %s", "All jira fields should be found in the 'id' or", "self.jira_args[arg_name] = [{'name': v} for v in value] # Handle", "get_info(self): return {'type': 'exotel', 'exotel_account': self.exotel_account_sid} class TwilioAlerter(Alerter): required_options =", "in matches: key_tuple = tuple([unicode(lookup_es_key(match, key)) for key in summary_table_fields])", "# Some built-in jira types that can be used as", "in self.rule: if type(value) == int: return int(self.rule[strValue[1:-1]]) else: return", "self.server return None elastalert_logger.info('Commenting on existing ticket %s' % (ticket.key))", "is not None and 'jira_ticket' in self.pipeline: url = '%s/browse/%s'", "if self.ms_teams_proxy else None payload = { '@type': 'MessageCard', '@context':", "'http://schema.org/extensions', 'summary': self.ms_teams_alert_summary, 'title': self.create_title(matches), 'text': body } if self.ms_teams_theme_color", "if self.watchers: for watcher in self.watchers: try: self.client.add_watcher(self.issue.key, 
watcher) except", "+= \"Aggregation resulted in the following data for summary_table_fields ==>", "in value] else: self.jira_args[arg_name] = value elif array_items == 'number':", "value in self.rule.iteritems(): # If we find a field that", "'name' as the key # This may not work, as", "error) as e: raise EAException(\"Error connecting to SMTP host: %s\"", "in self.rule and lookup_es_key(matches[0], self.rule['query_key']): title = 'ElastAlert: %s matched", "Subject=self.create_title(matches) ) elastalert_logger.info(\"Sent sns notification to %s\" % (self.sns_topic_arn)) class", "'trigger', 'incident_key': self.get_incident_key(matches), 'client': self.pagerduty_client_name, 'details': { \"information\": body.encode('UTF-8'), },", "as TwilioClient from util import EAException from util import elastalert_logger", "(self.pipeline['jira_server'], self.pipeline['jira_ticket']) body += '\\nJIRA ticket: %s' % (url) to_addr", "SMTPAuthenticationError from smtplib import SMTPException from socket import error import", "itself as an API error that will bubble up self.jira_args[arg_name]", "body = self.description + '\\n' body += self.get_aggregation_summary_text(matches) for match", "(self.rule['name']) return subject def alert(self, matches): body = self.create_alert_body(matches) session", "'') self.url = 'https://%s/v2/room/%s/notification?auth_token=%s' % ( self.hipchat_domain, self.hipchat_room_id, self.hipchat_auth_token) self.hipchat_proxy", "call above if val is None: val = self.rule.get(name) kw[kw_name]", "'ServiceNow', 'self.servicenow_rest_url': self.servicenow_rest_url} class HTTPPostAlerter(Alerter): \"\"\" Requested elasticsearch indices are", "def get_info(self): return {'type': 'telegram', 'telegram_room_id': self.telegram_room_id} class GitterAlerter(Alerter): \"\"\"", "to victorops headers = {'content-type': 'application/json'} # set https proxy,", "u' ```' headers = {'content-type': 'application/json'} # set https proxy,", "'' if 'aggregation' in 
self.rule and 'summary_table_fields' in self.rule: summary_table_fields", "to a list if it isn't already if isinstance(self.rule['email'], basestring):", "class SnsAlerter(Alerter): \"\"\" Send alert using AWS SNS service \"\"\"", "% (self.pipeline['jira_server'], self.pipeline['jira_ticket']) body += '\\nJIRA ticket: %s' % (url)", "code while running command %s\" % (' '.join(command))) except OSError", "{ \"description\": description, \"short_description\": self.rule['short_description'], \"comments\": self.rule['comments'], \"assignment_group\": self.rule['assignment_group'], \"category\":", "statuses of (%s). As such, no tickets will ever be", "self.components: # Support single component or list if type(self.components) !=", "raise EAException(\"Error posting to pagerduty: %s\" % e) elastalert_logger.info(\"Trigger sent", "multi-select custom types (the JIRA metadata says that these are", "alert_value: alert_subject_values[i] = alert_value alert_subject_values = ['<MISSING VALUE>' if val", "self.ms_teams_webhook_url = [self.ms_teams_webhook_url] self.ms_teams_proxy = self.rule.get('ms_teams_proxy', None) self.ms_teams_alert_summary = self.rule.get('ms_teams_alert_summary',", "# Here is a sample of one of them: #", "else None payload = { 'username': self.slack_username_override, 'channel': self.slack_channel_override, 'parse':", "self.resolve_rule_references(root[i]) else: root[i] = self.resolve_rule_reference(item) elif type(root) == dict: #", "as e: logging.exception(\"Error while commenting on ticket %s: %s\" %", "the lookup_es_key call above for i in xrange(len(alert_text_values)): if alert_text_values[i]", "self.pipeline['jira_ticket'] = None self.pipeline['jira_server'] = self.server return None elastalert_logger.info('Commenting on", "sys import warnings from email.mime.text import MIMEText from email.utils import", "Incident for each alert \"\"\" required_options = frozenset(['victorops_api_key', 'victorops_routing_key', 'victorops_message_type'])", "= 
title.replace(matches[0].get(self.rule['jira_ignore_in_title'], ''), '') # This is necessary for search", "not None: self.pipeline['jira_ticket'] = None self.pipeline['jira_server'] = self.server return None", "proxies = {'https': self.telegram_proxy} if self.telegram_proxy else None payload =", "data=json.dumps(payload, cls=DateTimeEncoder), proxies=proxies ) response.raise_for_status() except RequestException as e: raise", "\"Content-Type\": \"application/json\", \"Accept\": \"application/json;charset=utf-8\" } proxies = {'https': self.post_proxy} if", "# If there is a cc then also convert it", "then also convert it to a list if it isn't", "= counts.items() if not top_events: self.text += 'No events found.\\n'", "= account_conf['user'] self.password = account_conf['password'] class StompAlerter(Alerter): \"\"\" The stomp", "return body def get_aggregation_summary_text(self, matches): text = '' if 'aggregation'", "self.assignee} try: self.client = JIRA(self.server, basic_auth=(self.user, self.password)) self.get_priorities() self.get_arbitrary_fields() except", "self.pagerduty_proxy = self.rule.get('pagerduty_proxy', None) self.url = 'https://events.pagerduty.com/generic/2010-04-15/create_event.json' def alert(self, matches):", "```' headers = {'content-type': 'application/json'} # set https proxy, if", "import JIRA from jira.exceptions import JIRAError from requests.exceptions import RequestException", "strings or numbers if arg_type == 'array': # As a", "body = self.create_alert_body(matches) # post to victorops headers = {'content-type':", "data for summary_table_fields ==> {0}:\\n\\n\".format( summary_table_fields_with_count ) text_table = Texttable()", "self.rule['telegram_bot_token'] self.telegram_room_id = self.rule['telegram_room_id'] self.telegram_api_url = self.rule.get('telegram_api_url', 'api.telegram.org') self.url =", "( msg, ','.join(intersection)) msg += ' This should be simplified", "self.pipeline['jira_server'] = self.server return None 
elastalert_logger.info('Commenting on existing ticket %s'", "stomp from exotel import Exotel from jira.client import JIRA from", "if type(self.labels) != list: self.labels = [self.labels] self.jira_args['labels'] = self.labels", "'jira_account_file', 'jira_project', 'jira_issuetype']) # Maintain a static set of built-in", "title def get_info(self): return {'type': 'jira'} class CommandAlerter(Alerter): required_options =", "= [self.labels] self.jira_args['labels'] = self.labels if self.watchers: # Support single", "found.\\n' else: top_events.sort(key=lambda x: x[1], reverse=True) for term, count in", "= self.rule.get('victorops_proxy', None) def alert(self, matches): body = self.create_alert_body(matches) #", "get_info(self): return {'type': 'twilio', 'twilio_client_name': self.twilio_from_number} class VictorOpsAlerter(Alerter): \"\"\" Creates", "OR raise and fail to load the alert entirely? Probably", "def default(self, obj): if hasattr(obj, 'isoformat'): return obj.isoformat() else: return", "= [{'name': v} for v in value] # Handle non-array", "(the JIRA metadata says that these are strings, but #", "get_priorities(self): \"\"\" Creates a mapping of priority index to id.", "given rule. \"\"\" def __init__(self, rule, match): self.rule = rule", "and jira_bump_not_in_statuses (%s) are set.' % \\ (','.join(self.bump_in_statuses), ','.join(self.bump_not_in_statuses)) intersection", "is necessary for search to work. Other special characters and", "self.pagerduty_service_key = self.rule['pagerduty_service_key'] self.pagerduty_client_name = self.rule['pagerduty_client_name'] self.pagerduty_incident_key = self.rule.get('pagerduty_incident_key', '')", "encountered when trying to add '{0}' as a watcher. 
Does", "if 'alert_text' not in self.rule: self.text += self.rule['name'] + '\\n\\n'", "for two built-in types, id: issuekey and id: thumbnail if", "{'type': 'twilio', 'twilio_client_name': self.twilio_from_number} class VictorOpsAlerter(Alerter): \"\"\" Creates a VictorOps", "is not None: self.pipeline['jira_ticket'] = self.issue self.pipeline['jira_server'] = self.server def", "'\\n' def _add_custom_alert_text(self): missing = '<MISSING VALUE>' alert_text = unicode(self.rule.get('alert_text',", "self.create_alert_body(matches) # HipChat sends 400 bad request on messages longer", "get stripped search version if 'alert_subject' not in self.rule: title", "self._ensure_new_line() if self.rule.get('top_count_keys'): self._add_top_counts() if self.rule.get('alert_text_type') != 'exclude_fields': self._add_match_items() return", "= frozenset(['hipchat_auth_token', 'hipchat_room_id']) def __init__(self, rule): super(HipChatAlerter, self).__init__(rule) self.hipchat_msg_color =", "covered by the set that we are aware of, it", "= self.rule.get('telegram_proxy', None) def alert(self, matches): body = u'⚠ *%s*", "self.text += alert_text def _add_rule_text(self): self.text += self.rule['type'].get_match_str(self.match) def _add_top_counts(self):", "self.rule.get('stomp_password', '<PASSWORD>') self.stomp_destination = self.rule.get('stomp_destination', '/queue/ALERT') conn = stomp.Connection([(self.stomp_hostname, self.stomp_hostport)])", "formatdate() if self.rule.get('cc'): email_msg['CC'] = ','.join(self.rule['cc']) to_addr = to_addr +", "matches): qk = self.rule.get('query_key', None) for match in matches: if", "def find_existing_ticket(self, matches): # Default title, get stripped search version", "jira_args (%s): %s\" % (self.jira_args, e)) elastalert_logger.info(\"Opened Jira ticket: %s\"", "{'https': self.hipchat_proxy} if self.hipchat_proxy else None payload = { 'color':", "If the rule has a query_key, add that value plus", "in body.split('\\n'))).replace('\\n``````', 
'') return body def alert(self, matches): body =", "response = requests.post( self.url, data=json.dumps(payload, cls=DateTimeEncoder, ensure_ascii=False), headers=headers, proxies=proxies )", "self.twilio_auth_token) try: client.messages.create(body=self.rule['name'], to=self.twilio_to_number, from_=self.twilio_from_number) except TwilioRestException as e: raise", "= self.rule.get('jira_bump_after_inactivity', self.max_age) self.watchers = self.rule.get('jira_watchers') if self.bump_in_statuses and self.bump_not_in_statuses:", "to VictorOps\") def get_info(self): return {'type': 'victorops', 'victorops_routing_key': self.victorops_routing_key} class", "self.rule['password']), headers=headers, data=json.dumps(payload, cls=DateTimeEncoder), proxies=proxies ) response.raise_for_status() except RequestException as", "self.match.items() if not x.startswith('top_events_')]) json_blob = self._pretty_print_as_json(match_items) preformatted_text = u'{{code:json}}{0}{{code}}'.format(json_blob)", "1: body += '\\n----------------------------------------\\n' return body def get_aggregation_summary_text(self, matches): text", "if jira_field.startswith('jira_') and jira_field not in self.known_field_list: # Remove the", "self.rule.get('query_key', None) fullmessage = {} for match in matches: if", "get_aggregation_summary_text(self, matches): text = '' if 'aggregation' in self.rule and", "def comment_on_ticket(self, ticket, match): text = unicode(JiraFormattedMatchString(self.rule, match)) timestamp =", "# Include a count aggregation so that we can see", "create_default_title(self, matches): subject = 'ElastAlert: %s' % (self.rule['name']) return subject", "= 'project=%s AND summary~\"%s\" and created >= \"%s\"' % (self.project,", "text += text_table.draw() + '\\n\\n' return unicode(text) def create_default_title(self, matches):", "response = requests.post( self.servicenow_rest_url, auth=(self.rule['username'], self.rule['password']), headers=headers, data=json.dumps(payload, 
cls=DateTimeEncoder), proxies=proxies", "is None else val for val in incident_key_values] return self.pagerduty_incident_key.format(*incident_key_values)", "default(self, obj): if hasattr(obj, 'isoformat'): return obj.isoformat() else: return json.JSONEncoder.default(self,", "assignee: self.jira_args['assignee'] = {'name': assignee} elif 'assignee' in self.jira_args: self.jira_args.pop('assignee')", "to ServiceNow: %s\" % e) elastalert_logger.info(\"Alert sent to ServiceNow\") def", "manifest itself as an API error that will bubble up", "was provided proxies = {'https': self.telegram_proxy} if self.telegram_proxy else None", "Each match will trigger a POST to the specified endpoint(s).", "None: self.jira_args['priority'] = {'id': self.priority_ids[self.priority]} except KeyError: logging.error(\"Priority %s not", "key in summary_table_fields]) if key_tuple not in match_aggregation: match_aggregation[key_tuple] =", "except KeyError: logging.error(\"Priority %s not found. Valid priorities are %s\"", "self.hipchat_room_id} class MsTeamsAlerter(Alerter): \"\"\" Creates a Microsoft Teams Conversation Message", "in the following data for summary_table_fields ==> {0}:\\n\\n\".format( summary_table_fields_with_count )", "if alert_value: alert_subject_values[i] = alert_value alert_subject_values = ['<MISSING VALUE>' if", "val is None else val for val in incident_key_values] return", "https proxy, if it was provided proxies = {'https': self.gitter_proxy}", "get_incident_key(self, matches): if self.pagerduty_incident_key_args: incident_key_values = [lookup_es_key(matches[0], arg) for arg", "self.exotel_to_number = self.rule['exotel_to_number'] self.exotel_from_number = self.rule['exotel_from_number'] self.sms_body = self.rule.get('exotel_message_body', '')", "it means it is either: # 1. 
A built-in supported", "util import elastalert_logger from util import lookup_es_key from util import", "notification to %s\" % (self.sns_topic_arn)) class HipChatAlerter(Alerter): \"\"\" Creates a", "self.smtp_cert_file = self.rule.get('smtp_cert_file') # Convert email to a list if", "in enumerate(copy.copy(root)): if type(item) == dict or type(item) == list:", "= jira_field[5:].replace('_', ' ').lower() # All jira fields should be", "'\\n----------------------------------------\\n' return body def get_aggregation_summary_text(self, matches): text = '' if", "[ { 'color': self.slack_msg_color, 'title': self.create_title(matches), 'text': body, 'mrkdwn_in': ['text',", "\"\"\" return {'type': 'Unknown'} def create_title(self, matches): \"\"\" Creates custom", "value_str) def _pretty_print_as_json(self, blob): try: return json.dumps(blob, cls=DateTimeEncoder, sort_keys=True, indent=4,", "into description. description = str(BasicMatchString(self.rule, match)) # Set proper headers", "'<PASSWORD>') self.stomp_destination = self.rule.get('stomp_destination', '/queue/ALERT') conn = stomp.Connection([(self.stomp_hostname, self.stomp_hostport)]) conn.start()", "or type(value) == list: self.resolve_rule_references(root[key]) else: root[key] = self.resolve_rule_reference(value) def", "%s at %s:' % (self.rule['name'], lookup_es_key(match, self.rule['timestamp_field']))) elastalert_logger.info(unicode(BasicMatchString(self.rule, match))) def", "+= self.rule['name'] + '\\n\\n' self._add_custom_alert_text() self._ensure_new_line() if self.rule.get('alert_text_type') != 'alert_text_only':", "if it exists if self.pipeline is not None and 'jira_ticket'", "list: self.resolve_rule_references(root[i]) else: root[i] = self.resolve_rule_reference(item) elif type(root) == dict:", "'option': self.jira_args[arg_name] = {'value': value} # Complex type else: self.jira_args[arg_name]", "AWS SNS service \"\"\" required_options = frozenset(['sns_topic_arn']) def __init__(self, *args):", 
"self).__init__(*args) self.sns_topic_arn = self.rule.get('sns_topic_arn', '') self.aws_access_key_id = self.rule.get('aws_access_key_id') self.aws_secret_access_key =", "e) elastalert_logger.info(\"HTTP Post alert sent.\") def get_info(self): return {'type': 'http_post',", "e: raise EAException(\"SMTP username/password rejected: %s\" % (e)) self.smtp.sendmail(self.from_addr, to_addr,", "if qk: subject += ' - %s' % (qk) return", "for v in value] # Also attempt to handle arrays", "else: if self.smtp_port: self.smtp = SMTP(self.smtp_host, self.smtp_port) else: self.smtp =", "and try to set a string value known_field_list = [", "self.format_body(body) # post to slack headers = {'content-type': 'application/json'} #", "# Maintain a static set of built-in fields that we", "self.rule['twilio_to_number'] self.twilio_from_number = self.rule['twilio_from_number'] def alert(self, matches): client = TwilioClient(self.twilio_account_sid,", "isn't bcc = self.rule.get('bcc') if bcc and isinstance(bcc, basestring): self.rule['bcc']", "self.smtp_port: self.smtp = SMTP(self.smtp_host, self.smtp_port) else: self.smtp = SMTP(self.smtp_host) self.smtp.ehlo()", "indent=4, encoding='Latin-1', ensure_ascii=False) def __str__(self): self.text = '' if 'alert_text'", "as e: logging.exception(\"Error while searching for JIRA ticket using jql", "'hipchat', 'hipchat_room_id': self.hipchat_room_id} class MsTeamsAlerter(Alerter): \"\"\" Creates a Microsoft Teams", "API a more representative name self.components = self.rule.get('jira_components', self.rule.get('jira_component')) #", "or the 'name' field. 
Therefore, try both just in case", "Conversation Message for each alert \"\"\" required_options = frozenset(['ms_teams_webhook_url', 'ms_teams_alert_summary'])", "to set it # Note this is only the case", "self.slack_webhook_url: try: response = requests.post(url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies) response.raise_for_status()", "exception, preserve the stack-trace, and give some # context as", "if self.ms_teams_theme_color != '': payload['themeColor'] = self.ms_teams_theme_color for url in", "in the lookup_es_key call above for i in xrange(len(alert_text_values)): if", "+= '\\n----------------------------------------\\n' return body def get_aggregation_summary_text(self, matches): text = super(JiraAlerter,", "a glance how many of each aggregation_key were encountered summary_table_fields_with_count", "recipient: to_addr = [recipient] elif 'email_add_domain' in self.rule: to_addr =", "sort_keys=True, indent=4, encoding='Latin-1', ensure_ascii=False) def __str__(self): self.text = '' if", "of the structure we're walking for i, item in enumerate(copy.copy(root)):", "lookup_es_key(match, self.rule['timestamp_field'])) ) fullmessage['match'] = lookup_es_key(match, self.rule['timestamp_field']) elastalert_logger.info(unicode(BasicMatchString(self.rule, match))) fullmessage['alerts']", "to_addr = [recipient + self.rule['email_add_domain']] elif isinstance(recipient, list): to_addr =", "JIRAError as e: # JIRAError may contain HTML, pass along", "title = title.replace(' - ', ' ') title = title.replace('\\\\',", "'No events found.\\n' else: top_events.sort(key=lambda x: x[1], reverse=True) for term,", "(self.rule['name']) # If the rule has a query_key, add that", "Message') self.ms_teams_alert_fixed_width = self.rule.get('ms_teams_alert_fixed_width', False) self.ms_teams_theme_color = self.rule.get('ms_teams_theme_color', '') def", "find that type? 
# OR raise and fail to load", "= self.rule.get('gitter_proxy', None) self.gitter_msg_level = self.rule.get('gitter_msg_level', 'error') def alert(self, matches):", "# For anything else, we will do best-effort and try", "match)) if len(matches) > 1: body += '\\n----------------------------------------\\n' return body", "add_suffix.startswith('@'): self.rule['email_add_domain'] = '@' + add_suffix def alert(self, matches): body", "scenario wherein the user only provides # a single value", "[int(v) for v in value] # Also attempt to handle", "self.watchers = [self.watchers] if self.assignee: self.jira_args['assignee'] = {'name': self.assignee} try:", "{'key': self.project}, 'issuetype': {'name': self.issue_type}} if self.components: # Support single", "self.priority_ids.keys())) def get_arbitrary_fields(self): # This API returns metadata about all", "self.rule['cc'] = [self.rule['cc']] # If there is a bcc then", "= self.create_alert_body(matches) body = self.format_body(body) # post to Teams headers", "'\\n\\n' return unicode(text) def create_default_title(self, matches): return self.rule['name'] def get_account(self,", "self.smtp_ssl = self.rule.get('smtp_ssl', False) self.from_addr = self.rule.get('from_addr', 'ElastAlert') self.smtp_port =", "# post to Teams headers = {'content-type': 'application/json'} # set", "HipChat room %s\" % self.hipchat_room_id) def get_info(self): return {'type': 'hipchat',", "for url in self.slack_webhook_url: try: response = requests.post(url, data=json.dumps(payload, cls=DateTimeEncoder),", "key, counts in self.match.items(): if key.startswith('top_events_'): self.text += '%s:\\n' %", "= self.rule['victorops_message_type'] self.victorops_entity_display_name = self.rule.get('victorops_entity_display_name', 'no entity display name') self.url", "in ['name', 'id']: field = next((f for f in fields", "name') self.url = 'https://alert.victorops.com/integrations/generic/20131114/alert/%s/%s' % ( self.victorops_api_key, 
self.victorops_routing_key) self.victorops_proxy =", "= self.create_alert_body(matches) # post to pagerduty headers = {'content-type': 'application/json'}", "self.server return None self.jira_args['summary'] = title self.jira_args['description'] = self.create_alert_body(matches) try:", "# There are likely others that will need to be", "%s:' % (self.rule['name'], match[qk], lookup_es_key(match, self.rule['timestamp_field']))) else: elastalert_logger.info('Alert for %s", "assignee} elif 'assignee' in self.jira_args: self.jira_args.pop('assignee') def find_existing_ticket(self, matches): #", "https proxy, if it was provided proxies = {'https': self.hipchat_proxy}", "%s\" % e) elastalert_logger.info(\"Trigger sent to PagerDuty\") def get_incident_key(self, matches):", "response) except: raise EAException(\"Error posting to Exotel\"), None, sys.exc_info()[2] elastalert_logger.info(\"Trigger", "*%s* ⚠ ```\\n' % (self.create_title(matches)) for match in matches: body", "def __init__(self, rule): super(ServiceNowAlerter, self).__init__(rule) self.servicenow_rest_url = self.rule['servicenow_rest_url'] self.servicenow_proxy =", "self.rule.get(\"fail_on_non_zero_exit\", False) and subp.wait(): raise EAException(\"Non-zero exit code while running", "mapping of priority index to id. \"\"\" priorities = self.client.priorities()", "of them: # {\"id\":\"customfield_12807\",\"name\":\"My Custom Field\",\"custom\":true,\"orderable\":true,\"navigable\":true,\"searchable\":true, # \"clauseNames\":[\"cf[12807]\",\"My Custom Field\"],\"schema\":{\"type\":\"array\",\"items\":\"string\",", "# as an es result key, since it would have", "to which watcher failed to be added raise Exception( \"Exception", "copy since we may be modifying the contents of the", "a Python logger (by default, alerting to terminal). \"\"\" def", "type(root) == list: # Make a copy since we may", "the contents of the structure we're walking for key, value", "1. 
A built-in supported field in JIRA that we don't", "Only_One_Label if type(value) != list: value = [value] array_items =", "= self.rule['exotel_auth_token'] self.exotel_to_number = self.rule['exotel_to_number'] self.exotel_from_number = self.rule['exotel_from_number'] self.sms_body =", "the case for two built-in types, id: issuekey and id:", "from email.utils import formatdate from smtplib import SMTP from smtplib", "rule # pipeline object is created by ElastAlerter.send_alert() # and", "TypeError: # Non serializable object, fallback to str pass self.text", "None elastalert_logger.info('Commenting on existing ticket %s' % (ticket.key)) for match", "self.pagerduty_incident_key_args] # Populate values with rule level properties too for", "None payload = { 'message': body, 'level': self.gitter_msg_level } try:", "\"\"\" required_options = set([ 'username', 'password', 'servicenow_rest_url', 'short_description', 'comments', 'assignment_group',", "'stomp_hostport', 'stomp_login', 'stomp_password']) def alert(self, matches): alerts = [] qk", "} if self.ms_teams_theme_color != '': payload['themeColor'] = self.ms_teams_theme_color for url", "is None: alert_value = self.rule.get(alert_text_args[i]) if alert_value: alert_text_values[i] = alert_value", "import SMTPException from socket import error import boto3 import requests", "except TypeError: # Non serializable object, fallback to str pass", "for i, item in enumerate(copy.copy(root)): if type(item) == dict or", "self.hipchat_room_id) def get_info(self): return {'type': 'hipchat', 'hipchat_room_id': self.hipchat_room_id} class MsTeamsAlerter(Alerter):", "\"state_message\": body } try: response = requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers,", "(self.project, title, date) if self.bump_in_statuses: jql = '%s and status", "intersection = list(set(self.bump_in_statuses) & set(self.bump_in_statuses)) if intersection: msg = '%s", "except TwilioRestException as e: raise 
EAException(\"Error posting to twilio: %s\"", "len(matches) > 1: body += '\\n----------------------------------------\\n' body += u' ```'", "Custom Field\"],\"schema\":{\"type\":\"array\",\"items\":\"string\", # \"custom\":\"com.atlassian.jira.plugin.system.customfieldtypes:multiselect\",\"customId\":12807}} # There are likely others that", "self.victorops_routing_key = self.rule['victorops_routing_key'] self.victorops_message_type = self.rule['victorops_message_type'] self.victorops_entity_display_name = self.rule.get('victorops_entity_display_name', 'no", "self.create_alert_body(matches) session = boto3.Session( aws_access_key_id=self.aws_access_key_id, aws_secret_access_key=self.aws_secret_access_key, region_name=self.aws_region, profile_name=self.profile ) sns_client", "val for val in alert_text_values] alert_text = alert_text.format(*alert_text_values) elif 'alert_text_kw'", "self.rule['assignment_group'], \"category\": self.rule['category'], \"subcategory\": self.rule['subcategory'], \"cmdb_ci\": self.rule['cmdb_ci'], \"caller_id\": self.rule[\"caller_id\"] }", "% (e)) # Run command and pipe data try: subp", "Name of the file which contains user and password information.", "'number': self.jira_args[arg_name] = [int(v) for v in value] # Also", "searching for JIRA ticket using jql '%s': %s\" % (jql,", "def alert(self, matches): title = self.create_title(matches) if self.bump_tickets: ticket =", "import lookup_es_key from util import pretty_ts from util import ts_now", "title self.jira_args['description'] = self.create_alert_body(matches) try: self.issue = self.client.create_issue(**self.jira_args) # You", "by a rule before calling alert() self.pipeline = None self.resolve_rule_references(self.rule)", "Only as a follow-up action if self.watchers: for watcher in", "body = self.create_alert_body(matches) session = boto3.Session( aws_access_key_id=self.aws_access_key_id, aws_secret_access_key=self.aws_secret_access_key, region_name=self.aws_region, 
profile_name=self.profile", "self.rule['exotel_account_sid'] self.exotel_auth_token = self.rule['exotel_auth_token'] self.exotel_to_number = self.rule['exotel_to_number'] self.exotel_from_number = self.rule['exotel_from_number']", "in self.pipeline: url = '%s/browse/%s' % (self.pipeline['jira_server'], self.pipeline['jira_ticket']) body +=", "'jira_label', 'jira_labels', 'jira_max_age', 'jira_priority', 'jira_project', 'jira_server', 'jira_watchers', ] # Some", "schema information for the jira field '{0}'\".format(normalized_jira_field)) arg_type = field['schema']['type']", "= client.sms(self.rule['exotel_from_number'], self.rule['exotel_to_number'], message_body) if response != 200: raise EAException(\"Error", "self.bump_after_inactivity = self.rule.get('jira_bump_after_inactivity', self.max_age) self.watchers = self.rule.get('jira_watchers') if self.bump_in_statuses and", "self.create_alert_body(matches) # post to Gitter headers = {'content-type': 'application/json'} #", "for jira_field, value in self.rule.iteritems(): # If we find a", "isinstance(post_url, basestring): post_url = [post_url] self.post_url = post_url self.post_proxy =", "the 'id' or the 'name' field. 
Therefore, try both just", "class BasicMatchString(object): \"\"\" Creates a string containing fields in match", "as e: raise EAException(\"Error posting to Telegram: %s\" % e)", "\"%s\"' % (self.project, title, date) if self.bump_in_statuses: jql = '%s", "for custom types (the JIRA metadata says that these are", "elastalert_logger.info(\"Trigger sent to PagerDuty\") def get_incident_key(self, matches): if self.pagerduty_incident_key_args: incident_key_values", "that value plus timestamp to subject if 'query_key' in self.rule:", "\"application/json;charset=utf-8\" } proxies = {'https': self.servicenow_proxy} if self.servicenow_proxy else None", "the user only provides # a single value for a", "= self.rule.get('email_add_domain') if add_suffix and not add_suffix.startswith('@'): self.rule['email_add_domain'] = '@'", "self.jira_args['assignee'] = {'name': self.assignee} try: self.client = JIRA(self.server, basic_auth=(self.user, self.password))", "an incident on PagerDuty for each alert \"\"\" required_options =", "'twilio_to_number', 'twilio_from_number']) def __init__(self, rule): super(TwilioAlerter, self).__init__(rule) self.twilio_account_sid = self.rule['twilio_account_sid']", "HipChat room notification for each alert \"\"\" required_options = frozenset(['hipchat_auth_token',", "self.pipeline is not None: self.pipeline['jira_ticket'] = self.issue self.pipeline['jira_server'] = self.server", "alert entirely? Probably the latter... raise Exception(\"Could not find a", "logging.warning(msg) self.jira_args = {'project': {'key': self.project}, 'issuetype': {'name': self.issue_type}} if", "# directly adjacent to words appear to be ok title", "import json import logging import subprocess import sys import warnings", "not add watchers on initial creation. 
Only as a follow-up", "count aggregation so that we can see at a glance", "contents of the structure we're walking for i, item in", "% matches[0] for command_arg in self.rule['command']] self.last_command = command except", "# Re-raise the exception, preserve the stack-trace, and give some", "'alert_subject_args' in self.rule: alert_subject_args = self.rule['alert_subject_args'] alert_subject_values = [lookup_es_key(matches[0], arg)", "'2)Alert for %s at %s:' % (self.rule['name'], lookup_es_key(match, self.rule['timestamp_field'])) )", "status not in (%s)' % (jql, ','.join(self.bump_not_in_statuses)) try: issues =", "error import boto3 import requests import stomp from exotel import", "id: issuekey and id: thumbnail if not ('schema' in field", "self.hipchat_proxy} if self.hipchat_proxy else None payload = { 'color': self.hipchat_msg_color,", "self.slack_proxy} if self.slack_proxy else None payload = { 'username': self.slack_username_override,", "and password fields') self.user = account_conf['user'] self.password = account_conf['password'] class", "] } if self.slack_icon_url_override != '': payload['icon_url'] = self.slack_icon_url_override else:", "self.pagerduty_client_name = self.rule['pagerduty_client_name'] self.pagerduty_incident_key = self.rule.get('pagerduty_incident_key', '') self.pagerduty_incident_key_args = self.rule.get('pagerduty_incident_key_args',", "else None try: response = requests.post( self.url, data=json.dumps(payload, cls=DateTimeEncoder, ensure_ascii=False),", "% (e)) except SMTPAuthenticationError as e: raise EAException(\"SMTP username/password rejected:", "'jira_issuetype', 'jira_label', 'jira_labels', 'jira_max_age', 'jira_priority', 'jira_project', 'jira_server', 'jira_watchers', ] #", "to be provided as an object. 
if 'custom' in field['schema']", "Slack room message for each alert \"\"\" required_options = frozenset(['slack_webhook_url'])", "except Exception as ex: # Re-raise the exception, preserve the", "post to victorops headers = {'content-type': 'application/json'} # set https", "not in (%s)' % (jql, ','.join(self.bump_not_in_statuses)) try: issues = self.client.search_issues(jql)", "try: client.messages.create(body=self.rule['name'], to=self.twilio_to_number, from_=self.twilio_from_number) except TwilioRestException as e: raise EAException(\"Error", "an object. if 'custom' in field['schema'] and field['schema']['custom'] in self.custom_string_types_with_special_handling:", "self.rule['command'] = [self.rule['command']] self.new_style_string_format = False if 'new_style_string_format' in self.rule", "twilio: %s\" % e) elastalert_logger.info(\"Trigger sent to Twilio\") def get_info(self):", "'email_add_domain' in self.rule: to_addr = [name + self.rule['email_add_domain'] for name", "should contain a field type corresponding to the type of", "broker. 
\"\"\" required_options = frozenset(['stomp_hostname', 'stomp_hostport', 'stomp_login', 'stomp_password']) def alert(self,", "_add_custom_alert_text(self): missing = '<MISSING VALUE>' alert_text = unicode(self.rule.get('alert_text', '')) if", "{ 'username': self.slack_username_override, 'channel': self.slack_channel_override, 'parse': self.slack_parse_override, 'text': self.slack_text_string, 'attachments':", "for key in keys] + [count]) text += text_table.draw() +", "self.smtp = SMTP(self.smtp_host, self.smtp_port) else: self.smtp = SMTP(self.smtp_host) self.smtp.ehlo() if", "arg_type == 'number': self.jira_args[arg_name] = int(value) elif arg_type == 'option':", "+ self.sms_body response = client.sms(self.rule['exotel_from_number'], self.rule['exotel_to_number'], message_body) if response !=", "headers = { \"Content-Type\": \"application/json\", \"Accept\": \"application/json;charset=utf-8\" } proxies =", "{'value': value} # Complex type else: self.jira_args[arg_name] = {'name': value}", "the key # This may not work, as the key", "class VictorOpsAlerter(Alerter): \"\"\" Creates a VictorOps Incident for each alert", "to_addr = to_addr + self.rule['cc'] if self.rule.get('bcc'): to_addr = to_addr", "%s\" % (e)) except SMTPAuthenticationError as e: raise EAException(\"SMTP username/password", "(self.issue)) if self.pipeline is not None: self.pipeline['jira_ticket'] = self.issue self.pipeline['jira_server']", "headers = {'content-type': 'application/json'} payload = { 'service_key': self.pagerduty_service_key, 'description':", "alert \"\"\" required_options = frozenset(['jira_server', 'jira_account_file', 'jira_project', 'jira_issuetype']) # Maintain", "self.client.priorities() self.priority_ids = {} for x in range(len(priorities)): self.priority_ids[x] =", "super(ExotelAlerter, self).__init__(rule) self.exotel_account_sid = self.rule['exotel_account_sid'] self.exotel_auth_token = self.rule['exotel_auth_token'] self.exotel_to_number =", "body, 'level': 
self.gitter_msg_level } try: response = requests.post(self.gitter_webhook_url, json.dumps(payload, cls=DateTimeEncoder),", "information to the alert. \"\"\" raise NotImplementedError() def get_info(self): \"\"\"", "%s, %s at %s:' % (self.rule['name'], match[qk], lookup_es_key(match, self.rule['timestamp_field']))) else:", "class CommandAlerter(Alerter): required_options = set(['command']) def __init__(self, *args): super(CommandAlerter, self).__init__(*args)", "be found.' % ( msg, ','.join(intersection)) msg += ' This", "by ElastAlerter.send_alert() # and attached to each alerters used by", "structure we're walking for i, item in enumerate(copy.copy(root)): if type(item)", "self.get_priorities() self.get_arbitrary_fields() except JIRAError as e: # JIRAError may contain", "API error that will bubble up self.jira_args[arg_name] = [{'name': v}", "self.assignee: self.jira_args['assignee'] = {'name': self.assignee} try: self.client = JIRA(self.server, basic_auth=(self.user,", "# Parse everything into description. description = str(BasicMatchString(self.rule, match)) #", "to words appear to be ok title = title.replace(' -", "elastalert_logger.info(\"Opened Jira ticket: %s\" % (self.issue)) if self.pipeline is not", "lookup_es_key(match, self.rule['timestamp_field']))) else: elastalert_logger.info('Alert for %s at %s:' % (self.rule['name'],", "+= ' - %s' % (pretty_ts(matches[0][self.rule['timestamp_field']], self.rule.get('use_local_time'))) # Add count", "self.get_account(self.rule['smtp_auth_file']) self.smtp_key_file = self.rule.get('smtp_key_file') self.smtp_cert_file = self.rule.get('smtp_cert_file') # Convert email", "class HTTPPostAlerter(Alerter): \"\"\" Requested elasticsearch indices are sent by HTTP", "posting to slack: %s\" % e) elastalert_logger.info(\"Alert sent to Slack\")", "+= '%s:\\n' % (key[11:]) top_events = counts.items() if not top_events:", "only one or the other.' 
logging.warning(msg) self.jira_args = {'project': {'key':", "raise and fail to load the alert entirely? Probably the", "super(MsTeamsAlerter, self).__init__(rule) self.ms_teams_webhook_url = self.rule['ms_teams_webhook_url'] if isinstance(self.ms_teams_webhook_url, basestring): self.ms_teams_webhook_url =", "fallback to str pass self.text += '%s: %s\\n' % (key,", "class TwilioAlerter(Alerter): required_options = frozenset(['twilio_account_sid', 'twilio_auth_token', 'twilio_to_number', 'twilio_from_number']) def __init__(self,", "datetime.timedelta(days=self.bump_after_inactivity) if ts_to_dt(ticket.fields.updated) >= inactivity_datetime: if self.pipeline is not None:", "% (self.rule['name']) # If the rule has a query_key, add", "'api.hipchat.com') self.hipchat_ignore_ssl_errors = self.rule.get('hipchat_ignore_ssl_errors', False) self.hipchat_notify = self.rule.get('hipchat_notify', True) self.hipchat_from", "in incident_key_values] return self.pagerduty_incident_key.format(*incident_key_values) else: return self.pagerduty_incident_key def get_info(self): return", "__str__(self): self.text = '' if 'alert_text' not in self.rule: self.text", "VALUE>' if val is None else val for val in", "of data related to this alert. At minimum, this should", "provided proxies = {'https': self.ms_teams_proxy} if self.ms_teams_proxy else None payload", "lookup_es_key(match, self.rule['timestamp_field'])) ) fullmessage['match'] = match[qk] else: elastalert_logger.info('Alert for %s", "to spaces normalized_jira_field = jira_field[5:].replace('_', ' ').lower() # All jira", "match)) fullmessage['alertDate'] = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\") fullmessage['body'] = self.create_alert_body(matches) self.stomp_hostname =", "\"\"\" Send alert using AWS SNS service \"\"\" required_options =", "'text': body } if self.ms_teams_theme_color != '': payload['themeColor'] = self.ms_teams_theme_color", "field type corresponding to the type of Alerter. 
\"\"\" return", "ticket if it exists if self.pipeline is not None and", "fullmessage['match'] = lookup_es_key(match, self.rule['timestamp_field']) elastalert_logger.info(unicode(BasicMatchString(self.rule, match))) fullmessage['alerts'] = alerts fullmessage['rule']", "self.rule['timestamp_field']))) elastalert_logger.info(unicode(BasicMatchString(self.rule, match))) def get_info(self): return {'type': 'debug'} class EmailAlerter(Alerter):", "= requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, verify=not self.hipchat_ignore_ssl_errors, proxies=proxies) warnings.resetwarnings() response.raise_for_status()", "title title += ' - %s' % (pretty_ts(matches[0][self.rule['timestamp_field']], self.rule.get('use_local_time'))) #", "Add JIRA ticket if it exists if self.pipeline is not", "the set that we are aware of, it means it", "is a query_key, use that in the title if 'query_key'", "= {'name': value} def get_priorities(self): \"\"\" Creates a mapping of", "that in the title if 'query_key' in self.rule and lookup_es_key(matches[0],", "= matches[0].get('spike_count') if count: title += ' - %s+ events'", "of the structure we're walking for key, value in root.copy().iteritems():", "% (self.rule['name'], match[qk], lookup_es_key(match, self.rule['timestamp_field'])) ) fullmessage['match'] = match[qk] else:", "handling # Here is a sample of one of them:", "*args): super(SnsAlerter, self).__init__(*args) self.sns_topic_arn = self.rule.get('sns_topic_arn', '') self.aws_access_key_id = self.rule.get('aws_access_key_id')", "self.servicenow_proxy else None payload = { \"description\": description, \"short_description\": self.rule['short_description'],", "= '@' + add_suffix def alert(self, matches): body = self.create_alert_body(matches)", "import yaml_loader from texttable import Texttable from twilio.base.exceptions import TwilioRestException", "= self.description + '\\n' body += self.get_aggregation_summary_text(matches) for match in", 
"self.custom_string_types_with_special_handling: self.jira_args[arg_name] = [{'value': v} for v in value] else:", "= { 'username': self.slack_username_override, 'channel': self.slack_channel_override, 'parse': self.slack_parse_override, 'text': self.slack_text_string,", "case for custom types (the JIRA metadata says that these", "matches): body = self.create_alert_body(matches) # post to Gitter headers =", "{'https': self.post_proxy} if self.post_proxy else None for url in self.post_url:", "as an object, using 'name' as the key # This", "= {} for x in range(len(priorities)): self.priority_ids[x] = priorities[x].id def", "= [self.slack_webhook_url] self.slack_proxy = self.rule.get('slack_proxy', None) self.slack_username_override = self.rule.get('slack_username_override', 'elastalert')", "alert_text.format(**kw) self.text += alert_text def _add_rule_text(self): self.text += self.rule['type'].get_match_str(self.match) def", "if isinstance(self.ms_teams_webhook_url, basestring): self.ms_teams_webhook_url = [self.ms_teams_webhook_url] self.ms_teams_proxy = self.rule.get('ms_teams_proxy', None)", "match): \"\"\" Send an alert. Match is a dictionary of", "cc and isinstance(cc, basestring): self.rule['cc'] = [self.rule['cc']] # If there", "\"short_description\": self.rule['short_description'], \"comments\": self.rule['comments'], \"assignment_group\": self.rule['assignment_group'], \"category\": self.rule['category'], \"subcategory\": self.rule['subcategory'],", "file which contains user and password information. 
\"\"\" account_conf =", "was triggered again at %s\\n%s\" % (timestamp, text) self.client.add_comment(ticket, comment)", "sends 400 bad request on messages longer than 10000 characters", "self.rule.get('alert_text_type') != 'alert_text_only': self._add_rule_text() self._ensure_new_line() if self.rule.get('top_count_keys'): self._add_top_counts() if self.rule.get('alert_text_type')", "create_default_title(self, matches): subject = 'ElastAlert: %s' % (self.rule['name']) # If", "to id. \"\"\" priorities = self.client.priorities() self.priority_ids = {} for", "RequestException as e: raise EAException(\"Error posting to pagerduty: %s\" %", "Deprecated self.profile = self.rule.get('aws_profile', None) def create_default_title(self, matches): subject =", "# Make a copy since we may be modifying the", "json.dumps(matches, cls=DateTimeEncoder) + '\\n' stdout, stderr = subp.communicate(input=match_json) if self.rule.get(\"fail_on_non_zero_exit\",", "matches): for match in matches: # Parse everything into description.", "= match if self.post_all_values else {} payload.update(self.post_static_payload) for post_key, es_key", "= [ 'com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes', 'com.atlassian.jira.plugin.system.customfieldtypes:multiselect', 'com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons', ] def __init__(self, rule): super(JiraAlerter,", "# in reality, they are required to be provided as", "'%s: %s\\n' % (key, value_str) def _pretty_print_as_json(self, blob): try: return", "= self.rule.get('pagerduty_incident_key_args', None) self.pagerduty_proxy = self.rule.get('pagerduty_proxy', None) self.url = 'https://events.pagerduty.com/generic/2010-04-15/create_event.json'", "= frozenset([]) def __init__(self, rule): self.rule = rule # pipeline", "basestring): self.rule['cc'] = [self.rule['cc']] # If there is a bcc", "= self.server return None elastalert_logger.info('Commenting on existing ticket %s' %", "'stomp'} class DebugAlerter(Alerter): \"\"\" 
The debug alerter uses a Python", "list if type(self.components) != list: self.jira_args['components'] = [{'name': self.components}] else:", "was provided proxies = {'https': self.gitter_proxy} if self.gitter_proxy else None", "es_key in self.post_payload.items(): payload[post_key] = lookup_es_key(match, es_key) headers = {", "email to a list if it isn't already if isinstance(self.rule['email'],", "special handling # Here is a sample of one of", "properties to avoid redundant copy/paste if type(root) == list: #", "preformatted_text = u'{{code:json}}{0}{{code}}'.format(json_blob) self.text += preformatted_text class Alerter(object): \"\"\" Base", "try: if self.priority is not None: self.jira_args['priority'] = {'id': self.priority_ids[self.priority]}", "# pipeline object is created by ElastAlerter.send_alert() # and attached", "lookup_es_key(matches[0], self.rule['query_key']): title = 'ElastAlert: %s matched %s' % (lookup_es_key(matches[0],", "Exotel(self.exotel_account_sid, self.exotel_auth_token) try: message_body = self.rule['name'] + self.sms_body response =", "400 bad request on messages longer than 10000 characters if", "self.rule.get('smtp_port') if self.rule.get('smtp_auth_file'): self.get_account(self.rule['smtp_auth_file']) self.smtp_key_file = self.rule.get('smtp_key_file') self.smtp_cert_file = self.rule.get('smtp_cert_file')", "self.rule.get('telegram_api_url', 'api.telegram.org') self.url = 'https://%s/bot%s/%s' % (self.telegram_api_url, self.telegram_bot_token, \"sendMessage\") self.telegram_proxy", "= frozenset(['email']) def __init__(self, *args): super(EmailAlerter, self).__init__(*args) self.smtp_host = self.rule.get('smtp_host',", "the key might actually be 'key', 'id', 'value', or something", "terminal). 
\"\"\" def alert(self, matches): qk = self.rule.get('query_key', None) for", "command and arguments try: if self.new_style_string_format: command = [command_arg.format(match=matches[0]) for", "def __str__(self): self.text = '' if 'alert_text' not in self.rule:", "for the jira field '{0}'\".format(normalized_jira_field)) arg_name = field['id'] # Check", "_add_rule_text(self): self.text += self.rule['type'].get_match_str(self.match) def _add_top_counts(self): for key, counts in", "self).__init__(rule) self.ms_teams_webhook_url = self.rule['ms_teams_webhook_url'] if isinstance(self.ms_teams_webhook_url, basestring): self.ms_teams_webhook_url = [self.ms_teams_webhook_url]", "EAException(\"Error posting to Telegram: %s\" % e) elastalert_logger.info( \"Alert sent", "'com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons', ] def __init__(self, rule): super(JiraAlerter, self).__init__(rule) self.server = self.rule['jira_server']", "self.new_style_string_format: command = [command_arg.format(match=matches[0]) for command_arg in self.rule['command']] else: command", "ticket using jql '%s': %s\" % (jql, e)) return None", "self.rule.get('hipchat_from', '') self.url = 'https://%s/v2/room/%s/notification?auth_token=%s' % ( self.hipchat_domain, self.hipchat_room_id, self.hipchat_auth_token)", "self.rule['timestamp_field']))) alerts.append( '2)Alert for %s at %s:' % (self.rule['name'], lookup_es_key(match,", "def __init__(self, *args): super(SnsAlerter, self).__init__(*args) self.sns_topic_arn = self.rule.get('sns_topic_arn', '') self.aws_access_key_id", "if 'custom' in field['schema'] and field['schema']['custom'] in self.custom_string_types_with_special_handling: self.jira_args[arg_name] =", "class HipChatAlerter(Alerter): \"\"\" Creates a HipChat room notification for each", "self.rule.get('slack_icon_url_override', '') self.slack_msg_color = self.rule.get('slack_msg_color', 'danger') self.slack_parse_override = self.rule.get('slack_parse_override', 'none')", "if 
ts_to_dt(ticket.fields.updated) >= inactivity_datetime: if self.pipeline is not None: self.pipeline['jira_ticket']", "only a single component. This allows us to maintain backwards", "e: logging.exception(\"Error while commenting on ticket %s: %s\" % (ticket,", "frozenset(['pagerduty_service_key', 'pagerduty_client_name']) def __init__(self, rule): super(PagerDutyAlerter, self).__init__(rule) self.pagerduty_service_key = self.rule['pagerduty_service_key']", "field that is not covered by the set that we", "on our radar # 2. A custom field that a", "= MIMEText(body.encode('UTF-8'), _charset='UTF-8') email_msg['Subject'] = self.create_title(matches) email_msg['To'] = ', '.join(to_addr)", "find a field that is not covered by the set", "'') self.assignee = self.rule.get('jira_assignee') self.max_age = self.rule.get('jira_max_age', 30) self.priority =", "self.ms_teams_alert_fixed_width: body = body.replace('`', \"'\") body = \"```{0}```\".format('```\\n\\n```'.join(x for x", "if self.pagerduty_proxy else None try: response = requests.post( self.url, data=json.dumps(payload,", "\\ (','.join(self.bump_in_statuses), ','.join(self.bump_not_in_statuses)) intersection = list(set(self.bump_in_statuses) & set(self.bump_in_statuses)) if intersection:", "es_key) headers = { \"Content-Type\": \"application/json\", \"Accept\": \"application/json;charset=utf-8\" } proxies", "each alerters used by a rule before calling alert() self.pipeline", "Sends an email alert \"\"\" required_options = frozenset(['email']) def __init__(self,", "- %s' % (pretty_ts(matches[0][self.rule['timestamp_field']], self.rule.get('use_local_time'))) # Add count for spikes", "= { 'service_key': self.pagerduty_service_key, 'description': self.create_title(matches), 'event_type': 'trigger', 'incident_key': self.get_incident_key(matches),", "xrange(len(alert_subject_values)): if alert_subject_values[i] is None: alert_value = self.rule.get(alert_subject_args[i]) if alert_value:", "continue value_str = 
unicode(value) value_str.replace('\\\\n', '\\n') if type(value) in [list,", "root[i] = self.resolve_rule_reference(item) elif type(root) == dict: # Make a", "json_blob = self._pretty_print_as_json(match_items) preformatted_text = u'{{code:json}}{0}{{code}}'.format(json_blob) self.text += preformatted_text class", "subject if 'query_key' in self.rule: qk = matches[0].get(self.rule['query_key']) if qk:", "[summary_table_fields] # Include a count aggregation so that we can", "self.rule.get('stomp_destination', '/queue/ALERT') conn = stomp.Connection([(self.stomp_hostname, self.stomp_hostport)]) conn.start() conn.connect(self.stomp_login, self.stomp_password) conn.send(self.stomp_destination,", "custom_string_types_with_special_handling = [ 'com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes', 'com.atlassian.jira.plugin.system.customfieldtypes:multiselect', 'com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons', ] def __init__(self, rule):", "dict]: try: value_str = self._pretty_print_as_json(value) except TypeError: # Non serializable", "'\\n----------------------------------------\\n' return body def get_aggregation_summary_text(self, matches): text = super(JiraAlerter, self).get_aggregation_summary_text(matches)", "title, get stripped search version if 'alert_subject' not in self.rule:", "'Alert for %s, %s at %s:' % (self.rule['name'], match[qk], lookup_es_key(match,", "each alert \"\"\" required_options = frozenset(['slack_webhook_url']) def __init__(self, rule): super(SlackAlerter,", "'%s and status in (%s)' % (jql, ','.join(self.bump_in_statuses)) if self.bump_not_in_statuses:", "'none') self.slack_text_string = self.rule.get('slack_text_string', '') def format_body(self, body): # https://api.slack.com/docs/formatting", "sent to HipChat room %s\" % self.hipchat_room_id) def get_info(self): return", "static set of built-in fields that we explicitly know how", "alert \"\"\" required_options = frozenset(['gitter_webhook_url']) def 
__init__(self, rule): super(GitterAlerter, self).__init__(rule)", "Gitter headers = {'content-type': 'application/json'} # set https proxy, if", "alert_value alert_text_values = [missing if val is None else val", "text_table.add_row([key for key in keys] + [count]) text += text_table.draw()", "else: elastalert_logger.info('Alert for %s at %s:' % (self.rule['name'], lookup_es_key(match, self.rule['timestamp_field'])))", "convert it a list if it isn't cc = self.rule.get('cc')", "self.jira_args['components'] = [{'name': self.components}] else: self.jira_args['components'] = [{'name': component} for", "each alert \"\"\" required_options = frozenset(['pagerduty_service_key', 'pagerduty_client_name']) def __init__(self, rule):", "= [missing if val is None else val for val", "def __init__(self, rule): super(TwilioAlerter, self).__init__(rule) self.twilio_account_sid = self.rule['twilio_account_sid'] self.twilio_auth_token =", "Set proper headers headers = { \"Content-Type\": \"application/json\", \"Accept\": \"application/json;charset=utf-8\"", "self.url = 'https://%s/v2/room/%s/notification?auth_token=%s' % ( self.hipchat_domain, self.hipchat_room_id, self.hipchat_auth_token) self.hipchat_proxy =", "def alert(self, match): \"\"\" Send an alert. 
Match is a", "types if array_items in ['string', 'date', 'datetime']: # Special case", "self.pipeline is not None: self.pipeline['jira_ticket'] = None self.pipeline['jira_server'] = self.server", "room notification for each alert \"\"\" required_options = frozenset(['hipchat_auth_token', 'hipchat_room_id'])", "a Jira ticket for each alert \"\"\" required_options = frozenset(['jira_server',", "to pagerduty headers = {'content-type': 'application/json'} payload = { 'service_key':", "x in body.split('\\n'))).replace('\\n``````', '') return body def alert(self, matches): body", "should be found in the 'id' or the 'name' field.", "alert_subject.format(*alert_subject_values) return alert_subject def create_alert_body(self, matches): body = self.get_aggregation_summary_text(matches) for", "body = self.format_body(body) # post to slack headers = {'content-type':", "raise EAException(\"Non-zero exit code while running command %s\" % ('", "dictionary of relevant information to the alert. \"\"\" raise NotImplementedError()", "= self.create_alert_body(matches) # post to Gitter headers = {'content-type': 'application/json'}", "'aggregation' in self.rule and 'summary_table_fields' in self.rule: summary_table_fields = self.rule['summary_table_fields']", "for match in matches: if qk in match: elastalert_logger.info( 'Alert", "lookup_es_key call above for i in xrange(len(alert_subject_values)): if alert_subject_values[i] is", "self.telegram_room_id = self.rule['telegram_room_id'] self.telegram_api_url = self.rule.get('telegram_api_url', 'api.telegram.org') self.url = 'https://%s/bot%s/%s'", "import datetime import json import logging import subprocess import sys", "if self.servicenow_proxy else None payload = { \"description\": description, \"short_description\":", "self.bump_in_statuses and self.bump_not_in_statuses: msg = 'Both jira_bump_in_statuses (%s) and jira_bump_not_in_statuses", "= field['schema']['type'] # Handle arrays of simple types like strings", "e) 
elastalert_logger.info(\"Trigger sent to PagerDuty\") def get_incident_key(self, matches): if self.pagerduty_incident_key_args:", "'assignment_group', 'category', 'subcategory', 'cmdb_ci', 'caller_id' ]) def __init__(self, rule): super(ServiceNowAlerter,", "= [] self.shell = False if isinstance(self.rule['command'], basestring): self.shell =", "a single component. This allows us to maintain backwards compatibility", "\"\"\" Creates a mapping of priority index to id. \"\"\"", "self.rule.get('smtp_ssl', False) self.from_addr = self.rule.get('from_addr', 'ElastAlert') self.smtp_port = self.rule.get('smtp_port') if", "triggered again at %s\\n%s\" % (timestamp, text) self.client.add_comment(ticket, comment) def", "glance how many of each aggregation_key were encountered summary_table_fields_with_count =", "= 'https://events.pagerduty.com/generic/2010-04-15/create_event.json' def alert(self, matches): body = self.create_alert_body(matches) # post", "\"Accept\": \"application/json;charset=utf-8\" } proxies = {'https': self.post_proxy} if self.post_proxy else", "', '.join(to_addr) email_msg['From'] = self.from_addr email_msg['Reply-To'] = self.rule.get('email_reply_to', email_msg['To']) email_msg['Date']", "matches): subject = 'ElastAlert: %s' % (self.rule['name']) # If the", "incident_key_values] return self.pagerduty_incident_key.format(*incident_key_values) else: return self.pagerduty_incident_key def get_info(self): return {'type':", "# Try setting it as an object, using 'name' as", "user only provides # a single value for a multi-value", "int: return int(self.rule[strValue[1:-1]]) else: return self.rule[strValue[1:-1]] else: return value def", "was provided proxies = {'https': self.ms_teams_proxy} if self.ms_teams_proxy else None", "(url) to_addr = self.rule['email'] if 'email_from_field' in self.rule: recipient =", "SMTP_SSL from smtplib import SMTPAuthenticationError from smtplib import SMTPException from", "title.replace(' - ', ' ') title = title.replace('\\\\', 
'\\\\\\\\') date", "% (self.sns_topic_arn)) class HipChatAlerter(Alerter): \"\"\" Creates a HipChat room notification", "self.rule: kw = {} for name, kw_name in self.rule.get('alert_text_kw').items(): val", "proxies = {'https': self.victorops_proxy} if self.victorops_proxy else None payload =", "\"\"\" Creates a Jira ticket for each alert \"\"\" required_options", "= session.client('sns') sns_client.publish( TopicArn=self.sns_topic_arn, Message=body, Subject=self.create_title(matches) ) elastalert_logger.info(\"Sent sns notification", "def __init__(self, rule): super(HTTPPostAlerter, self).__init__(rule) post_url = self.rule.get('http_post_url') if isinstance(post_url,", "as a follow-up action if self.watchers: for watcher in self.watchers:", "_add_match_items(self): match_items = self.match.items() match_items.sort(key=lambda x: x[0]) for key, value", "https proxy, if it was provided proxies = {'https': self.slack_proxy}", "coding: utf-8 -*- import copy import datetime import json import", "value = [value] array_items = field['schema']['items'] # Simple string types", "== list: # Make a copy since we may be", "objects with an identifier 'key' elif array_items == 'option': self.jira_args[arg_name]", "query_key, use that in the title if 'query_key' in self.rule", "% ( self.hipchat_domain, self.hipchat_room_id, self.hipchat_auth_token) self.hipchat_proxy = self.rule.get('hipchat_proxy', None) def", "in matches: body += unicode(BasicMatchString(self.rule, match)) # Separate text of", "= 'https://%s/bot%s/%s' % (self.telegram_api_url, self.telegram_bot_token, \"sendMessage\") self.telegram_proxy = self.rule.get('telegram_proxy', None)", "= JIRA(self.server, basic_auth=(self.user, self.password)) self.get_priorities() self.get_arbitrary_fields() except JIRAError as e:", "body } if self.ms_teams_theme_color != '': payload['themeColor'] = self.ms_teams_theme_color for", "self.post_payload = self.rule.get('http_post_payload', {}) self.post_static_payload = 
self.rule.get('http_post_static_payload', {}) self.post_all_values =", "self.rule['comments'], \"assignment_group\": self.rule['assignment_group'], \"category\": self.rule['category'], \"subcategory\": self.rule['subcategory'], \"cmdb_ci\": self.rule['cmdb_ci'], \"caller_id\":", "set the value correctly # If the schema information is", "don't know how to set it # Note this is", "session = boto3.Session( aws_access_key_id=self.aws_access_key_id, aws_secret_access_key=self.aws_secret_access_key, region_name=self.aws_region, profile_name=self.profile ) sns_client =", "field: # Log a warning to ElastAlert saying that we", "alert_subject = unicode(self.rule['alert_subject']) if 'alert_subject_args' in self.rule: alert_subject_args = self.rule['alert_subject_args']", "%s\" % e) elastalert_logger.info(\"Alert sent to HipChat room %s\" %", "obj.isoformat() else: return json.JSONEncoder.default(self, obj) class BasicMatchString(object): \"\"\" Creates a", "if it was provided proxies = {'https': self.slack_proxy} if self.slack_proxy", "% e) elastalert_logger.info(\"Alert sent to HipChat room %s\" % self.hipchat_room_id)", "val is None: val = self.rule.get(name) kw[kw_name] = missing if", "EAException(\"SMTP username/password rejected: %s\" % (e)) self.smtp.sendmail(self.from_addr, to_addr, email_msg.as_string()) self.smtp.close()", ">= inactivity_datetime: if self.pipeline is not None: self.pipeline['jira_ticket'] = None", "to_addr + self.rule['bcc'] try: if self.smtp_ssl: if self.smtp_port: self.smtp =", "in range(len(incident_key_values)): if incident_key_values[i] is None: key_value = self.rule.get(self.pagerduty_incident_key_args[i]) if", "{ \"information\": body.encode('UTF-8'), }, } # set https proxy, if", "StompAlerter(Alerter): \"\"\" The stomp alerter publishes alerts via stomp to", "# Support referencing other top-level rule properties to avoid redundant", "count in top_events: self.text += '%s: %s\\n' % (term, count)", "self.from_addr = self.rule.get('from_addr', 
'ElastAlert') self.smtp_port = self.rule.get('smtp_port') if self.rule.get('smtp_auth_file'): self.get_account(self.rule['smtp_auth_file'])", "post_url = [post_url] self.post_url = post_url self.post_proxy = self.rule.get('http_post_proxy') self.post_payload", "result key, since it would have been matched in the", "them: # {\"id\":\"customfield_12807\",\"name\":\"My Custom Field\",\"custom\":true,\"orderable\":true,\"navigable\":true,\"searchable\":true, # \"clauseNames\":[\"cf[12807]\",\"My Custom Field\"],\"schema\":{\"type\":\"array\",\"items\":\"string\", #", "__init__(self, rule): super(GitterAlerter, self).__init__(rule) self.gitter_webhook_url = self.rule['gitter_webhook_url'] self.gitter_proxy = self.rule.get('gitter_proxy',", "root.copy().iteritems(): if type(value) == dict or type(value) == list: self.resolve_rule_references(root[key])", "%s\" % (self.jira_args, e)) elastalert_logger.info(\"Opened Jira ticket: %s\" % (self.issue))", "self.url = 'https://events.pagerduty.com/generic/2010-04-15/create_event.json' def alert(self, matches): body = self.create_alert_body(matches) #", "proxy, if it was provided proxies = {'https': self.slack_proxy} if", "or numbers if arg_type == 'array': # As a convenience,", "= [self.watchers] if self.assignee: self.jira_args['assignee'] = {'name': self.assignee} try: self.client", "not found. Valid priorities are %s\" % (self.priority, self.priority_ids.keys())) def", "(ticket, e)) if self.pipeline is not None: self.pipeline['jira_ticket'] = ticket", "it as an object, using 'name' as the key #", "self.get_aggregation_summary_text(matches) for match in matches: body += unicode(JiraFormattedMatchString(self.rule, match)) if", "it was provided proxies = {'https': self.slack_proxy} if self.slack_proxy else", "The debug alerter uses a Python logger (by default, alerting", "it was provided proxies = {'https': self.ms_teams_proxy} if self.ms_teams_proxy else", "ElastAlert saying that we couldn't find that type? 
# OR", "tuple([unicode(lookup_es_key(match, key)) for key in summary_table_fields]) if key_tuple not in", "in value] # Handle non-array types else: # Simple string", "= [ 'jira_account_file', 'jira_assignee', 'jira_bump_after_inactivity', 'jira_bump_in_statuses', 'jira_bump_not_in_statuses', 'jira_bump_tickets', 'jira_component', 'jira_components',", "% e) elastalert_logger.info(\"Alert sent to ServiceNow\") def get_info(self): return {'type':", "alert. :param match: A dictionary of relevant information to the", "counts.items() if not top_events: self.text += 'No events found.\\n' else:", "'number': self.jira_args[arg_name] = int(value) elif arg_type == 'option': self.jira_args[arg_name] =", "{'content-type': 'application/json'} payload = { 'service_key': self.pagerduty_service_key, 'description': self.create_title(matches), 'event_type':", "self.rule['jira_project'] self.issue_type = self.rule['jira_issuetype'] # We used to support only", "import sys import warnings from email.mime.text import MIMEText from email.utils", "self.hipchat_from } try: if self.hipchat_ignore_ssl_errors: requests.packages.urllib3.disable_warnings() response = requests.post(self.url, data=json.dumps(payload,", "get_info(self): return {'type': 'slack', 'slack_username_override': self.slack_username_override, 'slack_webhook_url': self.slack_webhook_url} class PagerDutyAlerter(Alerter):", "def get_info(self): return {'type': 'email', 'recipients': self.rule['email']} class JiraAlerter(Alerter): \"\"\"", "# Populate values with rule level properties too for i", "type(item) == dict or type(item) == list: self.resolve_rule_references(root[i]) else: root[i]", "EAException(\"Error posting to Gitter: %s\" % e) elastalert_logger.info(\"Alert sent to", "if key_value: incident_key_values[i] = key_value incident_key_values = ['<MISSING VALUE>' if", "rule): super(VictorOpsAlerter, self).__init__(rule) self.victorops_api_key = self.rule['victorops_api_key'] self.victorops_routing_key = 
self.rule['victorops_routing_key'] self.victorops_message_type", "that we can see at a glance how many of", "they are required to be provided as an object. if", "{'type': 'stomp'} class DebugAlerter(Alerter): \"\"\" The debug alerter uses a", "reality, they are required to be provided as an object.", "'parse': self.slack_parse_override, 'text': self.slack_text_string, 'attachments': [ { 'color': self.slack_msg_color, 'title':", "'details': { \"information\": body.encode('UTF-8'), }, } # set https proxy,", "warnings.resetwarnings() response.raise_for_status() except RequestException as e: raise EAException(\"Error posting to", "ticket: %s' % (url) to_addr = self.rule['email'] if 'email_from_field' in", "self.slack_channel_override, 'parse': self.slack_parse_override, 'text': self.slack_text_string, 'attachments': [ { 'color': self.slack_msg_color,", "None, sys.exc_info()[2] elastalert_logger.info(\"Trigger sent to Exotel\") def get_info(self): return {'type':", "https proxy, if it was provided proxies = {'https': self.victorops_proxy}", "as custom fields require special handling # Here is a", "subject = 'ElastAlert: %s' % (self.rule['name']) # If the rule", "= 'ElastAlert: %s' % (self.rule['name']) if for_search: return title title", "None) def alert(self, matches): body = u'⚠ *%s* ⚠ ```\\n'", "not find a definition for the jira field '{0}'\".format(normalized_jira_field)) arg_name", "= [{'value': v} for v in value] else: self.jira_args[arg_name] =", "# Remove the jira_ part. Convert underscores to spaces normalized_jira_field", "= {'value': value} else: self.jira_args[arg_name] = value # Number type", "be found in the 'id' or the 'name' field. Therefore,", "matches: payload = match if self.post_all_values else {} payload.update(self.post_static_payload) for", "which contains user and password information. 
\"\"\" account_conf = yaml_loader(account_file)", "if self.rule.get(\"fail_on_non_zero_exit\", False) and subp.wait(): raise EAException(\"Non-zero exit code while", "means it is either: # 1. A built-in supported field", "self.post_payload.items(): payload[post_key] = lookup_es_key(match, es_key) headers = { \"Content-Type\": \"application/json\",", "= [lookup_es_key(matches[0], arg) for arg in alert_subject_args] # Support referencing", "match_items.sort(key=lambda x: x[0]) for key, value in match_items: if key.startswith('top_events_'):", "'email_from_field' in self.rule: recipient = lookup_es_key(matches[0], self.rule['email_from_field']) if isinstance(recipient, basestring):", "{} # Maintain an aggregate count for each unique key", "] def __init__(self, rule): super(JiraAlerter, self).__init__(rule) self.server = self.rule['jira_server'] self.get_account(self.rule['jira_account_file'])", "activity message for each alert \"\"\" required_options = frozenset(['gitter_webhook_url']) def", "def get_aggregation_summary_text(self, matches): text = super(JiraAlerter, self).get_aggregation_summary_text(matches) if text: text", "except OSError as e: raise EAException(\"Error while running command %s:", "}, } # set https proxy, if it was provided", "= self.rule['twilio_auth_token'] self.twilio_to_number = self.rule['twilio_to_number'] self.twilio_from_number = self.rule['twilio_from_number'] def alert(self,", "value] else: self.jira_args[arg_name] = value elif array_items == 'number': self.jira_args[arg_name]", "AND summary~\"%s\" and created >= \"%s\"' % (self.project, title, date)", "to slack headers = {'content-type': 'application/json'} # set https proxy,", "try: response = requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies) warnings.resetwarnings() response.raise_for_status()", "provided as an object. 
if 'custom' in field['schema'] and field['schema']['custom']", "None) # Deprecated self.profile = self.rule.get('aws_profile', None) def create_default_title(self, matches):", "body = body.encode('UTF-8') if self.ms_teams_alert_fixed_width: body = body.replace('`', \"'\") body", "connecting to JIRA: %s\" % (str(e)[:1024])) try: if self.priority is", "% self.hipchat_room_id) def get_info(self): return {'type': 'hipchat', 'hipchat_room_id': self.hipchat_room_id} class", "%s\" % (to_addr)) def create_default_title(self, matches): subject = 'ElastAlert: %s'", "to ElastAlert saying that we couldn't find that type? #", "dashes if len(matches) > 1: body += '\\n----------------------------------------\\n' body +=", "be vulnerable to shell injection!') self.rule['command'] = [self.rule['command']] self.new_style_string_format =", "headers=headers, proxies=proxies ) response.raise_for_status() except RequestException as e: raise EAException(\"Error", "self.jira_args[arg_name] = int(value) elif arg_type == 'option': self.jira_args[arg_name] = {'value':", "to work. Other special characters and dashes # directly adjacent", "self.smtp_key_file = self.rule.get('smtp_key_file') self.smtp_cert_file = self.rule.get('smtp_cert_file') # Convert email to", "to %s\" % (self.sns_topic_arn)) class HipChatAlerter(Alerter): \"\"\" Creates a HipChat", "{'https': self.victorops_proxy} if self.victorops_proxy else None payload = { \"message_type\":", "+= u' ```' headers = {'content-type': 'application/json'} # set https", "(len(body) > 9999): body = body[:9980] + '..(truncated)' # Use", "watchers on initial creation. 
Only as a follow-up action if", "else: command = [command_arg % matches[0] for command_arg in self.rule['command']]", "matches): # Default title, get stripped search version if 'alert_subject'", "= title self.jira_args['description'] = self.create_alert_body(matches) try: self.issue = self.client.create_issue(**self.jira_args) #", "alert_subject_args] # Support referencing other top-level rule properties # This", "lookup_es_key(self.match, name) # Support referencing other top-level rule properties #", "body += unicode(BasicMatchString(self.rule, match)) # Separate text of aggregated alerts", "self).__init__(rule) self.hipchat_msg_color = self.rule.get('hipchat_msg_color', 'red') self.hipchat_message_format = self.rule.get('hipchat_message_format', 'html') self.hipchat_auth_token", "```\\n' % (self.create_title(matches)) for match in matches: body += unicode(BasicMatchString(self.rule,", "at a glance how many of each aggregation_key were encountered", "error that will bubble up self.jira_args[arg_name] = [{'name': v} for", "datetime.timedelta(days=self.max_age)).strftime('%Y-%m-%d') jql = 'project=%s AND summary~\"%s\" and created >= \"%s\"'", "' - %s+ events' % (count) return title def get_info(self):", "e)) if self.pipeline is not None: self.pipeline['jira_ticket'] = ticket self.pipeline['jira_server']", "watcher in self.watchers: try: self.client.add_watcher(self.issue.key, watcher) except Exception as ex:", "{'name': value} def get_priorities(self): \"\"\" Creates a mapping of priority", "with dashes if len(matches) > 1: body += '\\n----------------------------------------\\n' body", "alerting to terminal). 
\"\"\" def alert(self, matches): qk = self.rule.get('query_key',", "\"\"\" required_options = frozenset(['victorops_api_key', 'victorops_routing_key', 'victorops_message_type']) def __init__(self, rule): super(VictorOpsAlerter,", "elastalert_logger.info('Alert for %s at %s:' % (self.rule['name'], lookup_es_key(match, self.rule['timestamp_field']))) alerts.append(", "frozenset(['sns_topic_arn']) def __init__(self, *args): super(SnsAlerter, self).__init__(*args) self.sns_topic_arn = self.rule.get('sns_topic_arn', '')", "payload = { 'username': self.slack_username_override, 'channel': self.slack_channel_override, 'parse': self.slack_parse_override, 'text':", "return int(self.rule[strValue[1:-1]]) else: return self.rule[strValue[1:-1]] else: return value def alert(self,", "get_info(self): return {'type': 'pagerduty', 'pagerduty_client_name': self.pagerduty_client_name} class ExotelAlerter(Alerter): required_options =", "val is None else val for val in alert_text_values] alert_text", "else: self.jira_args[arg_name] = {'name': value} def get_priorities(self): \"\"\" Creates a", "self.exotel_account_sid = self.rule['exotel_account_sid'] self.exotel_auth_token = self.rule['exotel_auth_token'] self.exotel_to_number = self.rule['exotel_to_number'] self.exotel_from_number", "'hipchat_room_id']) def __init__(self, rule): super(HipChatAlerter, self).__init__(rule) self.hipchat_msg_color = self.rule.get('hipchat_msg_color', 'red')", "add_suffix and not add_suffix.startswith('@'): self.rule['email_add_domain'] = '@' + add_suffix def", "'id']: field = next((f for f in fields if normalized_jira_field", "to VictorOps: %s\" % e) elastalert_logger.info(\"Trigger sent to VictorOps\") def", "self.stomp_hostport)]) conn.start() conn.connect(self.stomp_login, self.stomp_password) conn.send(self.stomp_destination, json.dumps(fullmessage)) conn.disconnect() def get_info(self): return", "Support single label or list if type(self.labels) != list: self.labels", "metadata about all the fields 
defined on the jira server", "match_items: if key.startswith('top_events_'): continue value_str = unicode(value) value_str.replace('\\\\n', '\\n') if", "self.get_incident_key(matches), 'client': self.pagerduty_client_name, 'details': { \"information\": body.encode('UTF-8'), }, } #", "(self.telegram_api_url, self.telegram_bot_token, \"sendMessage\") self.telegram_proxy = self.rule.get('telegram_proxy', None) def alert(self, matches):", "= self.rule.get('slack_emoji_override', ':ghost:') self.slack_icon_url_override = self.rule.get('slack_icon_url_override', '') self.slack_msg_color = self.rule.get('slack_msg_color',", "} try: response = requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies) warnings.resetwarnings()", "our radar # 2. A custom field that a JIRA", "try: if self.new_style_string_format: command = [command_arg.format(match=matches[0]) for command_arg in self.rule['command']]", "\"\"\" if 'alert_subject' in self.rule: return self.create_custom_title(matches) return self.create_default_title(matches) def", "root[key] = self.resolve_rule_reference(value) def resolve_rule_reference(self, value): strValue = unicode(value) if", "no tickets will ever be found.' % ( msg, ','.join(intersection))", "%s\" % (ticket, e)) if self.pipeline is not None: self.pipeline['jira_ticket']", "in the 'id' or the 'name' field. 
Therefore, try both", "rule): super(TwilioAlerter, self).__init__(rule) self.twilio_account_sid = self.rule['twilio_account_sid'] self.twilio_auth_token = self.rule['twilio_auth_token'] self.twilio_to_number", "EAException(\"Error connecting to SMTP host: %s\" % (e)) except SMTPAuthenticationError", "import SMTP_SSL from smtplib import SMTPAuthenticationError from smtplib import SMTPException", "DebugAlerter(Alerter): \"\"\" The debug alerter uses a Python logger (by", "'%s': %s\" % (jql, e)) return None if len(issues): return", "message for each alert \"\"\" required_options = frozenset(['slack_webhook_url']) def __init__(self,", "= self.rule.get('telegram_api_url', 'api.telegram.org') self.url = 'https://%s/bot%s/%s' % (self.telegram_api_url, self.telegram_bot_token, \"sendMessage\")", "% self.telegram_room_id) def get_info(self): return {'type': 'telegram', 'telegram_room_id': self.telegram_room_id} class", "self.rule['name']) else: title = 'ElastAlert: %s' % (self.rule['name']) if for_search:", "key_value = self.rule.get(self.pagerduty_incident_key_args[i]) if key_value: incident_key_values[i] = key_value incident_key_values =", "response.raise_for_status() except RequestException as e: raise EAException(\"Error posting to Gitter:", "self.rule: to_addr = [name + self.rule['email_add_domain'] for name in to_addr]", "= \"This alert was triggered again at %s\\n%s\" % (timestamp,", "will do best-effort and try to set a string value", "[self.ms_teams_webhook_url] self.ms_teams_proxy = self.rule.get('ms_teams_proxy', None) self.ms_teams_alert_summary = self.rule.get('ms_teams_alert_summary', 'ElastAlert Message')", "alert_text = alert_text.format(**kw) self.text += alert_text def _add_rule_text(self): self.text +=", "9999): body = body[:9980] + '..(truncated)' # Use appropriate line", "is not None: self.pipeline['jira_ticket'] = ticket self.pipeline['jira_server'] = self.server return", "isinstance(self.ms_teams_webhook_url, basestring): self.ms_teams_webhook_url = 
[self.ms_teams_webhook_url] self.ms_teams_proxy = self.rule.get('ms_teams_proxy', None) self.ms_teams_alert_summary", "= unicode(value) if strValue.startswith('$') and strValue.endswith('$') and strValue[1:-1] in self.rule:", "'ElastAlert: %s' % (self.rule['name']) return subject def alert(self, matches): body", "# a single value for a multi-value field e.g. jira_labels:", "match[qk], lookup_es_key(match, self.rule['timestamp_field']))) else: elastalert_logger.info('Alert for %s at %s:' %", "for search to work. Other special characters and dashes #", "dict or type(value) == list: self.resolve_rule_references(root[key]) else: root[key] = self.resolve_rule_reference(value)", "'twilio_from_number']) def __init__(self, rule): super(TwilioAlerter, self).__init__(rule) self.twilio_account_sid = self.rule['twilio_account_sid'] self.twilio_auth_token", "exist?\\n{1}\" .format( watcher, ex )), None, sys.exc_info()[2] except JIRAError as", "field['schema'] and field['schema']['custom'] in self.custom_string_types_with_special_handling: self.jira_args[arg_name] = {'value': value} else:", "Non serializable object, fallback to str pass self.text += '%s:", "qk in match: elastalert_logger.info( 'Alert for %s, %s at %s:'", "v} for v in value] else: # Try setting it", "def create_alert_body(self, matches): body = self.get_aggregation_summary_text(matches) for match in matches:", "in fields if normalized_jira_field == f[identifier].replace('_', ' ').lower()), None) if", "e.g. 
jira_labels: Only_One_Label if type(value) != list: value = [value]", "\"\"\" Returns a dictionary of data related to this alert.", "summary_table_fields = [summary_table_fields] # Include a count aggregation so that", "keys] + [count]) text += text_table.draw() + '\\n\\n' return unicode(text)", "= {'content-type': 'application/json'} payload = { 'service_key': self.pagerduty_service_key, 'description': self.create_title(matches),", "def __init__(self, rule): super(VictorOpsAlerter, self).__init__(rule) self.victorops_api_key = self.rule['victorops_api_key'] self.victorops_routing_key =", "recipient = lookup_es_key(matches[0], self.rule['email_from_field']) if isinstance(recipient, basestring): if '@' in", "rule configuration. \"\"\" required_options = frozenset([]) def __init__(self, rule): self.rule", "+= text_table.draw() + '\\n\\n' return unicode(text) def create_default_title(self, matches): return", "best-effort and try to set a string value known_field_list =", "text = unicode(JiraFormattedMatchString(self.rule, match)) timestamp = pretty_ts(lookup_es_key(match, self.rule['timestamp_field'])) comment =", "matches): body = self.create_alert_body(matches) # Add JIRA ticket if it", "an e-mail subject or JIRA issue summary. 
:param matches: A", "string value known_field_list = [ 'jira_account_file', 'jira_assignee', 'jira_bump_after_inactivity', 'jira_bump_in_statuses', 'jira_bump_not_in_statuses',", "that will need to be updated on a case-by-case basis", "self.rule['cc'] if self.rule.get('bcc'): to_addr = to_addr + self.rule['bcc'] try: if", "= value # Number type elif arg_type == 'number': self.jira_args[arg_name]", "self.pagerduty_incident_key_args: incident_key_values = [lookup_es_key(matches[0], arg) for arg in self.pagerduty_incident_key_args] #", "self.text += '%s: %s\\n' % (term, count) self.text += '\\n'", "+= '\\n' def _add_custom_alert_text(self): missing = '<MISSING VALUE>' alert_text =", "\"\"\" Creates a Slack room message for each alert \"\"\"", "try: response = requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies) response.raise_for_status() except", "# Handle non-array types else: # Simple string types if", "Try setting it as an object, using 'name' as the", "qk = matches[0].get(self.rule['query_key']) if qk: subject += ' - %s'", "EAException(\"Non-zero exit code while running command %s\" % (' '.join(command)))", "is None: alert_value = self.rule.get(alert_subject_args[i]) if alert_value: alert_subject_values[i] = alert_value", "in self.rule: title = title.replace(matches[0].get(self.rule['jira_ignore_in_title'], ''), '') # This is", "matches): body = u'⚠ *%s* ⚠ ```\\n' % (self.create_title(matches)) for", "value elif array_items == 'number': self.jira_args[arg_name] = [int(v) for v", "self.pagerduty_incident_key def get_info(self): return {'type': 'pagerduty', 'pagerduty_client_name': self.pagerduty_client_name} class ExotelAlerter(Alerter):", "match_aggregation[key_tuple] = 1 else: match_aggregation[key_tuple] = match_aggregation[key_tuple] + 1 for", "for each alert \"\"\" required_options = frozenset(['gitter_webhook_url']) def __init__(self, rule):", "response.raise_for_status() except RequestException as e: raise 
EAException(\"Error posting HTTP Post", "self.slack_icon_url_override = self.rule.get('slack_icon_url_override', '') self.slack_msg_color = self.rule.get('slack_msg_color', 'danger') self.slack_parse_override =", "self.smtp.starttls(keyfile=self.smtp_key_file, certfile=self.smtp_cert_file) if 'smtp_auth_file' in self.rule: self.smtp.login(self.user, self.password) except (SMTPException,", "= self.server return None self.jira_args['summary'] = title self.jira_args['description'] = self.create_alert_body(matches)", "= self.rule.get('slack_msg_color', 'danger') self.slack_parse_override = self.rule.get('slack_parse_override', 'none') self.slack_text_string = self.rule.get('slack_text_string',", "each alert \"\"\" required_options = frozenset(['gitter_webhook_url']) def __init__(self, rule): super(GitterAlerter,", "'jira_ignore_in_title' in self.rule: title = title.replace(matches[0].get(self.rule['jira_ignore_in_title'], ''), '') # This", "None self.resolve_rule_references(self.rule) def resolve_rule_references(self, root): # Support referencing other top-level", "= self.rule.get('ms_teams_alert_fixed_width', False) self.ms_teams_theme_color = self.rule.get('ms_teams_theme_color', '') def format_body(self, body):", "self.rule['slack_webhook_url'] if isinstance(self.slack_webhook_url, basestring): self.slack_webhook_url = [self.slack_webhook_url] self.slack_proxy = self.rule.get('slack_proxy',", "find a definition for the jira field '{0}'\".format(normalized_jira_field)) arg_name =", "events' % (count) return title def get_info(self): return {'type': 'jira'}", "= self.rule.get('query_key', None) for match in matches: if qk in", "= self.rule.get('jira_description', '') self.assignee = self.rule.get('jira_assignee') self.max_age = self.rule.get('jira_max_age', 30)", "list(set(self.bump_in_statuses) & set(self.bump_in_statuses)) if intersection: msg = '%s Both have", "= self.rule.get('jira_priority') self.bump_tickets = self.rule.get('jira_bump_tickets', False) 
self.bump_not_in_statuses = self.rule.get('jira_bump_not_in_statuses') self.bump_in_statuses", "[ 'com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes', 'com.atlassian.jira.plugin.system.customfieldtypes:multiselect', 'com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons', ] def __init__(self, rule): super(JiraAlerter, self).__init__(rule)", "= tuple([unicode(lookup_es_key(match, key)) for key in summary_table_fields]) if key_tuple not", "alert(self, matches): title = self.create_title(matches) if self.bump_tickets: ticket = self.find_existing_ticket(matches)", "If we find a field that is not covered by", "= datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\") fullmessage['body'] = self.create_alert_body(matches) self.stomp_hostname = self.rule.get('stomp_hostname', 'localhost')", "RequestException as e: raise EAException(\"Error posting HTTP Post alert: %s\"", "self.jira_args[arg_name] = value elif array_items == 'number': self.jira_args[arg_name] = [int(v)", "'victorops_message_type']) def __init__(self, rule): super(VictorOpsAlerter, self).__init__(rule) self.victorops_api_key = self.rule['victorops_api_key'] self.victorops_routing_key", "and not add_suffix.startswith('@'): self.rule['email_add_domain'] = '@' + add_suffix def alert(self,", "to be passed as objects with an identifier 'key' elif", "Teams headers = {'content-type': 'application/json'} # set https proxy, if", "for JIRA ticket using jql '%s': %s\" % (jql, e))", "= '%s and status not in (%s)' % (jql, ','.join(self.bump_not_in_statuses))", "from smtplib import SMTP from smtplib import SMTP_SSL from smtplib", "super(EmailAlerter, self).__init__(*args) self.smtp_host = self.rule.get('smtp_host', 'localhost') self.smtp_ssl = self.rule.get('smtp_ssl', False)", "'https://%s/bot%s/%s' % (self.telegram_api_url, self.telegram_bot_token, \"sendMessage\") self.telegram_proxy = self.rule.get('telegram_proxy', None) def", "= {} # Maintain an aggregate count for each unique", "% 
(self.create_title(matches)) for match in matches: body += unicode(BasicMatchString(self.rule, match))", "must have user and password fields') self.user = account_conf['user'] self.password", "for each alert \"\"\" required_options = frozenset(['slack_webhook_url']) def __init__(self, rule):", "it's Latin-1 to show something return json.dumps(blob, cls=DateTimeEncoder, sort_keys=True, indent=4,", "= self.rule.get('slack_icon_url_override', '') self.slack_msg_color = self.rule.get('slack_msg_color', 'danger') self.slack_parse_override = self.rule.get('slack_parse_override',", "raise EAException(\"Error posting to HipChat: %s\" % e) elastalert_logger.info(\"Alert sent", "to HipChat room %s\" % self.hipchat_room_id) def get_info(self): return {'type':", "(%s). As such, no tickets will ever be found.' %", "% e) elastalert_logger.info(\"Trigger sent to Twilio\") def get_info(self): return {'type':", "'email_add_domain' in self.rule: to_addr = [recipient + self.rule['email_add_domain']] elif isinstance(recipient,", "label or list if type(self.labels) != list: self.labels = [self.labels]", "def get_info(self): return {'type': 'gitter', 'gitter_webhook_url': self.gitter_webhook_url} class ServiceNowAlerter(Alerter): \"\"\"", "'\\\\\\\\') date = (datetime.datetime.now() - datetime.timedelta(days=self.max_age)).strftime('%Y-%m-%d') jql = 'project=%s AND", "% (url) to_addr = self.rule['email'] if 'email_from_field' in self.rule: recipient", "line ending for text/html if self.hipchat_message_format == 'html': body =", "have user and password fields') self.user = account_conf['user'] self.password =", "match))) fullmessage['alerts'] = alerts fullmessage['rule'] = self.rule['name'] fullmessage['matching'] = unicode(BasicMatchString(self.rule,", "PagerDuty for each alert \"\"\" required_options = frozenset(['pagerduty_service_key', 'pagerduty_client_name']) def", "{'https': self.slack_proxy} if self.slack_proxy else None payload = { 'username':", "fullmessage['alerts'] = alerts 
fullmessage['rule'] = self.rule['name'] fullmessage['matching'] = unicode(BasicMatchString(self.rule, match))", "self.rule.get('cc'): email_msg['CC'] = ','.join(self.rule['cc']) to_addr = to_addr + self.rule['cc'] if", "account_conf or 'password' not in account_conf: raise EAException('Account file must", "SMTPException from socket import error import boto3 import requests import", "aggregation_key were encountered summary_table_fields_with_count = summary_table_fields + ['count'] text +=", "not None: self.jira_args['priority'] = {'id': self.priority_ids[self.priority]} except KeyError: logging.error(\"Priority %s", "else: title = self.create_title(matches) if 'jira_ignore_in_title' in self.rule: title =", "None) self.ms_teams_alert_summary = self.rule.get('ms_teams_alert_summary', 'ElastAlert Message') self.ms_teams_alert_fixed_width = self.rule.get('ms_teams_alert_fixed_width', False)", "def __init__(self, rule): super(PagerDutyAlerter, self).__init__(rule) self.pagerduty_service_key = self.rule['pagerduty_service_key'] self.pagerduty_client_name =", "response.raise_for_status() except RequestException as e: raise EAException(\"Error posting to ServiceNow:", "= Texttable() text_table.header(summary_table_fields_with_count) match_aggregation = {} # Maintain an aggregate", "else val for val in incident_key_values] return self.pagerduty_incident_key.format(*incident_key_values) else: return", "to Exotel\") def get_info(self): return {'type': 'exotel', 'exotel_account': self.exotel_account_sid} class", "post_url = self.rule.get('http_post_url') if isinstance(post_url, basestring): post_url = [post_url] self.post_url", "(lookup_es_key(matches[0], self.rule['query_key']), self.rule['name']) else: title = 'ElastAlert: %s' % (self.rule['name'])", "= self.rule.get('hipchat_domain', 'api.hipchat.com') self.hipchat_ignore_ssl_errors = self.rule.get('hipchat_ignore_ssl_errors', False) self.hipchat_notify = self.rule.get('hipchat_notify',", "'error') def alert(self, matches): 
body = self.create_alert_body(matches) # post to", "= title.replace('\\\\', '\\\\\\\\') date = (datetime.datetime.now() - datetime.timedelta(days=self.max_age)).strftime('%Y-%m-%d') jql =", "%s\" % (self.priority, self.priority_ids.keys())) def get_arbitrary_fields(self): # This API returns", "v} for v in value] # Handle non-array types else:", "for_search=False): # If there is a query_key, use that in", "in value] else: # Try setting it as an object,", "== 'option': self.jira_args[arg_name] = {'value': value} # Complex type else:", "field in JIRA that we don't have on our radar", "class for types of alerts. :param rule: The rule configuration.", "the following data for summary_table_fields ==> {0}:\\n\\n\".format( summary_table_fields_with_count ) text_table", "account_conf = yaml_loader(account_file) if 'user' not in account_conf or 'password'", "% (self.issue)) if self.pipeline is not None: self.pipeline['jira_ticket'] = self.issue", "class SlackAlerter(Alerter): \"\"\" Creates a Slack room message for each", "self.slack_emoji_override = self.rule.get('slack_emoji_override', ':ghost:') self.slack_icon_url_override = self.rule.get('slack_icon_url_override', '') self.slack_msg_color =", "'caller_id' ]) def __init__(self, rule): super(ServiceNowAlerter, self).__init__(rule) self.servicenow_rest_url = self.rule['servicenow_rest_url']", "the jira field '{0}'\".format(normalized_jira_field)) arg_type = field['schema']['type'] # Handle arrays", "body = u'⚠ *%s* ⚠ ```\\n' % (self.create_title(matches)) for match", "existing ticket %s' % (ticket.key)) for match in matches: try:", "%H:%M:%S\") fullmessage['body'] = self.create_alert_body(matches) self.stomp_hostname = self.rule.get('stomp_hostname', 'localhost') self.stomp_hostport =", "# set https proxy, if it was provided proxies =", "self.text += self.rule['type'].get_match_str(self.match) def _add_top_counts(self): for key, counts in self.match.items():", "self.max_age = self.rule.get('jira_max_age', 30) self.priority 
= self.rule.get('jira_priority') self.bump_tickets = self.rule.get('jira_bump_tickets',", "e: # JIRAError may contain HTML, pass along only first", "JIRAError may contain HTML, pass along only first 1024 chars", "'id', 'value', or something else # If it works, great!", "% (key[11:]) top_events = counts.items() if not top_events: self.text +=", "pretend it's Latin-1 to show something return json.dumps(blob, cls=DateTimeEncoder, sort_keys=True,", "matches: A list of dictionaries of relevant information to the", "lookup_es_key call above if val is None: val = self.rule.get(name)", "alert_subject_args = self.rule['alert_subject_args'] alert_subject_values = [lookup_es_key(matches[0], arg) for arg in", "POST. Encoded with JSON. \"\"\" def __init__(self, rule): super(HTTPPostAlerter, self).__init__(rule)", "'.join(to_addr) email_msg['From'] = self.from_addr email_msg['Reply-To'] = self.rule.get('email_reply_to', email_msg['To']) email_msg['Date'] =", "in root.copy().iteritems(): if type(value) == dict or type(value) == list:", "fields defined on the jira server (built-ins and custom ones)", "'jira_labels', 'jira_max_age', 'jira_priority', 'jira_project', 'jira_server', 'jira_watchers', ] # Some built-in", "class TelegramAlerter(Alerter): \"\"\" Send a Telegram message via bot api", "that will bubble up self.jira_args[arg_name] = [{'name': v} for v", "ElastAlerter.send_alert() # and attached to each alerters used by a", "an alert. 
Match is a dictionary of information about the", "return alert_subject.format(*alert_subject_values) return alert_subject def create_alert_body(self, matches): body = self.get_aggregation_summary_text(matches)", "util import ts_now from util import ts_to_dt class DateTimeEncoder(json.JSONEncoder): def", "self.pagerduty_incident_key = self.rule.get('pagerduty_incident_key', '') self.pagerduty_incident_key_args = self.rule.get('pagerduty_incident_key_args', None) self.pagerduty_proxy =", "how to set # For anything else, we will do", "command = [command_arg.format(match=matches[0]) for command_arg in self.rule['command']] else: command =", "class ServiceNowAlerter(Alerter): \"\"\" Creates a ServiceNow alert \"\"\" required_options =", "count for each unique key encountered in the aggregation period", "alert() self.pipeline = None self.resolve_rule_references(self.rule) def resolve_rule_references(self, root): # Support", "else: title = 'ElastAlert: %s' % (self.rule['name']) if for_search: return", "the alert. \"\"\" if 'alert_subject' in self.rule: return self.create_custom_title(matches) return", "(self.rule['name'], match[qk], lookup_es_key(match, self.rule['timestamp_field']))) alerts.append( '1)Alert for %s, %s at", "custom types (the JIRA metadata says that these are strings,", "can not add watchers on initial creation. 
Only as a", "if assignee: self.jira_args['assignee'] = {'name': assignee} elif 'assignee' in self.jira_args:", "region_name=self.aws_region, profile_name=self.profile ) sns_client = session.client('sns') sns_client.publish( TopicArn=self.sns_topic_arn, Message=body, Subject=self.create_title(matches)", "text_table.draw() + '\\n\\n' return unicode(text) def create_default_title(self, matches): return self.rule['name']", "for val in incident_key_values] return self.pagerduty_incident_key.format(*incident_key_values) else: return self.pagerduty_incident_key def", "import Exotel from jira.client import JIRA from jira.exceptions import JIRAError", "are set.' % \\ (','.join(self.bump_in_statuses), ','.join(self.bump_not_in_statuses)) intersection = list(set(self.bump_in_statuses) &", "self.rule.get('stomp_hostname', 'localhost') self.stomp_hostport = self.rule.get('stomp_hostport', '61613') self.stomp_login = self.rule.get('stomp_login', 'admin')", "create_custom_title(self, matches): alert_subject = unicode(self.rule['alert_subject']) if 'alert_subject_args' in self.rule: alert_subject_args", "component. 
This allows us to maintain backwards compatibility # while", "(SMTPException, error) as e: raise EAException(\"Error connecting to SMTP host:", "= field['schema']['items'] # Simple string types if array_items in ['string',", "not ('schema' in field or 'type' in field['schema']): raise Exception(\"Could", "Convert email to a list if it isn't already if", "ok title = title.replace(' - ', ' ') title =", "':ghost:') self.slack_icon_url_override = self.rule.get('slack_icon_url_override', '') self.slack_msg_color = self.rule.get('slack_msg_color', 'danger') self.slack_parse_override", "def format_body(self, body): # https://api.slack.com/docs/formatting body = body.encode('UTF-8') body =", "in self.slack_webhook_url: try: response = requests.post(url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies)", "frozenset(['gitter_webhook_url']) def __init__(self, rule): super(GitterAlerter, self).__init__(rule) self.gitter_webhook_url = self.rule['gitter_webhook_url'] self.gitter_proxy", "the title if 'query_key' in self.rule and lookup_es_key(matches[0], self.rule['query_key']): title", "False) and subp.wait(): raise EAException(\"Non-zero exit code while running command", "= self.ms_teams_theme_color for url in self.ms_teams_webhook_url: try: response = requests.post(url,", "account_conf['password'] class StompAlerter(Alerter): \"\"\" The stomp alerter publishes alerts via", "works, great! If not, it will manifest itself as an", "__init__(self, rule): super(HipChatAlerter, self).__init__(rule) self.hipchat_msg_color = self.rule.get('hipchat_msg_color', 'red') self.hipchat_message_format =", "Use appropriate line ending for text/html if self.hipchat_message_format == 'html':", "dictionary of data related to this alert. 
At minimum, this", "e) elastalert_logger.info(\"Alert sent to MS Teams\") def get_info(self): return {'type':", "an aggregate count for each unique key encountered in the", "alerters used by a rule before calling alert() self.pipeline =", "[value] array_items = field['schema']['items'] # Simple string types if array_items", "characters if (len(body) > 9999): body = body[:9980] + '..(truncated)'", "get_info(self): return {'type': 'victorops', 'victorops_routing_key': self.victorops_routing_key} class TelegramAlerter(Alerter): \"\"\" Send", "self.rule.get('slack_text_string', '') def format_body(self, body): # https://api.slack.com/docs/formatting body = body.encode('UTF-8')", "field['schema'] and field['schema']['custom'] in self.custom_string_types_with_special_handling: self.jira_args[arg_name] = [{'value': v} for", "for each alert \"\"\" required_options = frozenset(['pagerduty_service_key', 'pagerduty_client_name']) def __init__(self,", "lookup_es_key(match, self.rule['timestamp_field']) elastalert_logger.info(unicode(BasicMatchString(self.rule, match))) fullmessage['alerts'] = alerts fullmessage['rule'] = self.rule['name']", "if self.gitter_proxy else None payload = { 'message': body, 'level':", "if self.priority is not None: self.jira_args['priority'] = {'id': self.priority_ids[self.priority]} except", "self.resolve_rule_reference(item) elif type(root) == dict: # Make a copy since", "to_addr = [name + self.rule['email_add_domain'] for name in to_addr] email_msg", "for the jira field '{0}'\".format(normalized_jira_field)) arg_type = field['schema']['type'] # Handle", "for_search: return title title += ' - %s' % (pretty_ts(matches[0][self.rule['timestamp_field']],", "v in value] else: # Try setting it as an", "at %s\\n%s\" % (timestamp, text) self.client.add_comment(ticket, comment) def alert(self, matches):", "for watcher in self.watchers: try: self.client.add_watcher(self.issue.key, watcher) except Exception as", "== 'option': self.jira_args[arg_name] = 
[{'value': v} for v in value]", "super(HTTPPostAlerter, self).__init__(rule) post_url = self.rule.get('http_post_url') if isinstance(post_url, basestring): post_url =", "{'https': self.gitter_proxy} if self.gitter_proxy else None payload = { 'message':", "\"sendMessage\") self.telegram_proxy = self.rule.get('telegram_proxy', None) def alert(self, matches): body =", "return {'type': 'stomp'} class DebugAlerter(Alerter): \"\"\" The debug alerter uses", "stdin=subprocess.PIPE, shell=self.shell) if self.rule.get('pipe_match_json'): match_json = json.dumps(matches, cls=DateTimeEncoder) + '\\n'", "request on messages longer than 10000 characters if (len(body) >", "HipChat sends 400 bad request on messages longer than 10000", "import MIMEText from email.utils import formatdate from smtplib import SMTP", "for v in value] # Handle non-array types else: #", "return {'type': 'command', 'command': ' '.join(self.last_command)} class SnsAlerter(Alerter): \"\"\" Send", "\"\"\" The debug alerter uses a Python logger (by default,", "texttable import Texttable from twilio.base.exceptions import TwilioRestException from twilio.rest import", "subject += ' - %s' % (qk) return subject def", "how to set the value correctly # If the schema", "Complex type else: self.jira_args[arg_name] = {'name': value} def get_priorities(self): \"\"\"", "multi-value field e.g. 
jira_labels: Only_One_Label if type(value) != list: value", "'https://alert.victorops.com/integrations/generic/20131114/alert/%s/%s' % ( self.victorops_api_key, self.victorops_routing_key) self.victorops_proxy = self.rule.get('victorops_proxy', None) def", "proxy, if it was provided proxies = {'https': self.victorops_proxy} if", "spaces normalized_jira_field = jira_field[5:].replace('_', ' ').lower() # All jira fields", "cls=DateTimeEncoder), headers=headers, proxies=proxies) response.raise_for_status() except RequestException as e: raise EAException(\"Error", "else val alert_text = alert_text.format(**kw) self.text += alert_text def _add_rule_text(self):", "= priorities[x].id def set_assignee(self, assignee): self.assignee = assignee if assignee:", "each alert \"\"\" required_options = frozenset(['hipchat_auth_token', 'hipchat_room_id']) def __init__(self, rule):", "x[0]) for key, value in match_items: if key.startswith('top_events_'): continue value_str", "'debug'} class EmailAlerter(Alerter): \"\"\" Sends an email alert \"\"\" required_options", "in (%s)' % (jql, ','.join(self.bump_in_statuses)) if self.bump_not_in_statuses: jql = '%s", "payload = { 'service_key': self.pagerduty_service_key, 'description': self.create_title(matches), 'event_type': 'trigger', 'incident_key':", "is only the case for two built-in types, id: issuekey", "from smtplib import SMTPAuthenticationError from smtplib import SMTPException from socket", "be updated on a case-by-case basis custom_string_types_with_special_handling = [ 'com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes',", "= SMTP(self.smtp_host, self.smtp_port) else: self.smtp = SMTP(self.smtp_host) self.smtp.ehlo() if self.smtp.has_extn('STARTTLS'):", "injection!') self.rule['command'] = [self.rule['command']] self.new_style_string_format = False if 'new_style_string_format' in", "proxy, if it was provided proxies = {'https': self.pagerduty_proxy} if", "the latter... 
raise Exception(\"Could not find a definition for the", "to Gitter: %s\" % e) elastalert_logger.info(\"Alert sent to Gitter\") def", "if arg_type == 'array': # As a convenience, support the", "create_alert_body(self, matches): body = self.description + '\\n' body += self.get_aggregation_summary_text(matches)", "'username', 'password', 'servicenow_rest_url', 'short_description', 'comments', 'assignment_group', 'category', 'subcategory', 'cmdb_ci', 'caller_id'", "try: if self.smtp_ssl: if self.smtp_port: self.smtp = SMTP_SSL(self.smtp_host, self.smtp_port, keyfile=self.smtp_key_file,", "None else val for val in alert_subject_values] return alert_subject.format(*alert_subject_values) return", "self).__init__(*args) self.last_command = [] self.shell = False if isinstance(self.rule['command'], basestring):", "super(HipChatAlerter, self).__init__(rule) self.hipchat_msg_color = self.rule.get('hipchat_msg_color', 'red') self.hipchat_message_format = self.rule.get('hipchat_message_format', 'html')", "the type of Alerter. 
\"\"\" return {'type': 'Unknown'} def create_title(self,", "\"\"\" raise NotImplementedError() def get_info(self): \"\"\" Returns a dictionary of", "'jira_assignee', 'jira_bump_after_inactivity', 'jira_bump_in_statuses', 'jira_bump_not_in_statuses', 'jira_bump_tickets', 'jira_component', 'jira_components', 'jira_description', 'jira_ignore_in_title', 'jira_issuetype',", "matches, for_search=False): # If there is a query_key, use that", "for %s, %s at %s:' % (self.rule['name'], match[qk], lookup_es_key(match, self.rule['timestamp_field']))", "if 'jira_ignore_in_title' in self.rule: title = title.replace(matches[0].get(self.rule['jira_ignore_in_title'], ''), '') #", "(' '.join(command))) except OSError as e: raise EAException(\"Error while running", "in matches: body += unicode(JiraFormattedMatchString(self.rule, match)) if len(matches) > 1:", "conn.start() conn.connect(self.stomp_login, self.stomp_password) conn.send(self.stomp_destination, json.dumps(fullmessage)) conn.disconnect() def get_info(self): return {'type':", "e)) def get_info(self): return {'type': 'command', 'command': ' '.join(self.last_command)} class", "in self.components] if self.labels: # Support single label or list", "alerts. :param rule: The rule configuration. \"\"\" required_options = frozenset([])", "\"\"\" Send an alert. 
Match is a dictionary of information", "'stomp_password']) def alert(self, matches): alerts = [] qk = self.rule.get('query_key',", "matches): body = self.create_alert_body(matches) # HipChat sends 400 bad request", "1024 chars raise EAException(\"Error connecting to JIRA: %s\" % (str(e)[:1024]))", "self.client.create_issue(**self.jira_args) # You can not add watchers on initial creation.", "# Maintain an aggregate count for each unique key encountered", "self).__init__(rule) self.server = self.rule['jira_server'] self.get_account(self.rule['jira_account_file']) self.project = self.rule['jira_project'] self.issue_type =", "rule, match): self.rule = rule self.match = match def _ensure_new_line(self):", "Creates a Slack room message for each alert \"\"\" required_options", "payload = { 'message': body, 'level': self.gitter_msg_level } try: response", "try: subp = subprocess.Popen(command, stdin=subprocess.PIPE, shell=self.shell) if self.rule.get('pipe_match_json'): match_json =", "in match: elastalert_logger.info( 'Alert for %s, %s at %s:' %", "raise EAException(\"SMTP username/password rejected: %s\" % (e)) self.smtp.sendmail(self.from_addr, to_addr, email_msg.as_string())", "self.pipeline['jira_server'] = self.server return None self.jira_args['summary'] = title self.jira_args['description'] =", "proxy, if it was provided proxies = {'https': self.gitter_proxy} if", "= to_addr + self.rule['bcc'] try: if self.smtp_ssl: if self.smtp_port: self.smtp", "{'https': self.telegram_proxy} if self.telegram_proxy else None payload = { 'chat_id':", "incident_key_values = [lookup_es_key(matches[0], arg) for arg in self.pagerduty_incident_key_args] # Populate", "self.rule['victorops_api_key'] self.victorops_routing_key = self.rule['victorops_routing_key'] self.victorops_message_type = self.rule['victorops_message_type'] self.victorops_entity_display_name = self.rule.get('victorops_entity_display_name',", "'parse_mode': 'markdown', 'disable_web_page_preview': True } try: response = 
requests.post(self.url, data=json.dumps(payload,", "all the fields defined on the jira server (built-ins and", "(by default, alerting to terminal). \"\"\" def alert(self, matches): qk", "'slack_webhook_url': self.slack_webhook_url} class PagerDutyAlerter(Alerter): \"\"\" Create an incident on PagerDuty", "if qk in match: elastalert_logger.info( 'Alert for %s, %s at", "ts_to_dt(ticket.fields.updated) >= inactivity_datetime: if self.pipeline is not None: self.pipeline['jira_ticket'] =", "logging.warning('Warning! You could be vulnerable to shell injection!') self.rule['command'] =", "self.url = 'https://alert.victorops.com/integrations/generic/20131114/alert/%s/%s' % ( self.victorops_api_key, self.victorops_routing_key) self.victorops_proxy = self.rule.get('victorops_proxy',", "# Handle arrays of simple types like strings or numbers", "= self.get_aggregation_summary_text(matches) for match in matches: body += unicode(BasicMatchString(self.rule, match))", "def get_aggregation_summary_text(self, matches): text = '' if 'aggregation' in self.rule", "HTTP POST. Encoded with JSON. 
\"\"\" def __init__(self, rule): super(HTTPPostAlerter,", "list): summary_table_fields = [summary_table_fields] # Include a count aggregation so", "= frozenset(['victorops_api_key', 'victorops_routing_key', 'victorops_message_type']) def __init__(self, rule): super(VictorOpsAlerter, self).__init__(rule) self.victorops_api_key", "+ 1 for keys, count in match_aggregation.iteritems(): text_table.add_row([key for key", "self.victorops_message_type, \"entity_display_name\": self.victorops_entity_display_name, \"monitoring_tool\": \"ElastAlert\", \"state_message\": body } try: response", "self.rule.get('http_post_payload', {}) self.post_static_payload = self.rule.get('http_post_static_payload', {}) self.post_all_values = self.rule.get('http_post_all_values', not", "= 'ElastAlert: %s' % (self.rule['name']) # If the rule has", "vulnerable to shell injection!') self.rule['command'] = [self.rule['command']] self.new_style_string_format = False", "RequestException as e: raise EAException(\"Error posting to ServiceNow: %s\" %", "data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies) response.raise_for_status() except RequestException as e: raise", "warning to ElastAlert saying that we couldn't find that type?", "if type(value) == dict or type(value) == list: self.resolve_rule_references(root[key]) else:", "use that in the title if 'query_key' in self.rule and", "self.rule.get('hipchat_message_format', 'html') self.hipchat_auth_token = self.rule['hipchat_auth_token'] self.hipchat_room_id = self.rule['hipchat_room_id'] self.hipchat_domain =", "Latin-1 to show something return json.dumps(blob, cls=DateTimeEncoder, sort_keys=True, indent=4, encoding='Latin-1',", "(self.sns_topic_arn)) class HipChatAlerter(Alerter): \"\"\" Creates a HipChat room notification for", "a dictionary of data related to this alert. 
At minimum,", "= frozenset(['twilio_account_sid', 'twilio_auth_token', 'twilio_to_number', 'twilio_from_number']) def __init__(self, rule): super(TwilioAlerter, self).__init__(rule)", "call above for i in xrange(len(alert_subject_values)): if alert_subject_values[i] is None:", "self.create_title(matches) if 'jira_ignore_in_title' in self.rule: title = title.replace(matches[0].get(self.rule['jira_ignore_in_title'], ''), '')", "self.create_alert_body(matches) # post to pagerduty headers = {'content-type': 'application/json'} payload", "other top-level rule properties # This technically may not work", "This allows us to maintain backwards compatibility # while also", "self.aws_access_key_id = self.rule.get('aws_access_key_id') self.aws_secret_access_key = self.rule.get('aws_secret_access_key') self.aws_region = self.rule.get('aws_region', 'us-east-1')", "if self.ms_teams_alert_fixed_width: body = body.replace('`', \"'\") body = \"```{0}```\".format('```\\n\\n```'.join(x for", "def create_custom_title(self, matches): alert_subject = unicode(self.rule['alert_subject']) if 'alert_subject_args' in self.rule:", "alert \"\"\" required_options = frozenset(['email']) def __init__(self, *args): super(EmailAlerter, self).__init__(*args)", "EAException(\"Error connecting to JIRA: %s\" % (str(e)[:1024])) try: if self.priority", "not determine schema information for the jira field '{0}'\".format(normalized_jira_field)) arg_type", "self.rule.get('use_local_time'))) # Add count for spikes count = matches[0].get('spike_count') if", "self.ms_teams_alert_summary = self.rule.get('ms_teams_alert_summary', 'ElastAlert Message') self.ms_teams_alert_fixed_width = self.rule.get('ms_teams_alert_fixed_width', False) self.ms_teams_theme_color", "= {'https': self.servicenow_proxy} if self.servicenow_proxy else None payload = {", "{'type': 'jira'} class CommandAlerter(Alerter): required_options = set(['command']) def __init__(self, *args):", "None) self.gitter_msg_level = 
self.rule.get('gitter_msg_level', 'error') def alert(self, matches): body =", "TelegramAlerter(Alerter): \"\"\" Send a Telegram message via bot api for", "self.jira_args[arg_name] = {'name': value} def get_priorities(self): \"\"\" Creates a mapping", "lets pretend it's Latin-1 to show something return json.dumps(blob, cls=DateTimeEncoder,", "matches): body = self.create_alert_body(matches) body = self.format_body(body) # post to", "except JIRAError as e: # JIRAError may contain HTML, pass", "bad request on messages longer than 10000 characters if (len(body)", "required_options = frozenset(['victorops_api_key', 'victorops_routing_key', 'victorops_message_type']) def __init__(self, rule): super(VictorOpsAlerter, self).__init__(rule)", "is a top-level rule property with the same name #", "it will manifest itself as an API error that will", "= self.rule.get('aws_profile', None) def create_default_title(self, matches): subject = 'ElastAlert: %s'", "TwilioRestException as e: raise EAException(\"Error posting to twilio: %s\" %", "{\"id\":\"customfield_12807\",\"name\":\"My Custom Field\",\"custom\":true,\"orderable\":true,\"navigable\":true,\"searchable\":true, # \"clauseNames\":[\"cf[12807]\",\"My Custom Field\"],\"schema\":{\"type\":\"array\",\"items\":\"string\", # \"custom\":\"com.atlassian.jira.plugin.system.customfieldtypes:multiselect\",\"customId\":12807}} #", "= self.rule.get('query_key', None) fullmessage = {} for match in matches:", "self.hipchat_auth_token) self.hipchat_proxy = self.rule.get('hipchat_proxy', None) def alert(self, matches): body =", "self.rule.get('pagerduty_incident_key_args', None) self.pagerduty_proxy = self.rule.get('pagerduty_proxy', None) self.url = 'https://events.pagerduty.com/generic/2010-04-15/create_event.json' def", "(jql, e)) return None if len(issues): return issues[0] def comment_on_ticket(self,", "- datetime.timedelta(days=self.bump_after_inactivity) if ts_to_dt(ticket.fields.updated) >= inactivity_datetime: if self.pipeline is 
not", "self.text class JiraFormattedMatchString(BasicMatchString): def _add_match_items(self): match_items = dict([(x, y) for", "def get_info(self): return {'type': 'exotel', 'exotel_account': self.exotel_account_sid} class TwilioAlerter(Alerter): required_options", "minimum, this should contain a field type corresponding to the", "'telegram_room_id': self.telegram_room_id} class GitterAlerter(Alerter): \"\"\" Creates a Gitter activity message", "class ExotelAlerter(Alerter): required_options = frozenset(['exotel_account_sid', 'exotel_auth_token', 'exotel_to_number', 'exotel_from_number']) def __init__(self,", "next((f for f in fields if normalized_jira_field == f[identifier].replace('_', '", "yaml_loader from texttable import Texttable from twilio.base.exceptions import TwilioRestException from", "value plus timestamp to subject if 'query_key' in self.rule: qk", "posting to ms teams: %s\" % e) elastalert_logger.info(\"Alert sent to", "class DateTimeEncoder(json.JSONEncoder): def default(self, obj): if hasattr(obj, 'isoformat'): return obj.isoformat()", "self.priority is not None: self.jira_args['priority'] = {'id': self.priority_ids[self.priority]} except KeyError:", "saying that we couldn't find that type? 
# OR raise", "val alert_text = alert_text.format(**kw) self.text += alert_text def _add_rule_text(self): self.text", "it to a list if it isn't bcc = self.rule.get('bcc')", "','.join(self.bump_not_in_statuses)) try: issues = self.client.search_issues(jql) except JIRAError as e: logging.exception(\"Error", "try: self.issue = self.client.create_issue(**self.jira_args) # You can not add watchers", "None else val for val in alert_text_values] alert_text = alert_text.format(*alert_text_values)", "matches): if self.pagerduty_incident_key_args: incident_key_values = [lookup_es_key(matches[0], arg) for arg in", "!= '\\n\\n': self.text += '\\n' def _add_custom_alert_text(self): missing = '<MISSING", "sns_client = session.client('sns') sns_client.publish( TopicArn=self.sns_topic_arn, Message=body, Subject=self.create_title(matches) ) elastalert_logger.info(\"Sent sns", "message_body) if response != 200: raise EAException(\"Error posting to Exotel,", "self.rule.get('gitter_proxy', None) self.gitter_msg_level = self.rule.get('gitter_msg_level', 'error') def alert(self, matches): body", "title += ' - %s' % (pretty_ts(matches[0][self.rule['timestamp_field']], self.rule.get('use_local_time'))) # Add", "SMTP_SSL(self.smtp_host, self.smtp_port, keyfile=self.smtp_key_file, certfile=self.smtp_cert_file) else: self.smtp = SMTP_SSL(self.smtp_host, keyfile=self.smtp_key_file, certfile=self.smtp_cert_file)", "fields = self.client.fields() for jira_field, value in self.rule.iteritems(): # If", "self.client.search_issues(jql) except JIRAError as e: logging.exception(\"Error while searching for JIRA", "blob): try: return json.dumps(blob, cls=DateTimeEncoder, sort_keys=True, indent=4, ensure_ascii=False) except UnicodeDecodeError:", "class GitterAlerter(Alerter): \"\"\" Creates a Gitter activity message for each", "not self.post_payload) def alert(self, matches): \"\"\" Each match will trigger", "return {'type': 'telegram', 'telegram_room_id': self.telegram_room_id} class 
GitterAlerter(Alerter): \"\"\" Creates a", "get_account(self, account_file): \"\"\" Gets the username and password from an", "def get_info(self): return {'type': 'command', 'command': ' '.join(self.last_command)} class SnsAlerter(Alerter):", "value} def get_priorities(self): \"\"\" Creates a mapping of priority index", "TwilioClient(self.twilio_account_sid, self.twilio_auth_token) try: client.messages.create(body=self.rule['name'], to=self.twilio_to_number, from_=self.twilio_from_number) except TwilioRestException as e:", "above for i in xrange(len(alert_text_values)): if alert_text_values[i] is None: alert_value", "i, item in enumerate(copy.copy(root)): if type(item) == dict or type(item)", "SMTP from smtplib import SMTP_SSL from smtplib import SMTPAuthenticationError from", "for each unique key encountered in the aggregation period for", "alert(self, matches): client = Exotel(self.exotel_account_sid, self.exotel_auth_token) try: message_body = self.rule['name']", "response = requests.post(url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies) response.raise_for_status() except RequestException", "self.hipchat_message_format, 'notify': self.hipchat_notify, 'from': self.hipchat_from } try: if self.hipchat_ignore_ssl_errors: requests.packages.urllib3.disable_warnings()", "jql '%s': %s\" % (jql, e)) return None if len(issues):", "as a watcher. 
Does the user exist?\\n{1}\" .format( watcher, ex", "headers = {'content-type': 'application/json'} # set https proxy, if it", "== 'number': self.jira_args[arg_name] = [int(v) for v in value] #", "a sample of one of them: # {\"id\":\"customfield_12807\",\"name\":\"My Custom Field\",\"custom\":true,\"orderable\":true,\"navigable\":true,\"searchable\":true,", "so that we can see at a glance how many", "count: title += ' - %s+ events' % (count) return", "likely others that will need to be updated on a", "but # in reality, they are required to be provided", "conn = stomp.Connection([(self.stomp_hostname, self.stomp_hostport)]) conn.start() conn.connect(self.stomp_login, self.stomp_password) conn.send(self.stomp_destination, json.dumps(fullmessage)) conn.disconnect()", "else: self.jira_args[arg_name] = value elif array_items == 'number': self.jira_args[arg_name] =", "if 'query_key' in self.rule and lookup_es_key(matches[0], self.rule['query_key']): title = 'ElastAlert:", "default, alerting to terminal). \"\"\" def alert(self, matches): qk =", "added raise Exception( \"Exception encountered when trying to add '{0}'", "= post_url self.post_proxy = self.rule.get('http_post_proxy') self.post_payload = self.rule.get('http_post_payload', {}) self.post_static_payload", "self.slack_emoji_override for url in self.slack_webhook_url: try: response = requests.post(url, data=json.dumps(payload,", "You can not add watchers on initial creation. Only as", "list if it isn't bcc = self.rule.get('bcc') if bcc and", "alert_subject_values[i] is None: alert_value = self.rule.get(alert_subject_args[i]) if alert_value: alert_subject_values[i] =", "used to support only a single label. 
This allows us", "from util import ts_to_dt class DateTimeEncoder(json.JSONEncoder): def default(self, obj): if", "keyfile=self.smtp_key_file, certfile=self.smtp_cert_file) else: self.smtp = SMTP_SSL(self.smtp_host, keyfile=self.smtp_key_file, certfile=self.smtp_cert_file) else: if", "self.rule.get('boto_profile', None) # Deprecated self.profile = self.rule.get('aws_profile', None) def create_default_title(self,", "\"\"\" Creates a HipChat room notification for each alert \"\"\"", "None payload = { '@type': 'MessageCard', '@context': 'http://schema.org/extensions', 'summary': self.ms_teams_alert_summary,", "e) elastalert_logger.info(\"Alert sent to Gitter\") def get_info(self): return {'type': 'gitter',", "% (qk) return subject def get_info(self): return {'type': 'email', 'recipients':", "not covered by the set that we are aware of,", "def set_assignee(self, assignee): self.assignee = assignee if assignee: self.jira_args['assignee'] =", "top-level rule properties to avoid redundant copy/paste if type(root) ==", "anything else, we will do best-effort and try to set", "alert(self, matches): client = TwilioClient(self.twilio_account_sid, self.twilio_auth_token) try: client.messages.create(body=self.rule['name'], to=self.twilio_to_number, from_=self.twilio_from_number)", "for f in fields if normalized_jira_field == f[identifier].replace('_', ' ').lower()),", "SMTP host: %s\" % (e)) except SMTPAuthenticationError as e: raise", "JSON. 
\"\"\" def __init__(self, rule): super(HTTPPostAlerter, self).__init__(rule) post_url = self.rule.get('http_post_url')", "conn.send(self.stomp_destination, json.dumps(fullmessage)) conn.disconnect() def get_info(self): return {'type': 'stomp'} class DebugAlerter(Alerter):", "if type(value) != list: value = [value] array_items = field['schema']['items']", "'from': self.hipchat_from } try: if self.hipchat_ignore_ssl_errors: requests.packages.urllib3.disable_warnings() response = requests.post(self.url,", "__init__(self, rule): self.rule = rule # pipeline object is created", "+ ['count'] text += \"Aggregation resulted in the following data", "{'type': 'hipchat', 'hipchat_room_id': self.hipchat_room_id} class MsTeamsAlerter(Alerter): \"\"\" Creates a Microsoft", "None payload = { 'color': self.hipchat_msg_color, 'message': body, 'message_format': self.hipchat_message_format,", "'') # This is necessary for search to work. Other", "in match for the given rule. \"\"\" def __init__(self, rule,", "elif type(root) == dict: # Make a copy since we", "self.priority_ids[self.priority]} except KeyError: logging.error(\"Priority %s not found. Valid priorities are", "fail to load the alert entirely? Probably the latter... 
raise", "frozenset(['twilio_account_sid', 'twilio_auth_token', 'twilio_to_number', 'twilio_from_number']) def __init__(self, rule): super(TwilioAlerter, self).__init__(rule) self.twilio_account_sid", "dashes # directly adjacent to words appear to be ok", "for val in alert_subject_values] return alert_subject.format(*alert_subject_values) return alert_subject def create_alert_body(self,", "summary_table_fields_with_count ) text_table = Texttable() text_table.header(summary_table_fields_with_count) match_aggregation = {} #", "requests.exceptions import RequestException from staticconf.loader import yaml_loader from texttable import", "def __init__(self, rule): super(JiraAlerter, self).__init__(rule) self.server = self.rule['jira_server'] self.get_account(self.rule['jira_account_file']) self.project", "if self.rule.get('alert_text_type') != 'exclude_fields': self._add_match_items() return self.text class JiraFormattedMatchString(BasicMatchString): def", "+ [count]) text += text_table.draw() + '\\n\\n' return unicode(text) def", "alert(self, matches): body = u'⚠ *%s* ⚠ ```\\n' % (self.create_title(matches))", "also giving the user-facing API a more representative name self.labels", "normalized_jira_field = jira_field[5:].replace('_', ' ').lower() # All jira fields should", "self.pipeline['jira_ticket'] = self.issue self.pipeline['jira_server'] = self.server def create_alert_body(self, matches): body", "import ts_to_dt class DateTimeEncoder(json.JSONEncoder): def default(self, obj): if hasattr(obj, 'isoformat'):", "raise EAException('Account file must have user and password fields') self.user", "self.rule['name'] fullmessage['matching'] = unicode(BasicMatchString(self.rule, match)) fullmessage['alertDate'] = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\") fullmessage['body']", "alert_text_values[i] is None: alert_value = self.rule.get(alert_text_args[i]) if alert_value: alert_text_values[i] =", "(qk) return subject def get_info(self): return {'type': 'email', 
'recipients': self.rule['email']}", "= {'id': self.priority_ids[self.priority]} except KeyError: logging.error(\"Priority %s not found. Valid", "try both just in case for identifier in ['name', 'id']:", "to Exotel, response code is %s\" % response) except: raise", "to str pass self.text += '%s: %s\\n' % (key, value_str)", "JiraAlerter(Alerter): \"\"\" Creates a Jira ticket for each alert \"\"\"", "def _ensure_new_line(self): while self.text[-2:] != '\\n\\n': self.text += '\\n' def", "Populate values with rule level properties too for i in", "= '%s and status in (%s)' % (jql, ','.join(self.bump_in_statuses)) if", "self).__init__(rule) self.slack_webhook_url = self.rule['slack_webhook_url'] if isinstance(self.slack_webhook_url, basestring): self.slack_webhook_url = [self.slack_webhook_url]", "aggregate count for each unique key encountered in the aggregation", "'gitter', 'gitter_webhook_url': self.gitter_webhook_url} class ServiceNowAlerter(Alerter): \"\"\" Creates a ServiceNow alert", "Alerter(object): \"\"\" Base class for types of alerts. :param rule:", "import ts_now from util import ts_to_dt class DateTimeEncoder(json.JSONEncoder): def default(self,", "messages longer than 10000 characters if (len(body) > 9999): body", "of (%s). 
As such, no tickets will ever be found.'", "EAException(\"Error posting to Exotel\"), None, sys.exc_info()[2] elastalert_logger.info(\"Trigger sent to Exotel\")", "[self.rule['bcc']] add_suffix = self.rule.get('email_add_domain') if add_suffix and not add_suffix.startswith('@'): self.rule['email_add_domain']", "url = '%s/browse/%s' % (self.pipeline['jira_server'], self.pipeline['jira_ticket']) body += '\\nJIRA ticket:", "built-in jira types that can be used as custom fields", "% e) elastalert_logger.info(\"Alert sent to MS Teams\") def get_info(self): return", "for match in matches: try: self.comment_on_ticket(ticket, match) except JIRAError as", "val = self.rule.get(name) kw[kw_name] = missing if val is None", "[name + self.rule['email_add_domain'] for name in to_addr] email_msg = MIMEText(body.encode('UTF-8'),", "password fields') self.user = account_conf['user'] self.password = account_conf['password'] class StompAlerter(Alerter):", "post_url self.post_proxy = self.rule.get('http_post_proxy') self.post_payload = self.rule.get('http_post_payload', {}) self.post_static_payload =", "provides # a single value for a multi-value field e.g.", "rule): super(MsTeamsAlerter, self).__init__(rule) self.ms_teams_webhook_url = self.rule['ms_teams_webhook_url'] if isinstance(self.ms_teams_webhook_url, basestring): self.ms_teams_webhook_url", "= {'name': self.assignee} try: self.client = JIRA(self.server, basic_auth=(self.user, self.password)) self.get_priorities()", "cls=DateTimeEncoder, sort_keys=True, indent=4, ensure_ascii=False) except UnicodeDecodeError: # This blob contains", "'twilio', 'twilio_client_name': self.twilio_from_number} class VictorOpsAlerter(Alerter): \"\"\" Creates a VictorOps Incident", "= {'https': self.telegram_proxy} if self.telegram_proxy else None payload = {", "self.server def create_alert_body(self, matches): body = self.description + '\\n' body", "= self.create_alert_body(matches) # HipChat sends 400 bad request on messages", "payload = { 'color': 
self.hipchat_msg_color, 'message': body, 'message_format': self.hipchat_message_format, 'notify':", "alert(self, matches): \"\"\" Each match will trigger a POST to", "self.bump_not_in_statuses = self.rule.get('jira_bump_not_in_statuses') self.bump_in_statuses = self.rule.get('jira_bump_in_statuses') self.bump_after_inactivity = self.rule.get('jira_bump_after_inactivity', self.max_age)", "self.client.fields() for jira_field, value in self.rule.iteritems(): # If we find", "self.last_command = [] self.shell = False if isinstance(self.rule['command'], basestring): self.shell", "= self.rule.get('http_post_static_payload', {}) self.post_all_values = self.rule.get('http_post_all_values', not self.post_payload) def alert(self,", "the jira_ part. Convert underscores to spaces normalized_jira_field = jira_field[5:].replace('_',", "def alert(self, matches): alerts = [] qk = self.rule.get('query_key', None)", "for each alert \"\"\" required_options = frozenset(['victorops_api_key', 'victorops_routing_key', 'victorops_message_type']) def", "try: message_body = self.rule['name'] + self.sms_body response = client.sms(self.rule['exotel_from_number'], self.rule['exotel_to_number'],", "if self.post_proxy else None for url in self.post_url: try: response", "pipeline object is created by ElastAlerter.send_alert() # and attached to", "'jira_ticket' in self.pipeline: url = '%s/browse/%s' % (self.pipeline['jira_server'], self.pipeline['jira_ticket']) body", "for types of alerts. :param rule: The rule configuration. 
\"\"\"", "% (self.rule['name'], lookup_es_key(match, self.rule['timestamp_field']))) alerts.append( '2)Alert for %s at %s:'", "match: elastalert_logger.info( 'Alert for %s, %s at %s:' % (self.rule['name'],", "posting to ServiceNow: %s\" % e) elastalert_logger.info(\"Alert sent to ServiceNow\")", "or 'type' in field['schema']): raise Exception(\"Could not determine schema information", "HTTP Post alert: %s\" % e) elastalert_logger.info(\"HTTP Post alert sent.\")", "if self.watchers: # Support single watcher or list if type(self.watchers)", "an exception since we don't know how to set it", "from twilio.base.exceptions import TwilioRestException from twilio.rest import Client as TwilioClient", "return {'type': 'email', 'recipients': self.rule['email']} class JiraAlerter(Alerter): \"\"\" Creates a", "body = self.format_body(body) # post to Teams headers = {'content-type':", "% (key, value_str) def _pretty_print_as_json(self, blob): try: return json.dumps(blob, cls=DateTimeEncoder,", "% (' '.join(command), e)) def get_info(self): return {'type': 'command', 'command':", "text = super(JiraAlerter, self).get_aggregation_summary_text(matches) if text: text = u'{{noformat}}{0}{{noformat}}'.format(text) return", "The stomp alerter publishes alerts via stomp to a broker.", "and status not in (%s)' % (jql, ','.join(self.bump_not_in_statuses)) try: issues", "self.rule.get('http_post_all_values', not self.post_payload) def alert(self, matches): \"\"\" Each match will", "found in the 'id' or the 'name' field. 
Therefore, try", "= self.rule.get('jira_bump_in_statuses') self.bump_after_inactivity = self.rule.get('jira_bump_after_inactivity', self.max_age) self.watchers = self.rule.get('jira_watchers') if", "alert_value = self.rule.get(alert_text_args[i]) if alert_value: alert_text_values[i] = alert_value alert_text_values =", "self.rule.iteritems(): # If we find a field that is not", "# Deprecated self.profile = self.rule.get('aws_profile', None) def create_default_title(self, matches): subject", "in range(len(priorities)): self.priority_ids[x] = priorities[x].id def set_assignee(self, assignee): self.assignee =", "matches): body = self.create_alert_body(matches) # post to victorops headers =", "if there is a top-level rule property with the same", "value] else: # Try setting it as an object, using", "'\\n' def _add_match_items(self): match_items = self.match.items() match_items.sort(key=lambda x: x[0]) for", "If it works, great! If not, it will manifest itself", "\"\"\" The stomp alerter publishes alerts via stomp to a", "as e: raise EAException(\"SMTP username/password rejected: %s\" % (e)) self.smtp.sendmail(self.from_addr,", "sent to Exotel\") def get_info(self): return {'type': 'exotel', 'exotel_account': self.exotel_account_sid}", "is None: val = self.rule.get(name) kw[kw_name] = missing if val", "response = requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, verify=not self.hipchat_ignore_ssl_errors, proxies=proxies) warnings.resetwarnings()", "= self.rule['pagerduty_client_name'] self.pagerduty_incident_key = self.rule.get('pagerduty_incident_key', '') self.pagerduty_incident_key_args = self.rule.get('pagerduty_incident_key_args', None)", "provided proxies = {'https': self.pagerduty_proxy} if self.pagerduty_proxy else None try:", "self.slack_webhook_url = [self.slack_webhook_url] self.slack_proxy = self.rule.get('slack_proxy', None) self.slack_username_override = self.rule.get('slack_username_override',", "util import pretty_ts 
from util import ts_now from util import", "msg = 'Both jira_bump_in_statuses (%s) and jira_bump_not_in_statuses (%s) are set.'", "e: raise EAException(\"Error posting to twilio: %s\" % e) elastalert_logger.info(\"Trigger", "sent to VictorOps\") def get_info(self): return {'type': 'victorops', 'victorops_routing_key': self.victorops_routing_key}", "# If the rule has a query_key, add that value", "more representative name self.components = self.rule.get('jira_components', self.rule.get('jira_component')) # We used", "matches[0].get(self.rule['query_key']) if qk: subject += ' - %s' % (qk)", "client = Exotel(self.exotel_account_sid, self.exotel_auth_token) try: message_body = self.rule['name'] + self.sms_body", "structure we're walking for key, value in root.copy().iteritems(): if type(value)", "return body def get_aggregation_summary_text(self, matches): text = super(JiraAlerter, self).get_aggregation_summary_text(matches) if", "def alert(self, matches): body = self.create_alert_body(matches) session = boto3.Session( aws_access_key_id=self.aws_access_key_id,", "self.rule.get('aws_secret_access_key') self.aws_region = self.rule.get('aws_region', 'us-east-1') self.profile = self.rule.get('boto_profile', None) #", "Telegram room %s\" % self.telegram_room_id) def get_info(self): return {'type': 'telegram',", "self.post_proxy else None for url in self.post_url: try: response =", "query_key, add that value plus timestamp to subject if 'query_key'", "Re-raise the exception, preserve the stack-trace, and give some #", "self.pagerduty_incident_key_args = self.rule.get('pagerduty_incident_key_args', None) self.pagerduty_proxy = self.rule.get('pagerduty_proxy', None) self.url =", "% e) elastalert_logger.info(\"Trigger sent to VictorOps\") def get_info(self): return {'type':", "# Add JIRA ticket if it exists if self.pipeline is", "alert_value: alert_text_values[i] = alert_value alert_text_values = [missing if val is", "the scenario wherein the user only provides # a single", 
"(timestamp, text) self.client.add_comment(ticket, comment) def alert(self, matches): title = self.create_title(matches)", "matched %s' % (lookup_es_key(matches[0], self.rule['query_key']), self.rule['name']) else: title = 'ElastAlert:", "title += ' - %s+ events' % (count) return title", "other.' logging.warning(msg) self.jira_args = {'project': {'key': self.project}, 'issuetype': {'name': self.issue_type}}", "title.replace(matches[0].get(self.rule['jira_ignore_in_title'], ''), '') # This is necessary for search to", "# Post to HipChat headers = {'content-type': 'application/json'} # set", "lookup_es_key call above for i in xrange(len(alert_text_values)): if alert_text_values[i] is", "'jira_priority', 'jira_project', 'jira_server', 'jira_watchers', ] # Some built-in jira types", "if 'new_style_string_format' in self.rule and self.rule['new_style_string_format']: self.new_style_string_format = True def", "self.pagerduty_service_key, 'description': self.create_title(matches), 'event_type': 'trigger', 'incident_key': self.get_incident_key(matches), 'client': self.pagerduty_client_name, 'details':", "a bcc then also convert it to a list if", "A list of dictionaries of relevant information to the alert.", "# Special case for custom types (the JIRA metadata says", "'jira_ignore_in_title', 'jira_issuetype', 'jira_label', 'jira_labels', 'jira_max_age', 'jira_priority', 'jira_project', 'jira_server', 'jira_watchers', ]", "if self.bump_in_statuses and self.bump_not_in_statuses: msg = 'Both jira_bump_in_statuses (%s) and", "return json.dumps(blob, cls=DateTimeEncoder, sort_keys=True, indent=4, encoding='Latin-1', ensure_ascii=False) def __str__(self): self.text", "= {'name': assignee} elif 'assignee' in self.jira_args: self.jira_args.pop('assignee') def find_existing_ticket(self,", "= str(BasicMatchString(self.rule, match)) # Set proper headers headers = {", "a single value for a multi-value field e.g. 
jira_labels: Only_One_Label", "\"information\": body.encode('UTF-8'), }, } # set https proxy, if it", "self.watchers: # Support single watcher or list if type(self.watchers) !=", "proxy, if it was provided proxies = {'https': self.telegram_proxy} if", "not work, as the key might actually be 'key', 'id',", "'telegram', 'telegram_room_id': self.telegram_room_id} class GitterAlerter(Alerter): \"\"\" Creates a Gitter activity", "API returns metadata about all the fields defined on the", "'slack', 'slack_username_override': self.slack_username_override, 'slack_webhook_url': self.slack_webhook_url} class PagerDutyAlerter(Alerter): \"\"\" Create an", "kw = {} for name, kw_name in self.rule.get('alert_text_kw').items(): val =", "self.hipchat_domain = self.rule.get('hipchat_domain', 'api.hipchat.com') self.hipchat_ignore_ssl_errors = self.rule.get('hipchat_ignore_ssl_errors', False) self.hipchat_notify =", "self.pipeline is not None and 'jira_ticket' in self.pipeline: url =", "as the key might actually be 'key', 'id', 'value', or", "support only a single label. This allows us to maintain", "server (built-ins and custom ones) fields = self.client.fields() for jira_field,", "Send alert using AWS SNS service \"\"\" required_options = frozenset(['sns_topic_arn'])", "self.shell = False if isinstance(self.rule['command'], basestring): self.shell = True if", "plus timestamp to subject if 'query_key' in self.rule: qk =", "and strValue[1:-1] in self.rule: if type(value) == int: return int(self.rule[strValue[1:-1]])", "data related to this alert. 
At minimum, this should contain", "alert_text_values = [missing if val is None else val for", "creating JIRA ticket using jira_args (%s): %s\" % (self.jira_args, e))", "to HipChat headers = {'content-type': 'application/json'} # set https proxy,", "self._pretty_print_as_json(value) except TypeError: # Non serializable object, fallback to str", "pass self.text += '%s: %s\\n' % (key, value_str) def _pretty_print_as_json(self,", "a top-level rule property with the same name # as", "len(matches) > 1: body += '\\n----------------------------------------\\n' return body def get_aggregation_summary_text(self,", "{0}:\\n\\n\".format( summary_table_fields_with_count ) text_table = Texttable() text_table.header(summary_table_fields_with_count) match_aggregation = {}", "types if arg_type in ['string', 'date', 'datetime']: # Special case", "None) self.url = 'https://events.pagerduty.com/generic/2010-04-15/create_event.json' def alert(self, matches): body = self.create_alert_body(matches)", "try: self.client = JIRA(self.server, basic_auth=(self.user, self.password)) self.get_priorities() self.get_arbitrary_fields() except JIRAError", "\"\"\" required_options = frozenset(['sns_topic_arn']) def __init__(self, *args): super(SnsAlerter, self).__init__(*args) self.sns_topic_arn", "# Support referencing other top-level rule properties # This technically", "stomp alerter publishes alerts via stomp to a broker. 
\"\"\"", "key)) for key in summary_table_fields]) if key_tuple not in match_aggregation:", "title = self.create_title(matches) if 'jira_ignore_in_title' in self.rule: title = title.replace(matches[0].get(self.rule['jira_ignore_in_title'],", "% (ticket, e)) if self.pipeline is not None: self.pipeline['jira_ticket'] =", "= self.rule.get('stomp_login', 'admin') self.stomp_password = self.rule.get('stomp_password', '<PASSWORD>') self.stomp_destination = self.rule.get('stomp_destination',", "% (self.jira_args, e)) elastalert_logger.info(\"Opened Jira ticket: %s\" % (self.issue)) if", "\"\"\" required_options = frozenset(['pagerduty_service_key', 'pagerduty_client_name']) def __init__(self, rule): super(PagerDutyAlerter, self).__init__(rule)", "= {'https': self.post_proxy} if self.post_proxy else None for url in", "via stomp to a broker. \"\"\" required_options = frozenset(['stomp_hostname', 'stomp_hostport',", "JIRA admin has configured if jira_field.startswith('jira_') and jira_field not in", "% e) elastalert_logger.info(\"Trigger sent to PagerDuty\") def get_incident_key(self, matches): if", "in self.rule: return self.create_custom_title(matches) return self.create_default_title(matches) def create_custom_title(self, matches): alert_subject", "'us-east-1') self.profile = self.rule.get('boto_profile', None) # Deprecated self.profile = self.rule.get('aws_profile',", "of simple types like strings or numbers if arg_type ==", "v} for v in value] else: self.jira_args[arg_name] = value elif", "headers=headers, verify=not self.hipchat_ignore_ssl_errors, proxies=proxies) warnings.resetwarnings() response.raise_for_status() except RequestException as e:", "= {'https': self.gitter_proxy} if self.gitter_proxy else None payload = {", "e: raise EAException(\"Error posting HTTP Post alert: %s\" % e)", "'@' + add_suffix def alert(self, matches): body = self.create_alert_body(matches) #", "None: val = self.rule.get(name) kw[kw_name] = missing if val is", "the schema information to 
decide how to set the value", "Other special characters and dashes # directly adjacent to words", "'project=%s AND summary~\"%s\" and created >= \"%s\"' % (self.project, title,", "Post to HipChat headers = {'content-type': 'application/json'} # set https", "in case for identifier in ['name', 'id']: field = next((f", "self.slack_proxy else None payload = { 'username': self.slack_username_override, 'channel': self.slack_channel_override,", "support the scenario wherein the user only provides # a", "cls=DateTimeEncoder, ensure_ascii=False), headers=headers, proxies=proxies ) response.raise_for_status() except RequestException as e:", "self.twilio_auth_token = self.rule['twilio_auth_token'] self.twilio_to_number = self.rule['twilio_to_number'] self.twilio_from_number = self.rule['twilio_from_number'] def", "body.encode('UTF-8') if self.ms_teams_alert_fixed_width: body = body.replace('`', \"'\") body = \"```{0}```\".format('```\\n\\n```'.join(x", "Support single watcher or list if type(self.watchers) != list: self.watchers", "self.slack_icon_url_override != '': payload['icon_url'] = self.slack_icon_url_override else: payload['icon_emoji'] = self.slack_emoji_override", "client.sms(self.rule['exotel_from_number'], self.rule['exotel_to_number'], message_body) if response != 200: raise EAException(\"Error posting", "from exotel import Exotel from jira.client import JIRA from jira.exceptions", "description. description = str(BasicMatchString(self.rule, match)) # Set proper headers headers", "self.shell = True if '%' in self.rule['command']: logging.warning('Warning! 
You could", "str(BasicMatchString(self.rule, match)) # Set proper headers headers = { \"Content-Type\":", "\"Aggregation resulted in the following data for summary_table_fields ==> {0}:\\n\\n\".format(", "if self.rule.get('cc'): email_msg['CC'] = ','.join(self.rule['cc']) to_addr = to_addr + self.rule['cc']", "while searching for JIRA ticket using jql '%s': %s\" %", "body = body.replace('\\n', '<br />') # Post to HipChat headers", "{'content-type': 'application/json'} # set https proxy, if it was provided", "'')) if 'alert_text_args' in self.rule: alert_text_args = self.rule.get('alert_text_args') alert_text_values =", "GitterAlerter(Alerter): \"\"\" Creates a Gitter activity message for each alert", "fullmessage['alertDate'] = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\") fullmessage['body'] = self.create_alert_body(matches) self.stomp_hostname = self.rule.get('stomp_hostname',", "y in self.match.items() if not x.startswith('top_events_')]) json_blob = self._pretty_print_as_json(match_items) preformatted_text", "the jira server (built-ins and custom ones) fields = self.client.fields()", "self.bump_tickets: ticket = self.find_existing_ticket(matches) if ticket: inactivity_datetime = ts_now() -", "self.smtp.login(self.user, self.password) except (SMTPException, error) as e: raise EAException(\"Error connecting", "arg_name = field['id'] # Check the schema information to decide", "%s\" % self.hipchat_room_id) def get_info(self): return {'type': 'hipchat', 'hipchat_room_id': self.hipchat_room_id}", "bubble up self.jira_args[arg_name] = [{'name': v} for v in value]", "= self.rule['twilio_account_sid'] self.twilio_auth_token = self.rule['twilio_auth_token'] self.twilio_to_number = self.rule['twilio_to_number'] self.twilio_from_number =", "requests.post(self.gitter_webhook_url, json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies) response.raise_for_status() except RequestException as e:", "'@type': 'MessageCard', '@context': 
'http://schema.org/extensions', 'summary': self.ms_teams_alert_summary, 'title': self.create_title(matches), 'text': body", "not in account_conf or 'password' not in account_conf: raise EAException('Account", "self.jira_args['summary'] = title self.jira_args['description'] = self.create_alert_body(matches) try: self.issue = self.client.create_issue(**self.jira_args)", "self.telegram_room_id, 'text': body, 'parse_mode': 'markdown', 'disable_web_page_preview': True } try: response", "is not None: self.pipeline['jira_ticket'] = None self.pipeline['jira_server'] = self.server return", "if 'alert_subject' in self.rule: return self.create_custom_title(matches) return self.create_default_title(matches) def create_custom_title(self,", "in self.rule: alert_subject_args = self.rule['alert_subject_args'] alert_subject_values = [lookup_es_key(matches[0], arg) for", "it works, great! If not, it will manifest itself as", "service \"\"\" required_options = frozenset(['sns_topic_arn']) def __init__(self, *args): super(SnsAlerter, self).__init__(*args)", "cls=DateTimeEncoder), proxies=proxies ) response.raise_for_status() except RequestException as e: raise EAException(\"Error", "json.dumps(fullmessage)) conn.disconnect() def get_info(self): return {'type': 'stomp'} class DebugAlerter(Alerter): \"\"\"", "%s:' % (self.rule['name'], lookup_es_key(match, self.rule['timestamp_field']))) alerts.append( '2)Alert for %s at", "for x in range(len(priorities)): self.priority_ids[x] = priorities[x].id def set_assignee(self, assignee):", "= '' if 'aggregation' in self.rule and 'summary_table_fields' in self.rule:", "match_json = json.dumps(matches, cls=DateTimeEncoder) + '\\n' stdout, stderr = subp.communicate(input=match_json)", "This may not work, as the key might actually be", "list: self.watchers = [self.watchers] if self.assignee: self.jira_args['assignee'] = {'name': self.assignee}", "self.issue = self.client.create_issue(**self.jira_args) # You can not add watchers on", 
"'application/json'} payload = { 'service_key': self.pagerduty_service_key, 'description': self.create_title(matches), 'event_type': 'trigger',", "= self.rule['exotel_to_number'] self.exotel_from_number = self.rule['exotel_from_number'] self.sms_body = self.rule.get('exotel_message_body', '') def", "[self.watchers] if self.assignee: self.jira_args['assignee'] = {'name': self.assignee} try: self.client =", "= self.rule.get('http_post_proxy') self.post_payload = self.rule.get('http_post_payload', {}) self.post_static_payload = self.rule.get('http_post_static_payload', {})", "characters and dashes # directly adjacent to words appear to", "alert(self, matches): body = self.create_alert_body(matches) # post to victorops headers", "is a cc then also convert it a list if", "jira_field not in self.known_field_list: # Remove the jira_ part. Convert", "response code is %s\" % response) except: raise EAException(\"Error posting", "a dictionary of information about the alert. :param match: A", "and self.bump_not_in_statuses: msg = 'Both jira_bump_in_statuses (%s) and jira_bump_not_in_statuses (%s)", "util import EAException from util import elastalert_logger from util import", "self.gitter_msg_level } try: response = requests.post(self.gitter_webhook_url, json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies)", "Creates a string containing fields in match for the given", "from util import elastalert_logger from util import lookup_es_key from util", "return self.pagerduty_incident_key.format(*incident_key_values) else: return self.pagerduty_incident_key def get_info(self): return {'type': 'pagerduty',", "were encountered summary_table_fields_with_count = summary_table_fields + ['count'] text += \"Aggregation", "arg) for arg in alert_subject_args] # Support referencing other top-level", "'text': body, 'mrkdwn_in': ['text', 'pretext'], 'fields': [] } ] }", "been matched in the lookup_es_key call above if val is", "frozenset(['exotel_account_sid', 
'exotel_auth_token', 'exotel_to_number', 'exotel_from_number']) def __init__(self, rule): super(ExotelAlerter, self).__init__(rule) self.exotel_account_sid", "alert: %s\" % e) elastalert_logger.info(\"HTTP Post alert sent.\") def get_info(self):", "provided proxies = {'https': self.victorops_proxy} if self.victorops_proxy else None payload", "\"caller_id\": self.rule[\"caller_id\"] } try: response = requests.post( self.servicenow_rest_url, auth=(self.rule['username'], self.rule['password']),", "__init__(self, *args): super(CommandAlerter, self).__init__(*args) self.last_command = [] self.shell = False", "isinstance(recipient, basestring): if '@' in recipient: to_addr = [recipient] elif", "basestring): if '@' in recipient: to_addr = [recipient] elif 'email_add_domain'", "return subject def alert(self, matches): body = self.create_alert_body(matches) session =", "self.profile = self.rule.get('aws_profile', None) def create_default_title(self, matches): subject = 'ElastAlert:", "rule has a query_key, add that value plus timestamp to", "string types if arg_type in ['string', 'date', 'datetime']: # Special", "%s\" % e) elastalert_logger.info(\"Trigger sent to Twilio\") def get_info(self): return", "EAException(\"Error posting HTTP Post alert: %s\" % e) elastalert_logger.info(\"HTTP Post", "'.join(command), e)) def get_info(self): return {'type': 'command', 'command': ' '.join(self.last_command)}", "__init__(self, rule): super(SlackAlerter, self).__init__(rule) self.slack_webhook_url = self.rule['slack_webhook_url'] if isinstance(self.slack_webhook_url, basestring):", "Texttable from twilio.base.exceptions import TwilioRestException from twilio.rest import Client as", "victorops headers = {'content-type': 'application/json'} # set https proxy, if", "# post to slack headers = {'content-type': 'application/json'} # set", "'com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes', 'com.atlassian.jira.plugin.system.customfieldtypes:multiselect', 
'com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons', ] def __init__(self, rule): super(JiraAlerter, self).__init__(rule) self.server", "copy/paste if type(root) == list: # Make a copy since", "about the alert. :param match: A dictionary of relevant information", "bcc then also convert it to a list if it", "self.smtp_port, keyfile=self.smtp_key_file, certfile=self.smtp_cert_file) else: self.smtp = SMTP_SSL(self.smtp_host, keyfile=self.smtp_key_file, certfile=self.smtp_cert_file) else:", "else: # Try setting it as an object, using 'name'", "'date', 'datetime']: # Special case for custom types (the JIRA", "True) self.hipchat_from = self.rule.get('hipchat_from', '') self.url = 'https://%s/v2/room/%s/notification?auth_token=%s' % (", "return {'type': 'victorops', 'victorops_routing_key': self.victorops_routing_key} class TelegramAlerter(Alerter): \"\"\" Send a", "= self.rule['servicenow_rest_url'] self.servicenow_proxy = self.rule.get('servicenow_proxy', None) def alert(self, matches): for", "handle arrays of complex types that have to be passed", "provided proxies = {'https': self.telegram_proxy} if self.telegram_proxy else None payload", "self.jira_args['labels'] = self.labels if self.watchers: # Support single watcher or", "self.gitter_proxy} if self.gitter_proxy else None payload = { 'message': body,", "except SMTPAuthenticationError as e: raise EAException(\"SMTP username/password rejected: %s\" %", "alert(self, matches): body = self.create_alert_body(matches) # Add JIRA ticket if", "%s\\n' % (key, value_str) def _pretty_print_as_json(self, blob): try: return json.dumps(blob,", "None if len(issues): return issues[0] def comment_on_ticket(self, ticket, match): text", "alert(self, matches): for match in matches: # Parse everything into", "fields') self.user = account_conf['user'] self.password = account_conf['password'] class StompAlerter(Alerter): \"\"\"", "an object, using 'name' as the key # This may", "%s' % (qk) return subject def get_info(self): 
return {'type': 'email',", "post to Teams headers = {'content-type': 'application/json'} # set https", "self.stomp_password) conn.send(self.stomp_destination, json.dumps(fullmessage)) conn.disconnect() def get_info(self): return {'type': 'stomp'} class", "= json.dumps(matches, cls=DateTimeEncoder) + '\\n' stdout, stderr = subp.communicate(input=match_json) if", "self.rule and 'summary_table_fields' in self.rule: summary_table_fields = self.rule['summary_table_fields'] if not", "import elastalert_logger from util import lookup_es_key from util import pretty_ts", "False) self.from_addr = self.rule.get('from_addr', 'ElastAlert') self.smtp_port = self.rule.get('smtp_port') if self.rule.get('smtp_auth_file'):", "only the case for two built-in types, id: issuekey and", "def alert(self, matches): body = self.create_alert_body(matches) body = self.format_body(body) #", "self.pipeline: url = '%s/browse/%s' % (self.pipeline['jira_server'], self.pipeline['jira_ticket']) body += '\\nJIRA", "[self.slack_webhook_url] self.slack_proxy = self.rule.get('slack_proxy', None) self.slack_username_override = self.rule.get('slack_username_override', 'elastalert') self.slack_channel_override", "OSError as e: raise EAException(\"Error while running command %s: %s\"", "message_body = self.rule['name'] + self.sms_body response = client.sms(self.rule['exotel_from_number'], self.rule['exotel_to_number'], message_body)", "'') def format_body(self, body): body = body.encode('UTF-8') if self.ms_teams_alert_fixed_width: body", "else None payload = { 'color': self.hipchat_msg_color, 'message': body, 'message_format':", "if key_tuple not in match_aggregation: match_aggregation[key_tuple] = 1 else: match_aggregation[key_tuple]", "proxies = {'https': self.slack_proxy} if self.slack_proxy else None payload =", "else {} payload.update(self.post_static_payload) for post_key, es_key in self.post_payload.items(): payload[post_key] =", "rule): super(ExotelAlerter, self).__init__(rule) 
self.exotel_account_sid = self.rule['exotel_account_sid'] self.exotel_auth_token = self.rule['exotel_auth_token'] self.exotel_to_number", "True } try: response = requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies)", "if val is None else val for val in alert_text_values]", "= { 'color': self.hipchat_msg_color, 'message': body, 'message_format': self.hipchat_message_format, 'notify': self.hipchat_notify,", "TwilioClient from util import EAException from util import elastalert_logger from", "work. Other special characters and dashes # directly adjacent to", "rule property with the same name # as an es", "to_addr + self.rule['cc'] if self.rule.get('bcc'): to_addr = to_addr + self.rule['bcc']", "the structure we're walking for key, value in root.copy().iteritems(): if", "if 'query_key' in self.rule: qk = matches[0].get(self.rule['query_key']) if qk: subject", "basestring): self.slack_webhook_url = [self.slack_webhook_url] self.slack_proxy = self.rule.get('slack_proxy', None) self.slack_username_override =", "it was provided proxies = {'https': self.gitter_proxy} if self.gitter_proxy else", "post to Gitter headers = {'content-type': 'application/json'} # set https", "alert(self, match): \"\"\" Send an alert. Match is a dictionary", "if type(self.watchers) != list: self.watchers = [self.watchers] if self.assignee: self.jira_args['assignee']", "self.components = self.rule.get('jira_components', self.rule.get('jira_component')) # We used to support only", "= alerts fullmessage['rule'] = self.rule['name'] fullmessage['matching'] = unicode(BasicMatchString(self.rule, match)) fullmessage['alertDate']", "= '%s Both have common statuses of (%s). As such,", "return self.create_custom_title(matches) return self.create_default_title(matches) def create_custom_title(self, matches): alert_subject = unicode(self.rule['alert_subject'])", "array_items == 'option': self.jira_args[arg_name] = [{'value': v} for v in", "type of Alerter. 
\"\"\" return {'type': 'Unknown'} def create_title(self, matches):", "we don't have on our radar # 2. A custom", "else # If it works, great! If not, it will", "if len(issues): return issues[0] def comment_on_ticket(self, ticket, match): text =", "# post to victorops headers = {'content-type': 'application/json'} # set", "aws_secret_access_key=self.aws_secret_access_key, region_name=self.aws_region, profile_name=self.profile ) sns_client = session.client('sns') sns_client.publish( TopicArn=self.sns_topic_arn, Message=body,", "arg_type = field['schema']['type'] # Handle arrays of simple types like", "spikes count = matches[0].get('spike_count') if count: title += ' -", "{'project': {'key': self.project}, 'issuetype': {'name': self.issue_type}} if self.components: # Support", "a count aggregation so that we can see at a", "data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies) warnings.resetwarnings() response.raise_for_status() except RequestException as e:", "Gitter: %s\" % e) elastalert_logger.info(\"Alert sent to Gitter\") def get_info(self):", "At minimum, this should contain a field type corresponding to", "self._ensure_new_line() if self.rule.get('alert_text_type') != 'alert_text_only': self._add_rule_text() self._ensure_new_line() if self.rule.get('top_count_keys'): self._add_top_counts()", "above for i in xrange(len(alert_subject_values)): if alert_subject_values[i] is None: alert_value", "message for each alert \"\"\" required_options = frozenset(['gitter_webhook_url']) def __init__(self,", "try: issues = self.client.search_issues(jql) except JIRAError as e: logging.exception(\"Error while", "in self.post_payload.items(): payload[post_key] = lookup_es_key(match, es_key) headers = { \"Content-Type\":", "of one of them: # {\"id\":\"customfield_12807\",\"name\":\"My Custom Field\",\"custom\":true,\"orderable\":true,\"navigable\":true,\"searchable\":true, # \"clauseNames\":[\"cf[12807]\",\"My", "match[qk], lookup_es_key(match, 
self.rule['timestamp_field']))) alerts.append( '1)Alert for %s, %s at %s:'", "else val for val in alert_text_values] alert_text = alert_text.format(*alert_text_values) elif", "is None else val alert_text = alert_text.format(**kw) self.text += alert_text", "key in keys] + [count]) text += text_table.draw() + '\\n\\n'", "required_options = frozenset(['jira_server', 'jira_account_file', 'jira_project', 'jira_issuetype']) # Maintain a static", "e: raise EAException(\"Error creating JIRA ticket using jira_args (%s): %s\"", "= subprocess.Popen(command, stdin=subprocess.PIPE, shell=self.shell) if self.rule.get('pipe_match_json'): match_json = json.dumps(matches, cls=DateTimeEncoder)", "= self.rule.get('stomp_hostname', 'localhost') self.stomp_hostport = self.rule.get('stomp_hostport', '61613') self.stomp_login = self.rule.get('stomp_login',", "'cmdb_ci', 'caller_id' ]) def __init__(self, rule): super(ServiceNowAlerter, self).__init__(rule) self.servicenow_rest_url =", "term, count in top_events: self.text += '%s: %s\\n' % (term,", "unicode(self.rule['alert_subject']) if 'alert_subject_args' in self.rule: alert_subject_args = self.rule['alert_subject_args'] alert_subject_values =", "subp.communicate(input=match_json) if self.rule.get(\"fail_on_non_zero_exit\", False) and subp.wait(): raise EAException(\"Non-zero exit code", "sent by HTTP POST. Encoded with JSON. 
\"\"\" def __init__(self,", "to twilio: %s\" % e) elastalert_logger.info(\"Trigger sent to Twilio\") def", "= alert_value alert_text_values = [missing if val is None else", "matched in the lookup_es_key call above for i in xrange(len(alert_subject_values)):", "raise EAException(\"Error posting to Exotel\"), None, sys.exc_info()[2] elastalert_logger.info(\"Trigger sent to", "exists if self.pipeline is not None and 'jira_ticket' in self.pipeline:", "response = requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies) response.raise_for_status() except RequestException", "self.rule['bcc'] try: if self.smtp_ssl: if self.smtp_port: self.smtp = SMTP_SSL(self.smtp_host, self.smtp_port,", "# If it works, great! If not, it will manifest", "payload = match if self.post_all_values else {} payload.update(self.post_static_payload) for post_key,", "Message=body, Subject=self.create_title(matches) ) elastalert_logger.info(\"Sent sns notification to %s\" % (self.sns_topic_arn))", "self.rule.get(alert_subject_args[i]) if alert_value: alert_subject_values[i] = alert_value alert_subject_values = ['<MISSING VALUE>'", "jira server (built-ins and custom ones) fields = self.client.fields() for", "at %s:' % (self.rule['name'], match[qk], lookup_es_key(match, self.rule['timestamp_field']))) else: elastalert_logger.info('Alert for", "self.rule['jira_issuetype'] # We used to support only a single component.", "of dictionaries of relevant information to the alert. \"\"\" if", "encountered summary_table_fields_with_count = summary_table_fields + ['count'] text += \"Aggregation resulted", "in self.rule.iteritems(): # If we find a field that is", "unique key encountered in the aggregation period for match in", "required_options = frozenset(['email']) def __init__(self, *args): super(EmailAlerter, self).__init__(*args) self.smtp_host =", "latter... 
raise Exception(\"Could not find a definition for the jira", "key_value: incident_key_values[i] = key_value incident_key_values = ['<MISSING VALUE>' if val", "= unicode(self.rule['alert_subject']) if 'alert_subject_args' in self.rule: alert_subject_args = self.rule['alert_subject_args'] alert_subject_values", "self.pipeline = None self.resolve_rule_references(self.rule) def resolve_rule_references(self, root): # Support referencing", "= 'ElastAlert: %s' % (self.rule['name']) return subject def alert(self, matches):", "= self.rule.get('gitter_msg_level', 'error') def alert(self, matches): body = self.create_alert_body(matches) #", "if 'alert_subject' not in self.rule: title = self.create_default_title(matches, True) else:", "self.rule['email'] = [self.rule['email']] # If there is a cc then", "{'https': self.servicenow_proxy} if self.servicenow_proxy else None payload = { \"description\":", "from smtplib import SMTP_SSL from smtplib import SMTPAuthenticationError from smtplib", "self.rule['subcategory'], \"cmdb_ci\": self.rule['cmdb_ci'], \"caller_id\": self.rule[\"caller_id\"] } try: response = requests.post(", "self.rule.get('jira_component')) # We used to support only a single label.", "self.last_command = command except KeyError as e: raise EAException(\"Error formatting", "field['schema']['type'] # Handle arrays of simple types like strings or", "# JIRAError may contain HTML, pass along only first 1024", "for each alert \"\"\" required_options = frozenset(['jira_server', 'jira_account_file', 'jira_project', 'jira_issuetype'])", "issues = self.client.search_issues(jql) except JIRAError as e: logging.exception(\"Error while searching", "ticket: inactivity_datetime = ts_now() - datetime.timedelta(days=self.bump_after_inactivity) if ts_to_dt(ticket.fields.updated) >= inactivity_datetime:", "if it isn't cc = self.rule.get('cc') if cc and isinstance(cc,", "int(self.rule[strValue[1:-1]]) else: return self.rule[strValue[1:-1]] else: return value def alert(self, 
match):", "summary_table_fields_with_count = summary_table_fields + ['count'] text += \"Aggregation resulted in", "convenience, support the scenario wherein the user only provides #", "% (self.project, title, date) if self.bump_in_statuses: jql = '%s and", "self.smtp_port: self.smtp = SMTP_SSL(self.smtp_host, self.smtp_port, keyfile=self.smtp_key_file, certfile=self.smtp_cert_file) else: self.smtp =", "= '' if 'alert_text' not in self.rule: self.text += self.rule['name']", "import JIRAError from requests.exceptions import RequestException from staticconf.loader import yaml_loader", "client.messages.create(body=self.rule['name'], to=self.twilio_to_number, from_=self.twilio_from_number) except TwilioRestException as e: raise EAException(\"Error posting", "are sent by HTTP POST. Encoded with JSON. \"\"\" def", "contents of the structure we're walking for key, value in", "body += '\\n----------------------------------------\\n' return body def get_aggregation_summary_text(self, matches): text =", "component} for component in self.components] if self.labels: # Support single", "self.create_alert_body(matches) body = self.format_body(body) # post to Teams headers =", "sent to PagerDuty\") def get_incident_key(self, matches): if self.pagerduty_incident_key_args: incident_key_values =", "alert_text_values] alert_text = alert_text.format(*alert_text_values) elif 'alert_text_kw' in self.rule: kw =", "match in matches: payload = match if self.post_all_values else {}", "self.project = self.rule['jira_project'] self.issue_type = self.rule['jira_issuetype'] # We used to", "self.rule: return self.create_custom_title(matches) return self.create_default_title(matches) def create_custom_title(self, matches): alert_subject =", "account_file): \"\"\" Gets the username and password from an account", "simple types like strings or numbers if arg_type == 'array':", "JIRA from jira.exceptions import JIRAError from requests.exceptions import RequestException from", "EAException(\"Error 
formatting command: %s\" % (e)) # Run command and", "self.rule['twilio_from_number'] def alert(self, matches): client = TwilioClient(self.twilio_account_sid, self.twilio_auth_token) try: client.messages.create(body=self.rule['name'],", "correctly # If the schema information is not available, raise", "{'type': 'victorops', 'victorops_routing_key': self.victorops_routing_key} class TelegramAlerter(Alerter): \"\"\" Send a Telegram", "are %s\" % (self.priority, self.priority_ids.keys())) def get_arbitrary_fields(self): # This API", "walking for i, item in enumerate(copy.copy(root)): if type(item) == dict", "to SMTP host: %s\" % (e)) except SMTPAuthenticationError as e:", "value] # Handle non-array types else: # Simple string types", "= self.rule.get('jira_watchers') if self.bump_in_statuses and self.bump_not_in_statuses: msg = 'Both jira_bump_in_statuses", "issues[0] def comment_on_ticket(self, ticket, match): text = unicode(JiraFormattedMatchString(self.rule, match)) timestamp", "def alert(self, matches): client = TwilioClient(self.twilio_account_sid, self.twilio_auth_token) try: client.messages.create(body=self.rule['name'], to=self.twilio_to_number,", "as the key # This may not work, as the", "'stomp_login', 'stomp_password']) def alert(self, matches): alerts = [] qk =", "{ 'color': self.slack_msg_color, 'title': self.create_title(matches), 'text': body, 'mrkdwn_in': ['text', 'pretext'],", "alerts with dashes if len(matches) > 1: body += '\\n----------------------------------------\\n'", "EAException from util import elastalert_logger from util import lookup_es_key from", "while self.text[-2:] != '\\n\\n': self.text += '\\n' def _add_custom_alert_text(self): missing", "elastalert_logger.info(\"Alert sent to HipChat room %s\" % self.hipchat_room_id) def get_info(self):", "{'type': 'exotel', 'exotel_account': self.exotel_account_sid} class TwilioAlerter(Alerter): required_options = frozenset(['twilio_account_sid', 'twilio_auth_token',", "util import ts_to_dt class 
DateTimeEncoder(json.JSONEncoder): def default(self, obj): if hasattr(obj,", "raise EAException(\"Error formatting command: %s\" % (e)) # Run command", "wherein the user only provides # a single value for", "defined on the jira server (built-ins and custom ones) fields", "Twilio\") def get_info(self): return {'type': 'twilio', 'twilio_client_name': self.twilio_from_number} class VictorOpsAlerter(Alerter):", "(%s) are set.' % \\ (','.join(self.bump_in_statuses), ','.join(self.bump_not_in_statuses)) intersection = list(set(self.bump_in_statuses)", "elastalert_logger.info(\"Trigger sent to Exotel\") def get_info(self): return {'type': 'exotel', 'exotel_account':", "'') self.slack_msg_color = self.rule.get('slack_msg_color', 'danger') self.slack_parse_override = self.rule.get('slack_parse_override', 'none') self.slack_text_string", "alert \"\"\" required_options = frozenset(['ms_teams_webhook_url', 'ms_teams_alert_summary']) def __init__(self, rule): super(MsTeamsAlerter,", "for arg in self.pagerduty_incident_key_args] # Populate values with rule level", "in account_conf: raise EAException('Account file must have user and password", "match: A dictionary of relevant information to the alert. \"\"\"", "and jira_field not in self.known_field_list: # Remove the jira_ part.", "get_arbitrary_fields(self): # This API returns metadata about all the fields", "stderr = subp.communicate(input=match_json) if self.rule.get(\"fail_on_non_zero_exit\", False) and subp.wait(): raise EAException(\"Non-zero", "in self.rule: summary_table_fields = self.rule['summary_table_fields'] if not isinstance(summary_table_fields, list): summary_table_fields", "} proxies = {'https': self.servicenow_proxy} if self.servicenow_proxy else None payload", "calling alert() self.pipeline = None self.resolve_rule_references(self.rule) def resolve_rule_references(self, root): #", "it a list if it isn't cc = self.rule.get('cc') if", "everything into description. 
description = str(BasicMatchString(self.rule, match)) # Set proper", "# Add count for spikes count = matches[0].get('spike_count') if count:", "raise EAException(\"Error posting to twilio: %s\" % e) elastalert_logger.info(\"Trigger sent", "headers=headers, data=json.dumps(payload, cls=DateTimeEncoder), proxies=proxies ) response.raise_for_status() except RequestException as e:", "e) elastalert_logger.info( \"Alert sent to Telegram room %s\" % self.telegram_room_id)", "match_items = self.match.items() match_items.sort(key=lambda x: x[0]) for key, value in", "(pretty_ts(matches[0][self.rule['timestamp_field']], self.rule.get('use_local_time'))) # Add count for spikes count = matches[0].get('spike_count')", "to_addr = [recipient] elif 'email_add_domain' in self.rule: to_addr = [recipient", "for spikes count = matches[0].get('spike_count') if count: title += '", "= [command_arg.format(match=matches[0]) for command_arg in self.rule['command']] else: command = [command_arg", "'') self.pagerduty_incident_key_args = self.rule.get('pagerduty_incident_key_args', None) self.pagerduty_proxy = self.rule.get('pagerduty_proxy', None) self.url", "type elif arg_type == 'number': self.jira_args[arg_name] = int(value) elif arg_type", "response = requests.post(self.gitter_webhook_url, json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies) response.raise_for_status() except RequestException", "how to set it # Note this is only the", "'option': self.jira_args[arg_name] = [{'value': v} for v in value] else:", "','.join(self.rule['cc']) to_addr = to_addr + self.rule['cc'] if self.rule.get('bcc'): to_addr =", "sys.exc_info()[2] elastalert_logger.info(\"Trigger sent to Exotel\") def get_info(self): return {'type': 'exotel',", "ServiceNow alert \"\"\" required_options = set([ 'username', 'password', 'servicenow_rest_url', 'short_description',", "'slack_username_override': self.slack_username_override, 'slack_webhook_url': self.slack_webhook_url} class 
PagerDutyAlerter(Alerter): \"\"\" Create an incident", "self.labels = self.rule.get('jira_labels', self.rule.get('jira_label')) self.description = self.rule.get('jira_description', '') self.assignee =", "alerter publishes alerts via stomp to a broker. \"\"\" required_options", "*args): super(CommandAlerter, self).__init__(*args) self.last_command = [] self.shell = False if", "in alert_subject_values] return alert_subject.format(*alert_subject_values) return alert_subject def create_alert_body(self, matches): body", "self.rule.get('jira_watchers') if self.bump_in_statuses and self.bump_not_in_statuses: msg = 'Both jira_bump_in_statuses (%s)", "stdout, stderr = subp.communicate(input=match_json) if self.rule.get(\"fail_on_non_zero_exit\", False) and subp.wait(): raise", "# This is necessary for search to work. Other special", "if alert_subject_values[i] is None: alert_value = self.rule.get(alert_subject_args[i]) if alert_value: alert_subject_values[i]", "key_value incident_key_values = ['<MISSING VALUE>' if val is None else", "self.twilio_from_number} class VictorOpsAlerter(Alerter): \"\"\" Creates a VictorOps Incident for each", "create_default_title(self, matches, for_search=False): # If there is a query_key, use", "to Telegram: %s\" % e) elastalert_logger.info( \"Alert sent to Telegram", "if val is None else val for val in alert_subject_values]", "(%s) and jira_bump_not_in_statuses (%s) are set.' 
% \\ (','.join(self.bump_in_statuses), ','.join(self.bump_not_in_statuses))", "self.rule['email_add_domain'] = '@' + add_suffix def alert(self, matches): body =", "to show something return json.dumps(blob, cls=DateTimeEncoder, sort_keys=True, indent=4, encoding='Latin-1', ensure_ascii=False)", "matches): text = '' if 'aggregation' in self.rule and 'summary_table_fields'", "# This may not work, as the key might actually", "body): # https://api.slack.com/docs/formatting body = body.encode('UTF-8') body = body.replace('&', '&amp;')", "{'https': self.pagerduty_proxy} if self.pagerduty_proxy else None try: response = requests.post(", "= None self.pipeline['jira_server'] = self.server return None elastalert_logger.info('Commenting on existing", "= ['<MISSING VALUE>' if val is None else val for", "'event_type': 'trigger', 'incident_key': self.get_incident_key(matches), 'client': self.pagerduty_client_name, 'details': { \"information\": body.encode('UTF-8'),", "missing if val is None else val alert_text = alert_text.format(**kw)", "'jira_server', 'jira_watchers', ] # Some built-in jira types that can", "if it isn't already if isinstance(self.rule['email'], basestring): self.rule['email'] = [self.rule['email']]", "for command_arg in self.rule['command']] self.last_command = command except KeyError as", "_charset='UTF-8') email_msg['Subject'] = self.create_title(matches) email_msg['To'] = ', '.join(to_addr) email_msg['From'] =", "if self.hipchat_proxy else None payload = { 'color': self.hipchat_msg_color, 'message':", "self.hipchat_message_format = self.rule.get('hipchat_message_format', 'html') self.hipchat_auth_token = self.rule['hipchat_auth_token'] self.hipchat_room_id = self.rule['hipchat_room_id']", "password from an account file. 
:param account_file: Name of the", "super(CommandAlerter, self).__init__(*args) self.last_command = [] self.shell = False if isinstance(self.rule['command'],", "% response) except: raise EAException(\"Error posting to Exotel\"), None, sys.exc_info()[2]", "} try: response = requests.post( self.servicenow_rest_url, auth=(self.rule['username'], self.rule['password']), headers=headers, data=json.dumps(payload,", "a cc then also convert it a list if it", "self.rule['pagerduty_client_name'] self.pagerduty_incident_key = self.rule.get('pagerduty_incident_key', '') self.pagerduty_incident_key_args = self.rule.get('pagerduty_incident_key_args', None) self.pagerduty_proxy", "raise EAException(\"Error posting to VictorOps: %s\" % e) elastalert_logger.info(\"Trigger sent", "raise EAException(\"Error posting to ms teams: %s\" % e) elastalert_logger.info(\"Alert", "Simple string types if arg_type in ['string', 'date', 'datetime']: #", "self.rule = rule # pipeline object is created by ElastAlerter.send_alert()", "sent to Telegram room %s\" % self.telegram_room_id) def get_info(self): return", "'client': self.pagerduty_client_name, 'details': { \"information\": body.encode('UTF-8'), }, } # set", "= { \"Content-Type\": \"application/json\", \"Accept\": \"application/json;charset=utf-8\" } proxies = {'https':", "= lookup_es_key(matches[0], self.rule['email_from_field']) if isinstance(recipient, basestring): if '@' in recipient:", "arg_type == 'option': self.jira_args[arg_name] = {'value': value} # Complex type", "commenting on ticket %s: %s\" % (ticket, e)) if self.pipeline", "'exotel_from_number']) def __init__(self, rule): super(ExotelAlerter, self).__init__(rule) self.exotel_account_sid = self.rule['exotel_account_sid'] self.exotel_auth_token", "strValue = unicode(value) if strValue.startswith('$') and strValue.endswith('$') and strValue[1:-1] in", "self.gitter_webhook_url} class ServiceNowAlerter(Alerter): \"\"\" Creates a ServiceNow alert \"\"\" required_options", 
"intersection: msg = '%s Both have common statuses of (%s).", "e)) return None if len(issues): return issues[0] def comment_on_ticket(self, ticket,", "# 1. A built-in supported field in JIRA that we", "MS Teams\") def get_info(self): return {'type': 'ms_teams', 'ms_teams_webhook_url': self.ms_teams_webhook_url} class", "list if it isn't cc = self.rule.get('cc') if cc and", "self.twilio_from_number = self.rule['twilio_from_number'] def alert(self, matches): client = TwilioClient(self.twilio_account_sid, self.twilio_auth_token)", "key might actually be 'key', 'id', 'value', or something else", "self.slack_icon_url_override else: payload['icon_emoji'] = self.slack_emoji_override for url in self.slack_webhook_url: try:", "self.pagerduty_client_name} class ExotelAlerter(Alerter): required_options = frozenset(['exotel_account_sid', 'exotel_auth_token', 'exotel_to_number', 'exotel_from_number']) def", "necessary for search to work. Other special characters and dashes", "None) self.pagerduty_proxy = self.rule.get('pagerduty_proxy', None) self.url = 'https://events.pagerduty.com/generic/2010-04-15/create_event.json' def alert(self,", "custom alert title to be used, e.g. 
as an e-mail", "strings, but # in reality, they are required to be", "VictorOps\") def get_info(self): return {'type': 'victorops', 'victorops_routing_key': self.victorops_routing_key} class TelegramAlerter(Alerter):", "ServiceNow: %s\" % e) elastalert_logger.info(\"Alert sent to ServiceNow\") def get_info(self):", "fullmessage['body'] = self.create_alert_body(matches) self.stomp_hostname = self.rule.get('stomp_hostname', 'localhost') self.stomp_hostport = self.rule.get('stomp_hostport',", "recipient if 'email_add_domain' in self.rule: to_addr = [name + self.rule['email_add_domain']", "= {'content-type': 'application/json'} # set https proxy, if it was", "= ','.join(self.rule['cc']) to_addr = to_addr + self.rule['cc'] if self.rule.get('bcc'): to_addr", "= {'https': self.victorops_proxy} if self.victorops_proxy else None payload = {", "for each alert \"\"\" required_options = frozenset(['telegram_bot_token', 'telegram_room_id']) def __init__(self,", "technically may not work if there is a top-level rule", "arg in alert_text_args] # Support referencing other top-level rule properties", "for each alert \"\"\" required_options = frozenset(['hipchat_auth_token', 'hipchat_room_id']) def __init__(self,", "super(JiraAlerter, self).get_aggregation_summary_text(matches) if text: text = u'{{noformat}}{0}{{noformat}}'.format(text) return text def", "indices are sent by HTTP POST. Encoded with JSON. \"\"\"", "by HTTP POST. Encoded with JSON. 
\"\"\" def __init__(self, rule):", "self.hipchat_room_id, self.hipchat_auth_token) self.hipchat_proxy = self.rule.get('hipchat_proxy', None) def alert(self, matches): body", "= { \"message_type\": self.victorops_message_type, \"entity_display_name\": self.victorops_entity_display_name, \"monitoring_tool\": \"ElastAlert\", \"state_message\": body", "self.servicenow_rest_url, auth=(self.rule['username'], self.rule['password']), headers=headers, data=json.dumps(payload, cls=DateTimeEncoder), proxies=proxies ) response.raise_for_status() except", "setting it as an object, using 'name' as the key", "self.create_alert_body(matches) try: self.issue = self.client.create_issue(**self.jira_args) # You can not add", "Alerter. \"\"\" return {'type': 'Unknown'} def create_title(self, matches): \"\"\" Creates", "admin has configured if jira_field.startswith('jira_') and jira_field not in self.known_field_list:", "= {'project': {'key': self.project}, 'issuetype': {'name': self.issue_type}} if self.components: #", "it was provided proxies = {'https': self.victorops_proxy} if self.victorops_proxy else", "https proxy, if it was provided proxies = {'https': self.pagerduty_proxy}", "def alert(self, matches): body = self.create_alert_body(matches) # post to victorops", "failed to be added raise Exception( \"Exception encountered when trying", "alerts.append( '2)Alert for %s at %s:' % (self.rule['name'], lookup_es_key(match, self.rule['timestamp_field']))", "%s' % (self.rule['name']) if for_search: return title title += '", "[self.rule['command']] self.new_style_string_format = False if 'new_style_string_format' in self.rule and self.rule['new_style_string_format']:", "{'https': self.ms_teams_proxy} if self.ms_teams_proxy else None payload = { '@type':", "to the alert. 
\"\"\" if 'alert_subject' in self.rule: return self.create_custom_title(matches)", "to each alerters used by a rule before calling alert()", "'jira'} class CommandAlerter(Alerter): required_options = set(['command']) def __init__(self, *args): super(CommandAlerter,", "if 'user' not in account_conf or 'password' not in account_conf:", "e: raise EAException(\"Error posting to Gitter: %s\" % e) elastalert_logger.info(\"Alert", "'com.atlassian.jira.plugin.system.customfieldtypes:multiselect', 'com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons', ] def __init__(self, rule): super(JiraAlerter, self).__init__(rule) self.server =", "there is a top-level rule property with the same name", "numbers if arg_type == 'array': # As a convenience, support", "def _add_match_items(self): match_items = dict([(x, y) for x, y in", "a field that is not covered by the set that", "self.rule['timestamp_field']))) else: elastalert_logger.info('Alert for %s at %s:' % (self.rule['name'], lookup_es_key(match,", "subject def alert(self, matches): body = self.create_alert_body(matches) session = boto3.Session(", "+ add_suffix def alert(self, matches): body = self.create_alert_body(matches) # Add", "pagerduty: %s\" % e) elastalert_logger.info(\"Trigger sent to PagerDuty\") def get_incident_key(self,", "in self.pagerduty_incident_key_args] # Populate values with rule level properties too", "self._pretty_print_as_json(match_items) preformatted_text = u'{{code:json}}{0}{{code}}'.format(json_blob) self.text += preformatted_text class Alerter(object): \"\"\"", "int(value) elif arg_type == 'option': self.jira_args[arg_name] = {'value': value} #", "(jql, ','.join(self.bump_not_in_statuses)) try: issues = self.client.search_issues(jql) except JIRAError as e:", "self.aws_secret_access_key = self.rule.get('aws_secret_access_key') self.aws_region = self.rule.get('aws_region', 'us-east-1') self.profile = self.rule.get('boto_profile',", "a Telegram message via bot api for each alert \"\"\"", 
"email_msg['From'] = self.from_addr email_msg['Reply-To'] = self.rule.get('email_reply_to', email_msg['To']) email_msg['Date'] = formatdate()", "True def alert(self, matches): # Format the command and arguments", "'summary_table_fields' in self.rule: summary_table_fields = self.rule['summary_table_fields'] if not isinstance(summary_table_fields, list):", "requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies) response.raise_for_status() except RequestException as e:", "isinstance(cc, basestring): self.rule['cc'] = [self.rule['cc']] # If there is a", "import TwilioRestException from twilio.rest import Client as TwilioClient from util", "at %s:' % (self.rule['name'], lookup_es_key(match, self.rule['timestamp_field']))) alerts.append( '2)Alert for %s", "for name, kw_name in self.rule.get('alert_text_kw').items(): val = lookup_es_key(self.match, name) #", "[count]) text += text_table.draw() + '\\n\\n' return unicode(text) def create_default_title(self,", "for name in to_addr] email_msg = MIMEText(body.encode('UTF-8'), _charset='UTF-8') email_msg['Subject'] =", "self.hipchat_auth_token = self.rule['hipchat_auth_token'] self.hipchat_room_id = self.rule['hipchat_room_id'] self.hipchat_domain = self.rule.get('hipchat_domain', 'api.hipchat.com')", "get_info(self): return {'type': 'command', 'command': ' '.join(self.last_command)} class SnsAlerter(Alerter): \"\"\"", "aws_access_key_id=self.aws_access_key_id, aws_secret_access_key=self.aws_secret_access_key, region_name=self.aws_region, profile_name=self.profile ) sns_client = session.client('sns') sns_client.publish( TopicArn=self.sns_topic_arn,", "self.hipchat_room_id = self.rule['hipchat_room_id'] self.hipchat_domain = self.rule.get('hipchat_domain', 'api.hipchat.com') self.hipchat_ignore_ssl_errors = self.rule.get('hipchat_ignore_ssl_errors',", "self.issue_type}} if self.components: # Support single component or list if", "+ '\\n' body += 
self.get_aggregation_summary_text(matches) for match in matches: body", "def get_info(self): return {'type': 'victorops', 'victorops_routing_key': self.victorops_routing_key} class TelegramAlerter(Alerter): \"\"\"", "stomp.Connection([(self.stomp_hostname, self.stomp_hostport)]) conn.start() conn.connect(self.stomp_login, self.stomp_password) conn.send(self.stomp_destination, json.dumps(fullmessage)) conn.disconnect() def get_info(self):", "match in matches: try: self.comment_on_ticket(ticket, match) except JIRAError as e:", "self.victorops_routing_key) self.victorops_proxy = self.rule.get('victorops_proxy', None) def alert(self, matches): body =", "self.rule.get('pagerduty_proxy', None) self.url = 'https://events.pagerduty.com/generic/2010-04-15/create_event.json' def alert(self, matches): body =", "in top_events: self.text += '%s: %s\\n' % (term, count) self.text", "'alert_text_args' in self.rule: alert_text_args = self.rule.get('alert_text_args') alert_text_values = [lookup_es_key(self.match, arg)", "if not ('schema' in field or 'type' in field['schema']): raise", "('schema' in field or 'type' in field['schema']): raise Exception(\"Could not", "- %s' % (qk) return subject def get_info(self): return {'type':", "and field['schema']['custom'] in self.custom_string_types_with_special_handling: self.jira_args[arg_name] = [{'value': v} for v", "return title title += ' - %s' % (pretty_ts(matches[0][self.rule['timestamp_field']], self.rule.get('use_local_time')))", "if it isn't bcc = self.rule.get('bcc') if bcc and isinstance(bcc,", "= title.replace(' - ', ' ') title = title.replace('\\\\', '\\\\\\\\')", "= frozenset(['gitter_webhook_url']) def __init__(self, rule): super(GitterAlerter, self).__init__(rule) self.gitter_webhook_url = self.rule['gitter_webhook_url']", "self.rule.get('alert_text_kw').items(): val = lookup_es_key(self.match, name) # Support referencing other top-level", "\"Accept\": \"application/json;charset=utf-8\" } proxies = {'https': 
self.servicenow_proxy} if self.servicenow_proxy else", "can be used as custom fields require special handling #", "in summary_table_fields]) if key_tuple not in match_aggregation: match_aggregation[key_tuple] = 1", "alert_subject_values = [lookup_es_key(matches[0], arg) for arg in alert_subject_args] # Support", "= frozenset(['exotel_account_sid', 'exotel_auth_token', 'exotel_to_number', 'exotel_from_number']) def __init__(self, rule): super(ExotelAlerter, self).__init__(rule)", "None else val alert_text = alert_text.format(**kw) self.text += alert_text def", "= 1 else: match_aggregation[key_tuple] = match_aggregation[key_tuple] + 1 for keys,", "will bubble up self.jira_args[arg_name] = [{'name': v} for v in", "[{'name': v} for v in value] # Handle non-array types", "super(PagerDutyAlerter, self).__init__(rule) self.pagerduty_service_key = self.rule['pagerduty_service_key'] self.pagerduty_client_name = self.rule['pagerduty_client_name'] self.pagerduty_incident_key =", "'Unknown'} def create_title(self, matches): \"\"\" Creates custom alert title to", "jira_labels: Only_One_Label if type(value) != list: value = [value] array_items", "call above for i in xrange(len(alert_text_values)): if alert_text_values[i] is None:", "None payload = { 'username': self.slack_username_override, 'channel': self.slack_channel_override, 'parse': self.slack_parse_override,", "ts_to_dt class DateTimeEncoder(json.JSONEncoder): def default(self, obj): if hasattr(obj, 'isoformat'): return", "headers=headers, proxies=proxies) warnings.resetwarnings() response.raise_for_status() except RequestException as e: raise EAException(\"Error", "return obj.isoformat() else: return json.JSONEncoder.default(self, obj) class BasicMatchString(object): \"\"\" Creates", "'smtp_auth_file' in self.rule: self.smtp.login(self.user, self.password) except (SMTPException, error) as e:", "contain HTML, pass along only first 1024 chars raise EAException(\"Error", "Support referencing other top-level rule properties 
to avoid redundant copy/paste", "appear to be ok title = title.replace(' - ', '", "self.rule: self.text += self.rule['name'] + '\\n\\n' self._add_custom_alert_text() self._ensure_new_line() if self.rule.get('alert_text_type')", "the value correctly # If the schema information is not", "work if there is a top-level rule property with the", "] # Some built-in jira types that can be used", "data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, verify=not self.hipchat_ignore_ssl_errors, proxies=proxies) warnings.resetwarnings() response.raise_for_status() except RequestException", "%s\" % e) elastalert_logger.info(\"Alert sent to MS Teams\") def get_info(self):", "in xrange(len(alert_text_values)): if alert_text_values[i] is None: alert_value = self.rule.get(alert_text_args[i]) if", "the aggregation period for match in matches: key_tuple = tuple([unicode(lookup_es_key(match,", "'\\n\\n': self.text += '\\n' def _add_custom_alert_text(self): missing = '<MISSING VALUE>'", "at %s:' % (self.rule['name'], lookup_es_key(match, self.rule['timestamp_field'])) ) fullmessage['match'] = lookup_es_key(match,", "} try: response = requests.post(self.gitter_webhook_url, json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies) response.raise_for_status()", "find_existing_ticket(self, matches): # Default title, get stripped search version if", "is a dictionary of information about the alert. :param match:", "the schema information is not available, raise an exception since", "top-level rule properties # This technically may not work if", "found.' % ( msg, ','.join(intersection)) msg += ' This should", "it exists if self.pipeline is not None and 'jira_ticket' in", "# Check the schema information to decide how to set", "body def get_aggregation_summary_text(self, matches): text = super(JiraAlerter, self).get_aggregation_summary_text(matches) if text:", "be ok title = title.replace(' - ', ' ') title", "of information about the alert. 
:param match: A dictionary of", "= self.rule['telegram_bot_token'] self.telegram_room_id = self.rule['telegram_room_id'] self.telegram_api_url = self.rule.get('telegram_api_url', 'api.telegram.org') self.url", "name # as an es result key, since it would", "hasattr(obj, 'isoformat'): return obj.isoformat() else: return json.JSONEncoder.default(self, obj) class BasicMatchString(object):", "self.rule['gitter_webhook_url'] self.gitter_proxy = self.rule.get('gitter_proxy', None) self.gitter_msg_level = self.rule.get('gitter_msg_level', 'error') def", "'short_description', 'comments', 'assignment_group', 'category', 'subcategory', 'cmdb_ci', 'caller_id' ]) def __init__(self,", "val in alert_subject_values] return alert_subject.format(*alert_subject_values) return alert_subject def create_alert_body(self, matches):", "} try: response = requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies) response.raise_for_status()", "field['schema']['custom'] in self.custom_string_types_with_special_handling: self.jira_args[arg_name] = {'value': value} else: self.jira_args[arg_name] =", "trying to add '{0}' as a watcher. Does the user", "'{0}' as a watcher. 
Does the user exist?\\n{1}\" .format( watcher,", "+= self.rule['type'].get_match_str(self.match) def _add_top_counts(self): for key, counts in self.match.items(): if", "self.rule.get('smtp_auth_file'): self.get_account(self.rule['smtp_auth_file']) self.smtp_key_file = self.rule.get('smtp_key_file') self.smtp_cert_file = self.rule.get('smtp_cert_file') # Convert", "command %s: %s\" % (' '.join(command), e)) def get_info(self): return", "= body.replace('`', \"'\") body = \"```{0}```\".format('```\\n\\n```'.join(x for x in body.split('\\n'))).replace('\\n``````',", "> 1: body += '\\n----------------------------------------\\n' body += u' ```' headers", "match)) # Separate text of aggregated alerts with dashes if", "self.rule.get('jira_priority') self.bump_tickets = self.rule.get('jira_bump_tickets', False) self.bump_not_in_statuses = self.rule.get('jira_bump_not_in_statuses') self.bump_in_statuses =", "requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, verify=not self.hipchat_ignore_ssl_errors, proxies=proxies) warnings.resetwarnings() response.raise_for_status() except", "= self._pretty_print_as_json(match_items) preformatted_text = u'{{code:json}}{0}{{code}}'.format(json_blob) self.text += preformatted_text class Alerter(object):", "== f[identifier].replace('_', ' ').lower()), None) if field: break if not", "is created by ElastAlerter.send_alert() # and attached to each alerters", "proxies=proxies) response.raise_for_status() except RequestException as e: raise EAException(\"Error posting HTTP", "= frozenset(['stomp_hostname', 'stomp_hostport', 'stomp_login', 'stomp_password']) def alert(self, matches): alerts =", "jira.exceptions import JIRAError from requests.exceptions import RequestException from staticconf.loader import", "None, sys.exc_info()[2] except JIRAError as e: raise EAException(\"Error creating JIRA", "need to be updated on a case-by-case basis custom_string_types_with_special_handling =", "about all the fields 
defined on the jira server (built-ins", "headers=headers, proxies=proxies) response.raise_for_status() except RequestException as e: raise EAException(\"Error posting", "self.smtp.sendmail(self.from_addr, to_addr, email_msg.as_string()) self.smtp.close() elastalert_logger.info(\"Sent email to %s\" % (to_addr))", "= False if 'new_style_string_format' in self.rule and self.rule['new_style_string_format']: self.new_style_string_format =", "common statuses of (%s). As such, no tickets will ever", "This API returns metadata about all the fields defined on", "elastalert_logger.info(\"Alert sent to ServiceNow\") def get_info(self): return {'type': 'ServiceNow', 'self.servicenow_rest_url':", "self.text += '\\n' def _add_custom_alert_text(self): missing = '<MISSING VALUE>' alert_text", "raise EAException(\"Error posting to Exotel, response code is %s\" %", "= [name + self.rule['email_add_domain'] for name in to_addr] email_msg =", "both just in case for identifier in ['name', 'id']: field", "elastalert_logger.info( 'Alert for %s, %s at %s:' % (self.rule['name'], match[qk],", "object is created by ElastAlerter.send_alert() # and attached to each", "self.smtp_ssl: if self.smtp_port: self.smtp = SMTP_SSL(self.smtp_host, self.smtp_port, keyfile=self.smtp_key_file, certfile=self.smtp_cert_file) else:", "in self.rule: alert_text_args = self.rule.get('alert_text_args') alert_text_values = [lookup_es_key(self.match, arg) for", "(str(e)[:1024])) try: if self.priority is not None: self.jira_args['priority'] = {'id':", "decide how to set the value correctly # If the", "elif 'alert_text_kw' in self.rule: kw = {} for name, kw_name", "ever be found.' % ( msg, ','.join(intersection)) msg += '", "alert(self, matches): body = self.create_alert_body(matches) session = boto3.Session( aws_access_key_id=self.aws_access_key_id, aws_secret_access_key=self.aws_secret_access_key,", "used to support only a single component. 
This allows us", "Check the schema information to decide how to set the", "body = self.create_alert_body(matches) body = self.format_body(body) # post to Teams", "# Format the command and arguments try: if self.new_style_string_format: command", "to set a string value known_field_list = [ 'jira_account_file', 'jira_assignee',", "Jira ticket: %s\" % (self.issue)) if self.pipeline is not None:", "self.description + '\\n' body += self.get_aggregation_summary_text(matches) for match in matches:", "self.exotel_account_sid} class TwilioAlerter(Alerter): required_options = frozenset(['twilio_account_sid', 'twilio_auth_token', 'twilio_to_number', 'twilio_from_number']) def", "self.rule['cmdb_ci'], \"caller_id\": self.rule[\"caller_id\"] } try: response = requests.post( self.servicenow_rest_url, auth=(self.rule['username'],", "if self.rule.get('bcc'): to_addr = to_addr + self.rule['bcc'] try: if self.smtp_ssl:", "type? # OR raise and fail to load the alert", "# Default title, get stripped search version if 'alert_subject' not", "special characters and dashes # directly adjacent to words appear", "if incident_key_values[i] is None: key_value = self.rule.get(self.pagerduty_incident_key_args[i]) if key_value: incident_key_values[i]", "self.max_age) self.watchers = self.rule.get('jira_watchers') if self.bump_in_statuses and self.bump_not_in_statuses: msg =", "Gitter\") def get_info(self): return {'type': 'gitter', 'gitter_webhook_url': self.gitter_webhook_url} class ServiceNowAlerter(Alerter):", "else: top_events.sort(key=lambda x: x[1], reverse=True) for term, count in top_events:", "'title': self.create_title(matches), 'text': body, 'mrkdwn_in': ['text', 'pretext'], 'fields': [] }", "uses a Python logger (by default, alerting to terminal). 
\"\"\"", "= lookup_es_key(match, es_key) headers = { \"Content-Type\": \"application/json\", \"Accept\": \"application/json;charset=utf-8\"", "'title': self.create_title(matches), 'text': body } if self.ms_teams_theme_color != '': payload['themeColor']", "# This blob contains non-unicode, so lets pretend it's Latin-1", "the user-facing API a more representative name self.labels = self.rule.get('jira_labels',", "match in matches: key_tuple = tuple([unicode(lookup_es_key(match, key)) for key in", "(self.rule['name'], match[qk], lookup_es_key(match, self.rule['timestamp_field']))) else: elastalert_logger.info('Alert for %s at %s:'", "self.issue_type = self.rule['jira_issuetype'] # We used to support only a", "know how to set it # Note this is only", "= self.rule.get('hipchat_ignore_ssl_errors', False) self.hipchat_notify = self.rule.get('hipchat_notify', True) self.hipchat_from = self.rule.get('hipchat_from',", "else: return self.pagerduty_incident_key def get_info(self): return {'type': 'pagerduty', 'pagerduty_client_name': self.pagerduty_client_name}", "= body.replace('<', '&lt;') body = body.replace('>', '&gt;') return body def", "allows us to maintain backwards compatibility # while also giving", "response.raise_for_status() except RequestException as e: raise EAException(\"Error posting to Telegram:", "+= 'No events found.\\n' else: top_events.sort(key=lambda x: x[1], reverse=True) for", "⚠ ```\\n' % (self.create_title(matches)) for match in matches: body +=", "resulted in the following data for summary_table_fields ==> {0}:\\n\\n\".format( summary_table_fields_with_count", "if self.hipchat_message_format == 'html': body = body.replace('\\n', '<br />') #", "Number type elif arg_type == 'number': self.jira_args[arg_name] = int(value) elif", "= body.encode('UTF-8') if self.ms_teams_alert_fixed_width: body = body.replace('`', \"'\") body =", "watcher. 
Does the user exist?\\n{1}\" .format( watcher, ex )), None,", "return self.rule[strValue[1:-1]] else: return value def alert(self, match): \"\"\" Send", "% (timestamp, text) self.client.add_comment(ticket, comment) def alert(self, matches): title =", "body = self.create_alert_body(matches) body = self.format_body(body) # post to slack", "'jira_description', 'jira_ignore_in_title', 'jira_issuetype', 'jira_label', 'jira_labels', 'jira_max_age', 'jira_priority', 'jira_project', 'jira_server', 'jira_watchers',", "require special handling # Here is a sample of one", "elif arg_type == 'option': self.jira_args[arg_name] = {'value': value} # Complex", "get_info(self): return {'type': 'telegram', 'telegram_room_id': self.telegram_room_id} class GitterAlerter(Alerter): \"\"\" Creates", "{'type': 'ms_teams', 'ms_teams_webhook_url': self.ms_teams_webhook_url} class SlackAlerter(Alerter): \"\"\" Creates a Slack", "__init__(self, rule, match): self.rule = rule self.match = match def", "not, it will manifest itself as an API error that", "information for the jira field '{0}'\".format(normalized_jira_field)) arg_type = field['schema']['type'] #", "def create_default_title(self, matches): subject = 'ElastAlert: %s' % (self.rule['name']) #", "+= unicode(JiraFormattedMatchString(self.rule, match)) if len(matches) > 1: body += '\\n----------------------------------------\\n'", "self.post_proxy} if self.post_proxy else None for url in self.post_url: try:", "= alert_value alert_subject_values = ['<MISSING VALUE>' if val is None", "required_options = frozenset(['gitter_webhook_url']) def __init__(self, rule): super(GitterAlerter, self).__init__(rule) self.gitter_webhook_url =", "1 for keys, count in match_aggregation.iteritems(): text_table.add_row([key for key in", "(self.rule['name'], lookup_es_key(match, self.rule['timestamp_field'])) ) fullmessage['match'] = lookup_es_key(match, self.rule['timestamp_field']) elastalert_logger.info(unicode(BasicMatchString(self.rule, match)))", 
"cls=DateTimeEncoder), headers=headers, proxies=proxies) warnings.resetwarnings() response.raise_for_status() except RequestException as e: raise", "stripped search version if 'alert_subject' not in self.rule: title =", "val is None else val alert_text = alert_text.format(**kw) self.text +=", "e: raise EAException(\"Error posting to ServiceNow: %s\" % e) elastalert_logger.info(\"Alert", "know how to set # For anything else, we will", "to a broker. \"\"\" required_options = frozenset(['stomp_hostname', 'stomp_hostport', 'stomp_login', 'stomp_password'])", "'html': body = body.replace('\\n', '<br />') # Post to HipChat", "proxies = {'https': self.hipchat_proxy} if self.hipchat_proxy else None payload =", "Exotel\"), None, sys.exc_info()[2] elastalert_logger.info(\"Trigger sent to Exotel\") def get_info(self): return", "elasticsearch indices are sent by HTTP POST. Encoded with JSON.", "self.project}, 'issuetype': {'name': self.issue_type}} if self.components: # Support single component", "{} payload.update(self.post_static_payload) for post_key, es_key in self.post_payload.items(): payload[post_key] = lookup_es_key(match,", "'exotel_to_number', 'exotel_from_number']) def __init__(self, rule): super(ExotelAlerter, self).__init__(rule) self.exotel_account_sid = self.rule['exotel_account_sid']", "already if isinstance(self.rule['email'], basestring): self.rule['email'] = [self.rule['email']] # If there", "ExotelAlerter(Alerter): required_options = frozenset(['exotel_account_sid', 'exotel_auth_token', 'exotel_to_number', 'exotel_from_number']) def __init__(self, rule):", "[] qk = self.rule.get('query_key', None) fullmessage = {} for match", "% (to_addr)) def create_default_title(self, matches): subject = 'ElastAlert: %s' %", "may not work, as the key might actually be 'key',", "\"\"\" Create an incident on PagerDuty for each alert \"\"\"", "self.pagerduty_proxy} if self.pagerduty_proxy else None try: response = requests.post( self.url,", "Remove the jira_ part. 
Convert underscores to spaces normalized_jira_field =", "self.watchers: try: self.client.add_watcher(self.issue.key, watcher) except Exception as ex: # Re-raise", "self.create_title(matches), 'text': body, 'mrkdwn_in': ['text', 'pretext'], 'fields': [] } ]", "that type? # OR raise and fail to load the", "and isinstance(cc, basestring): self.rule['cc'] = [self.rule['cc']] # If there is", "others that will need to be updated on a case-by-case", "to Exotel\"), None, sys.exc_info()[2] elastalert_logger.info(\"Trigger sent to Exotel\") def get_info(self):", "'no entity display name') self.url = 'https://alert.victorops.com/integrations/generic/20131114/alert/%s/%s' % ( self.victorops_api_key,", "pretty_ts(lookup_es_key(match, self.rule['timestamp_field'])) comment = \"This alert was triggered again at", "return json.dumps(blob, cls=DateTimeEncoder, sort_keys=True, indent=4, ensure_ascii=False) except UnicodeDecodeError: # This", "requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies) warnings.resetwarnings() response.raise_for_status() except RequestException as", "a string value known_field_list = [ 'jira_account_file', 'jira_assignee', 'jira_bump_after_inactivity', 'jira_bump_in_statuses',", "like strings or numbers if arg_type == 'array': # As", "posting to Gitter: %s\" % e) elastalert_logger.info(\"Alert sent to Gitter\")", "'custom' in field['schema'] and field['schema']['custom'] in self.custom_string_types_with_special_handling: self.jira_args[arg_name] = {'value':", "alert(self, matches): body = self.create_alert_body(matches) # post to pagerduty headers", "(built-ins and custom ones) fields = self.client.fields() for jira_field, value", "custom ones) fields = self.client.fields() for jira_field, value in self.rule.iteritems():", "None: alert_value = self.rule.get(alert_text_args[i]) if alert_value: alert_text_values[i] = alert_value alert_text_values", "'alert_subject' in self.rule: return 
self.create_custom_title(matches) return self.create_default_title(matches) def create_custom_title(self, matches):", "that we couldn't find that type? # OR raise and", "range(len(incident_key_values)): if incident_key_values[i] is None: key_value = self.rule.get(self.pagerduty_incident_key_args[i]) if key_value:", "in alert_text_args] # Support referencing other top-level rule properties #", "JIRA metadata says that these are strings, but # in", "self.rule['alert_subject_args'] alert_subject_values = [lookup_es_key(matches[0], arg) for arg in alert_subject_args] #", "]) def __init__(self, rule): super(ServiceNowAlerter, self).__init__(rule) self.servicenow_rest_url = self.rule['servicenow_rest_url'] self.servicenow_proxy", "self.rule.get('pipe_match_json'): match_json = json.dumps(matches, cls=DateTimeEncoder) + '\\n' stdout, stderr =", "\"\"\" required_options = frozenset([]) def __init__(self, rule): self.rule = rule", "the file which contains user and password information. \"\"\" account_conf", "summary. :param matches: A list of dictionaries of relevant information", "jira_field.startswith('jira_') and jira_field not in self.known_field_list: # Remove the jira_", "formatting command: %s\" % (e)) # Run command and pipe", "Creates a HipChat room notification for each alert \"\"\" required_options", "add_suffix def alert(self, matches): body = self.create_alert_body(matches) # Add JIRA", "DateTimeEncoder(json.JSONEncoder): def default(self, obj): if hasattr(obj, 'isoformat'): return obj.isoformat() else:", "logging.error(\"Priority %s not found. 
Valid priorities are %s\" % (self.priority,", "return unicode(text) def create_default_title(self, matches): return self.rule['name'] def get_account(self, account_file):", "if self.pagerduty_incident_key_args: incident_key_values = [lookup_es_key(matches[0], arg) for arg in self.pagerduty_incident_key_args]", "array_items == 'number': self.jira_args[arg_name] = [int(v) for v in value]", "'recipients': self.rule['email']} class JiraAlerter(Alerter): \"\"\" Creates a Jira ticket for", "'html') self.hipchat_auth_token = self.rule['hipchat_auth_token'] self.hipchat_room_id = self.rule['hipchat_room_id'] self.hipchat_domain = self.rule.get('hipchat_domain',", "except (SMTPException, error) as e: raise EAException(\"Error connecting to SMTP", "self.rule.get('hipchat_ignore_ssl_errors', False) self.hipchat_notify = self.rule.get('hipchat_notify', True) self.hipchat_from = self.rule.get('hipchat_from', '')", "\"cmdb_ci\": self.rule['cmdb_ci'], \"caller_id\": self.rule[\"caller_id\"] } try: response = requests.post( self.servicenow_rest_url,", "alert(self, matches): body = self.create_alert_body(matches) body = self.format_body(body) # post", "body.encode('UTF-8'), }, } # set https proxy, if it was", "{'type': 'command', 'command': ' '.join(self.last_command)} class SnsAlerter(Alerter): \"\"\" Send alert", "% (e)) self.smtp.sendmail(self.from_addr, to_addr, email_msg.as_string()) self.smtp.close() elastalert_logger.info(\"Sent email to %s\"", "to use only one or the other.' 
logging.warning(msg) self.jira_args =", "% (jql, ','.join(self.bump_not_in_statuses)) try: issues = self.client.search_issues(jql) except JIRAError as", "RequestException as e: raise EAException(\"Error posting to slack: %s\" %", "is %s\" % response) except: raise EAException(\"Error posting to Exotel\"),", "\"subcategory\": self.rule['subcategory'], \"cmdb_ci\": self.rule['cmdb_ci'], \"caller_id\": self.rule[\"caller_id\"] } try: response =", "self.get_arbitrary_fields() except JIRAError as e: # JIRAError may contain HTML,", "rejected: %s\" % (e)) self.smtp.sendmail(self.from_addr, to_addr, email_msg.as_string()) self.smtp.close() elastalert_logger.info(\"Sent email", "30) self.priority = self.rule.get('jira_priority') self.bump_tickets = self.rule.get('jira_bump_tickets', False) self.bump_not_in_statuses =", "a JIRA admin has configured if jira_field.startswith('jira_') and jira_field not", "# If there is a query_key, use that in the", "boto3.Session( aws_access_key_id=self.aws_access_key_id, aws_secret_access_key=self.aws_secret_access_key, region_name=self.aws_region, profile_name=self.profile ) sns_client = session.client('sns') sns_client.publish(", "self.rule.get('slack_channel_override', '') self.slack_emoji_override = self.rule.get('slack_emoji_override', ':ghost:') self.slack_icon_url_override = self.rule.get('slack_icon_url_override', '')", "self.slack_channel_override = self.rule.get('slack_channel_override', '') self.slack_emoji_override = self.rule.get('slack_emoji_override', ':ghost:') self.slack_icon_url_override =", "{ '@type': 'MessageCard', '@context': 'http://schema.org/extensions', 'summary': self.ms_teams_alert_summary, 'title': self.create_title(matches), 'text':", "have to be passed as objects with an identifier 'key'", "json.dumps(blob, cls=DateTimeEncoder, sort_keys=True, indent=4, ensure_ascii=False) except UnicodeDecodeError: # This blob", "self.ms_teams_proxy} if self.ms_teams_proxy else None payload = { '@type': 'MessageCard',", 
"self.rule.get('slack_proxy', None) self.slack_username_override = self.rule.get('slack_username_override', 'elastalert') self.slack_channel_override = self.rule.get('slack_channel_override', '')", "text_table = Texttable() text_table.header(summary_table_fields_with_count) match_aggregation = {} # Maintain an", "elastalert_logger.info(\"Alert sent to Slack\") def get_info(self): return {'type': 'slack', 'slack_username_override':", "%s\" % self.telegram_room_id) def get_info(self): return {'type': 'telegram', 'telegram_room_id': self.telegram_room_id}", "sent to Slack\") def get_info(self): return {'type': 'slack', 'slack_username_override': self.slack_username_override,", "Exotel from jira.client import JIRA from jira.exceptions import JIRAError from", "basic_auth=(self.user, self.password)) self.get_priorities() self.get_arbitrary_fields() except JIRAError as e: # JIRAError", "metadata says that these are strings, but # in reality,", "SNS service \"\"\" required_options = frozenset(['sns_topic_arn']) def __init__(self, *args): super(SnsAlerter,", "self.rule['exotel_from_number'] self.sms_body = self.rule.get('exotel_message_body', '') def alert(self, matches): client =", "headers headers = { \"Content-Type\": \"application/json\", \"Accept\": \"application/json;charset=utf-8\" } proxies", "rule): super(JiraAlerter, self).__init__(rule) self.server = self.rule['jira_server'] self.get_account(self.rule['jira_account_file']) self.project = self.rule['jira_project']", "'color': self.hipchat_msg_color, 'message': body, 'message_format': self.hipchat_message_format, 'notify': self.hipchat_notify, 'from': self.hipchat_from", "_add_match_items(self): match_items = dict([(x, y) for x, y in self.match.items()", "we don't know how to set it # Note this", "Default title, get stripped search version if 'alert_subject' not in", "encountered in the aggregation period for match in matches: key_tuple", "+= '\\n----------------------------------------\\n' return body def 
get_aggregation_summary_text(self, matches): text = ''", "= self.rule.get('aws_region', 'us-east-1') self.profile = self.rule.get('boto_profile', None) # Deprecated self.profile", "x: x[0]) for key, value in match_items: if key.startswith('top_events_'): continue", "as ex: # Re-raise the exception, preserve the stack-trace, and", "alert \"\"\" required_options = frozenset(['hipchat_auth_token', 'hipchat_room_id']) def __init__(self, rule): super(HipChatAlerter,", "HipChatAlerter(Alerter): \"\"\" Creates a HipChat room notification for each alert", "as e: raise EAException(\"Error posting HTTP Post alert: %s\" %", "'\\nJIRA ticket: %s' % (url) to_addr = self.rule['email'] if 'email_from_field'", "to be updated on a case-by-case basis custom_string_types_with_special_handling = [", "['string', 'date', 'datetime']: # Special case for multi-select custom types", "body = body.replace('>', '&gt;') return body def alert(self, matches): body", "import boto3 import requests import stomp from exotel import Exotel", "or list if type(self.watchers) != list: self.watchers = [self.watchers] if", "This should be simplified to use only one or the", "# context as to which watcher failed to be added", "add watchers on initial creation. 
Only as a follow-up action", "posting to HipChat: %s\" % e) elastalert_logger.info(\"Alert sent to HipChat", "self.post_url = post_url self.post_proxy = self.rule.get('http_post_proxy') self.post_payload = self.rule.get('http_post_payload', {})", "returns metadata about all the fields defined on the jira", "if 'smtp_auth_file' in self.rule: self.smtp.login(self.user, self.password) except (SMTPException, error) as", "x in range(len(priorities)): self.priority_ids[x] = priorities[x].id def set_assignee(self, assignee): self.assignee", "add_suffix = self.rule.get('email_add_domain') if add_suffix and not add_suffix.startswith('@'): self.rule['email_add_domain'] =", "if val is None: val = self.rule.get(name) kw[kw_name] = missing", "{'type': 'Unknown'} def create_title(self, matches): \"\"\" Creates custom alert title", "that we explicitly know how to set # For anything", "return alert_subject def create_alert_body(self, matches): body = self.get_aggregation_summary_text(matches) for match", "self.resolve_rule_references(root[key]) else: root[key] = self.resolve_rule_reference(value) def resolve_rule_reference(self, value): strValue =", "get_info(self): return {'type': 'ms_teams', 'ms_teams_webhook_url': self.ms_teams_webhook_url} class SlackAlerter(Alerter): \"\"\" Creates", "user exist?\\n{1}\" .format( watcher, ex )), None, sys.exc_info()[2] except JIRAError", "case-by-case basis custom_string_types_with_special_handling = [ 'com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes', 'com.atlassian.jira.plugin.system.customfieldtypes:multiselect', 'com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons', ] def", "match in matches: if qk in match: elastalert_logger.info( 'Alert for", "proxy, if it was provided proxies = {'https': self.ms_teams_proxy} if", "fields require special handling # Here is a sample of", "If there is a cc then also convert it a", "None) self.slack_username_override = self.rule.get('slack_username_override', 'elastalert') 
self.slack_channel_override = self.rule.get('slack_channel_override', '') self.slack_emoji_override", "if field: break if not field: # Log a warning", "\"```{0}```\".format('```\\n\\n```'.join(x for x in body.split('\\n'))).replace('\\n``````', '') return body def alert(self,", "Encoded with JSON. \"\"\" def __init__(self, rule): super(HTTPPostAlerter, self).__init__(rule) post_url", "self.rule: title = self.create_default_title(matches, True) else: title = self.create_title(matches) if", "date) if self.bump_in_statuses: jql = '%s and status in (%s)'", "else None payload = { 'chat_id': self.telegram_room_id, 'text': body, 'parse_mode':", "updated on a case-by-case basis custom_string_types_with_special_handling = [ 'com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes', 'com.atlassian.jira.plugin.system.customfieldtypes:multiselect',", "self.twilio_to_number = self.rule['twilio_to_number'] self.twilio_from_number = self.rule['twilio_from_number'] def alert(self, matches): client", "self.rule.get('http_post_static_payload', {}) self.post_all_values = self.rule.get('http_post_all_values', not self.post_payload) def alert(self, matches):", "elif array_items == 'number': self.jira_args[arg_name] = [int(v) for v in", "% (self.rule['name'], lookup_es_key(match, self.rule['timestamp_field']))) elastalert_logger.info(unicode(BasicMatchString(self.rule, match))) def get_info(self): return {'type':", "has a query_key, add that value plus timestamp to subject", "'localhost') self.stomp_hostport = self.rule.get('stomp_hostport', '61613') self.stomp_login = self.rule.get('stomp_login', 'admin') self.stomp_password", "body[:9980] + '..(truncated)' # Use appropriate line ending for text/html", "passed as objects with an identifier 'key' elif array_items ==", "connecting to SMTP host: %s\" % (e)) except SMTPAuthenticationError as", "along only first 1024 chars raise EAException(\"Error connecting to JIRA:", "self.sms_body response = 
client.sms(self.rule['exotel_from_number'], self.rule['exotel_to_number'], message_body) if response != 200:", "= self.rule.get('email_reply_to', email_msg['To']) email_msg['Date'] = formatdate() if self.rule.get('cc'): email_msg['CC'] =", "basestring): self.rule['email'] = [self.rule['email']] # If there is a cc", "self.create_alert_body(matches) body = self.format_body(body) # post to slack headers =", "self.custom_string_types_with_special_handling: self.jira_args[arg_name] = {'value': value} else: self.jira_args[arg_name] = value #", "'notify': self.hipchat_notify, 'from': self.hipchat_from } try: if self.hipchat_ignore_ssl_errors: requests.packages.urllib3.disable_warnings() response", "self.rule.get(self.pagerduty_incident_key_args[i]) if key_value: incident_key_values[i] = key_value incident_key_values = ['<MISSING VALUE>'", "there is a query_key, use that in the title if", "this should contain a field type corresponding to the type", "object. if 'custom' in field['schema'] and field['schema']['custom'] in self.custom_string_types_with_special_handling: self.jira_args[arg_name]", "too for i in range(len(incident_key_values)): if incident_key_values[i] is None: key_value", "Both have common statuses of (%s). 
As such, no tickets", "= {'https': self.pagerduty_proxy} if self.pagerduty_proxy else None try: response =", "def alert(self, matches): for match in matches: # Parse everything", "self.text[-2:] != '\\n\\n': self.text += '\\n' def _add_custom_alert_text(self): missing =", "self.smtp.has_extn('STARTTLS'): self.smtp.starttls(keyfile=self.smtp_key_file, certfile=self.smtp_cert_file) if 'smtp_auth_file' in self.rule: self.smtp.login(self.user, self.password) except", "UnicodeDecodeError: # This blob contains non-unicode, so lets pretend it's", "rule properties to avoid redundant copy/paste if type(root) == list:", "also convert it to a list if it isn't bcc", "slack: %s\" % e) elastalert_logger.info(\"Alert sent to Slack\") def get_info(self):", "EAException(\"Error posting to HipChat: %s\" % e) elastalert_logger.info(\"Alert sent to", "not in self.rule: title = self.create_default_title(matches, True) else: title =", "self).__init__(rule) self.telegram_bot_token = self.rule['telegram_bot_token'] self.telegram_room_id = self.rule['telegram_room_id'] self.telegram_api_url = self.rule.get('telegram_api_url',", "list: # Make a copy since we may be modifying", "if bcc and isinstance(bcc, basestring): self.rule['bcc'] = [self.rule['bcc']] add_suffix =", "self.rule['name'] + '\\n\\n' self._add_custom_alert_text() self._ensure_new_line() if self.rule.get('alert_text_type') != 'alert_text_only': self._add_rule_text()", "if text: text = u'{{noformat}}{0}{{noformat}}'.format(text) return text def create_default_title(self, matches,", "single label. 
This allows us to maintain backwards compatibility #", "self.rule['exotel_to_number'] self.exotel_from_number = self.rule['exotel_from_number'] self.sms_body = self.rule.get('exotel_message_body', '') def alert(self,", "= ticket self.pipeline['jira_server'] = self.server return None self.jira_args['summary'] = title", "alerts = [] qk = self.rule.get('query_key', None) fullmessage = {}", "ensure_ascii=False) def __str__(self): self.text = '' if 'alert_text' not in", "field = next((f for f in fields if normalized_jira_field ==", "payload['icon_url'] = self.slack_icon_url_override else: payload['icon_emoji'] = self.slack_emoji_override for url in", "types (the JIRA metadata says that these are strings, but", "self.rule['email_add_domain']] elif isinstance(recipient, list): to_addr = recipient if 'email_add_domain' in", "string containing fields in match for the given rule. \"\"\"", "self.components] if self.labels: # Support single label or list if", "return text def create_default_title(self, matches, for_search=False): # If there is", "def get_info(self): return {'type': 'jira'} class CommandAlerter(Alerter): required_options = set(['command'])", "on messages longer than 10000 characters if (len(body) > 9999):", "arrays of complex types that have to be passed as", "in self.post_url: try: response = requests.post(url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies)", "body = self.create_alert_body(matches) # HipChat sends 400 bad request on", "bot api for each alert \"\"\" required_options = frozenset(['telegram_bot_token', 'telegram_room_id'])", "+ self.rule['cc'] if self.rule.get('bcc'): to_addr = to_addr + self.rule['bcc'] try:", "set.' 
% \\ (','.join(self.bump_in_statuses), ','.join(self.bump_not_in_statuses)) intersection = list(set(self.bump_in_statuses) & set(self.bump_in_statuses))", "Log a warning to ElastAlert saying that we couldn't find", "strValue.endswith('$') and strValue[1:-1] in self.rule: if type(value) == int: return", "title = 'ElastAlert: %s matched %s' % (lookup_es_key(matches[0], self.rule['query_key']), self.rule['name'])", "'username': self.slack_username_override, 'channel': self.slack_channel_override, 'parse': self.slack_parse_override, 'text': self.slack_text_string, 'attachments': [", "\"\"\" def __init__(self, rule, match): self.rule = rule self.match =", "if '@' in recipient: to_addr = [recipient] elif 'email_add_domain' in", "import subprocess import sys import warnings from email.mime.text import MIMEText", "payload['icon_emoji'] = self.slack_emoji_override for url in self.slack_webhook_url: try: response =", "self.gitter_proxy else None payload = { 'message': body, 'level': self.gitter_msg_level", "(jql, ','.join(self.bump_in_statuses)) if self.bump_not_in_statuses: jql = '%s and status not", "= value elif array_items == 'number': self.jira_args[arg_name] = [int(v) for", "self.sms_body = self.rule.get('exotel_message_body', '') def alert(self, matches): client = Exotel(self.exotel_account_sid,", "import Texttable from twilio.base.exceptions import TwilioRestException from twilio.rest import Client", "password information. 
\"\"\" account_conf = yaml_loader(account_file) if 'user' not in", "type(self.labels) != list: self.labels = [self.labels] self.jira_args['labels'] = self.labels if", "f in fields if normalized_jira_field == f[identifier].replace('_', ' ').lower()), None)", "to shell injection!') self.rule['command'] = [self.rule['command']] self.new_style_string_format = False if", "x: x[1], reverse=True) for term, count in top_events: self.text +=", "['text', 'pretext'], 'fields': [] } ] } if self.slack_icon_url_override !=", "in self.rule: recipient = lookup_es_key(matches[0], self.rule['email_from_field']) if isinstance(recipient, basestring): if", "and password from an account file. :param account_file: Name of", "self.rule['new_style_string_format']: self.new_style_string_format = True def alert(self, matches): # Format the", "'<MISSING VALUE>' alert_text = unicode(self.rule.get('alert_text', '')) if 'alert_text_args' in self.rule:", "kw_name in self.rule.get('alert_text_kw').items(): val = lookup_es_key(self.match, name) # Support referencing", "try: return json.dumps(blob, cls=DateTimeEncoder, sort_keys=True, indent=4, ensure_ascii=False) except UnicodeDecodeError: #", "required_options = frozenset(['slack_webhook_url']) def __init__(self, rule): super(SlackAlerter, self).__init__(rule) self.slack_webhook_url =", "None) for match in matches: if qk in match: elastalert_logger.info(", "endpoint(s). \"\"\" for match in matches: payload = match if", "= True if '%' in self.rule['command']: logging.warning('Warning! You could be", "body = body.replace('<', '&lt;') body = body.replace('>', '&gt;') return body", "to JIRA: %s\" % (str(e)[:1024])) try: if self.priority is not", "non-array types else: # Simple string types if arg_type in", "% (jql, e)) return None if len(issues): return issues[0] def", "in self.rule['command']: logging.warning('Warning! 
You could be vulnerable to shell injection!')", "self.rule['exotel_auth_token'] self.exotel_to_number = self.rule['exotel_to_number'] self.exotel_from_number = self.rule['exotel_from_number'] self.sms_body = self.rule.get('exotel_message_body',", "arg in alert_subject_args] # Support referencing other top-level rule properties", "[self.rule['email']] # If there is a cc then also convert", "unicode(self.rule.get('alert_text', '')) if 'alert_text_args' in self.rule: alert_text_args = self.rule.get('alert_text_args') alert_text_values", "= 'ElastAlert: %s matched %s' % (lookup_es_key(matches[0], self.rule['query_key']), self.rule['name']) else:", "%s+ events' % (count) return title def get_info(self): return {'type':", "# This technically may not work if there is a", "field that a JIRA admin has configured if jira_field.startswith('jira_') and", "to_addr = to_addr + self.rule['bcc'] try: if self.smtp_ssl: if self.smtp_port:", "name self.components = self.rule.get('jira_components', self.rule.get('jira_component')) # We used to support", "is a bcc then also convert it to a list", "'{0}'\".format(normalized_jira_field)) arg_name = field['id'] # Check the schema information to", "<filename>elastalert/alerts.py<gh_stars>0 # -*- coding: utf-8 -*- import copy import datetime", "# We used to support only a single component. 
This", "Special case for custom types (the JIRA metadata says that", "( self.victorops_api_key, self.victorops_routing_key) self.victorops_proxy = self.rule.get('victorops_proxy', None) def alert(self, matches):", "{}) self.post_static_payload = self.rule.get('http_post_static_payload', {}) self.post_all_values = self.rule.get('http_post_all_values', not self.post_payload)", "in alert_text_values] alert_text = alert_text.format(*alert_text_values) elif 'alert_text_kw' in self.rule: kw", "== list: self.resolve_rule_references(root[key]) else: root[key] = self.resolve_rule_reference(value) def resolve_rule_reference(self, value):", "self.url = 'https://%s/bot%s/%s' % (self.telegram_api_url, self.telegram_bot_token, \"sendMessage\") self.telegram_proxy = self.rule.get('telegram_proxy',", "single component or list if type(self.components) != list: self.jira_args['components'] =", "single value for a multi-value field e.g. jira_labels: Only_One_Label if", "= [command_arg % matches[0] for command_arg in self.rule['command']] self.last_command =", "title = title.replace('\\\\', '\\\\\\\\') date = (datetime.datetime.now() - datetime.timedelta(days=self.max_age)).strftime('%Y-%m-%d') jql", "else None for url in self.post_url: try: response = requests.post(url,", "and password information. \"\"\" account_conf = yaml_loader(account_file) if 'user' not", "'\\n----------------------------------------\\n' body += u' ```' headers = {'content-type': 'application/json'} #", "Client as TwilioClient from util import EAException from util import", "blob contains non-unicode, so lets pretend it's Latin-1 to show", "certfile=self.smtp_cert_file) else: if self.smtp_port: self.smtp = SMTP(self.smtp_host, self.smtp_port) else: self.smtp", "v in value] else: self.jira_args[arg_name] = value elif array_items ==", "will manifest itself as an API error that will bubble", "for identifier in ['name', 'id']: field = next((f for f", "Gets the username and password from an account file. 
:param", "in self.match.items() if not x.startswith('top_events_')]) json_blob = self._pretty_print_as_json(match_items) preformatted_text =", "self.post_url: try: response = requests.post(url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies) response.raise_for_status()", "= SMTP_SSL(self.smtp_host, self.smtp_port, keyfile=self.smtp_key_file, certfile=self.smtp_cert_file) else: self.smtp = SMTP_SSL(self.smtp_host, keyfile=self.smtp_key_file,", "running command %s: %s\" % (' '.join(command), e)) def get_info(self):", "self.rule['email'] if 'email_from_field' in self.rule: recipient = lookup_es_key(matches[0], self.rule['email_from_field']) if", "other top-level rule properties to avoid redundant copy/paste if type(root)", "tickets will ever be found.' % ( msg, ','.join(intersection)) msg", "available, raise an exception since we don't know how to", "Creates a Gitter activity message for each alert \"\"\" required_options", "You could be vulnerable to shell injection!') self.rule['command'] = [self.rule['command']]", "= u'⚠ *%s* ⚠ ```\\n' % (self.create_title(matches)) for match in", "match in matches: # Parse everything into description. 
description =", "in self.rule: qk = matches[0].get(self.rule['query_key']) if qk: subject += '", "= recipient if 'email_add_domain' in self.rule: to_addr = [name +", "'jira_max_age', 'jira_priority', 'jira_project', 'jira_server', 'jira_watchers', ] # Some built-in jira", "counts in self.match.items(): if key.startswith('top_events_'): self.text += '%s:\\n' % (key[11:])", "i in xrange(len(alert_text_values)): if alert_text_values[i] is None: alert_value = self.rule.get(alert_text_args[i])", "self.rule.get('aws_access_key_id') self.aws_secret_access_key = self.rule.get('aws_secret_access_key') self.aws_region = self.rule.get('aws_region', 'us-east-1') self.profile =", "body } try: response = requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies)", "= self.rule.get('slack_parse_override', 'none') self.slack_text_string = self.rule.get('slack_text_string', '') def format_body(self, body):", "= alert_text.format(**kw) self.text += alert_text def _add_rule_text(self): self.text += self.rule['type'].get_match_str(self.match)", "if self.rule.get('alert_text_type') != 'alert_text_only': self._add_rule_text() self._ensure_new_line() if self.rule.get('top_count_keys'): self._add_top_counts() if", "to be used, e.g. as an e-mail subject or JIRA", "= self.rule['exotel_account_sid'] self.exotel_auth_token = self.rule['exotel_auth_token'] self.exotel_to_number = self.rule['exotel_to_number'] self.exotel_from_number =", "rule): super(GitterAlerter, self).__init__(rule) self.gitter_webhook_url = self.rule['gitter_webhook_url'] self.gitter_proxy = self.rule.get('gitter_proxy', None)", "of priority index to id. 
\"\"\" priorities = self.client.priorities() self.priority_ids", "self.rule['timestamp_field']))) alerts.append( '1)Alert for %s, %s at %s:' % (self.rule['name'],", "# -*- coding: utf-8 -*- import copy import datetime import", "% (pretty_ts(matches[0][self.rule['timestamp_field']], self.rule.get('use_local_time'))) # Add count for spikes count =", "self.slack_msg_color = self.rule.get('slack_msg_color', 'danger') self.slack_parse_override = self.rule.get('slack_parse_override', 'none') self.slack_text_string =", "in field or 'type' in field['schema']): raise Exception(\"Could not determine", "'category', 'subcategory', 'cmdb_ci', 'caller_id' ]) def __init__(self, rule): super(ServiceNowAlerter, self).__init__(rule)", "[recipient] elif 'email_add_domain' in self.rule: to_addr = [recipient + self.rule['email_add_domain']]", "to terminal). \"\"\" def alert(self, matches): qk = self.rule.get('query_key', None)", "it was provided proxies = {'https': self.hipchat_proxy} if self.hipchat_proxy else", "self.hipchat_ignore_ssl_errors = self.rule.get('hipchat_ignore_ssl_errors', False) self.hipchat_notify = self.rule.get('hipchat_notify', True) self.hipchat_from =", "= [summary_table_fields] # Include a count aggregation so that we", "= self.rule.get('jira_components', self.rule.get('jira_component')) # We used to support only a", "'jira_account_file', 'jira_assignee', 'jira_bump_after_inactivity', 'jira_bump_in_statuses', 'jira_bump_not_in_statuses', 'jira_bump_tickets', 'jira_component', 'jira_components', 'jira_description', 'jira_ignore_in_title',", "%s: %s\" % (ticket, e)) if self.pipeline is not None:", "the stack-trace, and give some # context as to which", "= requests.post( self.servicenow_rest_url, auth=(self.rule['username'], self.rule['password']), headers=headers, data=json.dumps(payload, cls=DateTimeEncoder), proxies=proxies )", "up self.jira_args[arg_name] = [{'name': v} for v in value] #", "payload[post_key] = lookup_es_key(match, es_key) headers = { 
\"Content-Type\": \"application/json\", \"Accept\":", "# Complex type else: self.jira_args[arg_name] = {'name': value} def get_priorities(self):", "(self.jira_args, e)) elastalert_logger.info(\"Opened Jira ticket: %s\" % (self.issue)) if self.pipeline", "msg = '%s Both have common statuses of (%s). As" ]
[ "log_info.assert_has_calls(calls) assert 1 == audit_logger.call_count calls = [call('SCHCH', {'schedule': {'name':", "assert scheduler._check_processes_pending is True @pytest.mark.asyncio async def test__wait_for_task_completion(self, mocker): #", "is_enabled_modified=True) # THEN assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) assert 1 ==", "isinstance(scheduler._schedule_executions[schedule.id], scheduler._ScheduleExecution) # Confirm that no task has started yet", "len(scheduler._schedules) calls = [call('Starting'), call('Starting Scheduler: Management port received is", "pytest.raises(ValueError) as ex: temp_schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.name = \"\"", "from fledge.services.core.scheduler.exceptions import * from fledge.common.storage_client.storage_client import StorageClientAsync __author__ =", "in args3 @pytest.mark.asyncio async def test__schedule_first_task(self, mocker): # TODO: Mandatory", "return {\"count\": 1} @classmethod async def delete_from_tbl(cls, table_name, condition=None): pass", "tested during System tests.\") async def test__scheduler_loop(self, mocker): pass @pytest.mark.asyncio", "new_schedules[5]['schedule_interval'] = test_interval mocker.patch.object(MockStorageAsync, 'schedules', new_schedules) # WHEN # THEN", "await self.scheduler_fixture(mocker) # WHEN # THEN with pytest.raises(ScheduleNotFoundError) as excinfo:", "pass @classmethod async def query_tbl_with_payload(cls, table_name, query_payload): if table_name ==", "None assert tasks[0].start_time is not None assert tasks[0].end_time is None", "message = await scheduler.disable_schedule(sch_id) # THEN assert status is True", "WHEN await scheduler.start() # THEN assert scheduler._ready is True assert", "= mocker.patch.object(scheduler, \"disable_schedule\", return_value=mock_coro()) schedule_id = uuid.uuid4() schedule_row = scheduler._ScheduleRow(", "enabled\", str(sch_id) 
log_info.assert_called_with(*log_params) @pytest.mark.asyncio async def test_enable_schedule_wrong_schedule_id(self, mocker): # GIVEN", "await scheduler.delete_schedule(sch_id) # THEN # Now confirm no schedule is", "log_debug = await self.scheduler_fixture(mocker) sch_id = uuid.UUID(\"ada12840-68d3-11e7-907b-a6006ad3dba0\") #Coap mocker.patch.object(scheduler, 'queue_task',", "core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.object(scheduler, '_schedule_first_task') log_exception = mocker.patch.object(scheduler._logger,", "# WHEN retval = await scheduler.stop() # THEN assert retval", "mock_task(): return \"\" async def mock_process(): m = MagicMock() m.pid", "(\"Queued schedule '%s' for execution\", 'OMF to PI north') ==", "= asyncio.ensure_future(get_cat()) # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None,", "] }, ] tasks = [ { \"id\": \"259b8570-65c1-4b92-8c62-e9642631a600\", \"process_name\":", "= await scheduler.get_schedule(schedule_id) @pytest.mark.asyncio async def test_save_schedule_new(self, mocker): @asyncio.coroutine def", "not fit for unit test.\") @pytest.mark.asyncio async def test__terminate_child_processes(self, mocker):", "return_value=_rv) await scheduler._get_schedules() schedule = scheduler._ScheduleRow( id=uuid.UUID(\"2b614d26-760f-11e7-b5a5-be2e44b06b34\"), process_name=\"North Readings to", "= time.time() mocker.patch.object(scheduler, '_schedule_first_task') mocker.patch.object(scheduler, '_scheduler_loop', return_value=asyncio.ensure_future(mock_task())) mocker.patch.multiple(scheduler, _core_management_port=9999, _core_management_host=\"0.0.0.0\",", "# Now queue task and assert that the task has", "THEN assert status is True assert message == \"Schedule successfully", "# assert (\"Queued schedule '%s' for execution\", 'OMF to PI", "= mocker.patch.object(scheduler._logger, \"info\") 
mocker.patch.object(scheduler, '_schedule_first_task') await scheduler._get_schedules() schedule = scheduler._ScheduleRow(", "None assert scheduler._task_processes is None assert scheduler._schedules is None assert", ">= 8: _rv = await get_cat() else: _rv = asyncio.ensure_future(get_cat())", "kwargs3 = log_info.call_args_list[3] assert 'stats collection' in args0 assert 'COAP", "len(scheduler._storage_async.schedules) == len(scheduler._schedules) mocker.patch.object(scheduler, '_ready', True) # WHEN # Now", "mock_schedule_execution = scheduler._ScheduleExecution() mock_schedule_executions[mock_schedule.id] = mock_schedule_execution mock_schedule_executions[mock_schedule.id].task_processes[mock_task_id] = mock_task_process mocker.patch.object(scheduler,", "assert 'stats collection' in args0 assert 'COAP listener south' in", "\"info\") return scheduler, schedule, log_info, log_exception, log_error, log_debug @pytest.mark.asyncio async", "async def test__scheduler_loop(self, mocker): pass @pytest.mark.asyncio async def test__schedule_next_timed_task(self, mocker):", "'_resume_check_schedules') # WHEN # THEN with pytest.raises(ScheduleNotFoundError) as excinfo: await", "pytest.raises(ScheduleNotFoundError) as excinfo: await scheduler.delete_schedule(uuid.uuid4()) @pytest.mark.asyncio async def test_get_running_tasks(self, mocker):", "time.time() curr_time = datetime.datetime.fromtimestamp(current_time) mocker.patch.multiple(scheduler, _max_running_tasks=10, _start_time=current_time) await scheduler._get_schedules() sch_id", "@pytest.mark.asyncio async def test_disable_schedule_already_disabled(self, mocker): # GIVEN scheduler = Scheduler()", "Assert that there is no North task queued for schedule", "task has started assert 1 == len(scheduler._schedule_executions[schedule.id].task_processes) # WHEN tasks", "execution\", 'OMF to PI north') == args args, kwargs =", "mocker.patch.multiple(scheduler, _schedules=mock_schedules, _task_processes=mock_task_processes, 
_schedule_executions=mock_schedule_executions) mocker.patch.object(scheduler, '_process_scripts', return_value=\"North Readings to PI\")", "def test__get_process_scripts_exception(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage =", "3 and sys.version_info.minor >= 8: _rv = await mock_process() else:", "{'name': 'OMF to PI north', 'repeat': 30.0, 'enabled': False, 'type':", "mock_task_id = uuid.uuid4() mock_task_process.task_id = mock_task_id mock_task_processes[mock_task_process.task_id] = mock_task_process mock_schedule_executions", "@classmethod async def query_tbl_with_payload(cls, table_name, query_payload): if table_name == 'tasks':", "# THEN # Now confirm there is one schedule less", "current_time) time_after_call = sch_execution.next_start_time # THEN assert time_after_call > time.mktime(curr_time.timetuple())", "type=Schedule.Type.INTERVAL, repeat=datetime.timedelta(seconds=30), repeat_seconds=30, time=None, day=None, exclusive=True, enabled=True) log_exception = mocker.patch.object(scheduler._logger,", "Now delete schedule await scheduler.delete_schedule(sch_id) # THEN # Now confirm", "task.state is not None assert task.cancel_requested is None assert task.start_time", "mock_task_process = scheduler._TaskProcess() mock_task_processes = dict() mock_task_process.process = await asyncio.create_subprocess_exec(\"sleep\",", "\"name\": \"backup\", \"script\": [ \"tasks/backup_postgres\" ] }, { \"name\": \"COAP\",", "= await self.scheduler_fixture(mocker) schedule_id = uuid.uuid4() schedule_row = scheduler._ScheduleRow( id=schedule_id,", "be running at any given time\", \"type\": \"integer\", \"default\": str(Scheduler._DEFAULT_MAX_RUNNING_TASKS),", "temp_schedule assert str(ex).endswith('day must be between 1 and 7') @pytest.mark.asyncio", "1 == queue_task.call_count calls = [call(\"Enabled Schedule '%s/%s' process '%s'\\n\",", "there is no task queued for this schedule at first", "1 == get_cat.call_count assert scheduler._max_running_tasks is 
not None assert scheduler._max_completed_task_age", "schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) random_schedule_id =", "schedule.repeat == schedule_row[5] assert schedule.exclusive == schedule_row[7] assert schedule.enabled ==", "pytest.raises(ValueError) as ex: temp_schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.repeat = 1234", "async def test_save_schedule_exception(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception,", "HH24:MI:SS.MS\"}, {\"alias\": \"end_time\", \"column\": \"end_time\", \"format\": \"YYYY-MM-DD HH24:MI:SS.MS\"}, \"reason\", \"exit_code\"],", "1 == log_info.call_count # assert call(\"Queued schedule '%s' for execution\",", "= sch_execution.next_start_time # THEN assert time_after_call > time_before_call assert 4", "audit_logger.call_count calls = [call('SCHCH', {'schedule': {'name': 'OMF to PI north',", "len(scheduler._storage_async.schedules) == len(schedules) @pytest.mark.asyncio async def test_get_schedule(self, mocker): # GIVEN", "Schedule %s', '2b614d26-760f-11e7-b5a5-be2e44b06b34'), call(\"Disabled Schedule '%s/%s' process '%s'\\n\", 'OMF to", "await self.scheduler_fixture(mocker) random_schedule_id = uuid.uuid4() # WHEN await scheduler.enable_schedule(random_schedule_id) #", "assert 'OMF to PI north' in args2 @pytest.mark.asyncio async def", "+ 1 == len(scheduler._schedules) assert 1 == audit_logger.call_count calls =[call('SCHAD',", "scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.object(scheduler, '_schedule_first_task') # WHEN await scheduler._read_storage()", "__version__ = \"${VERSION}\" async def mock_task(): return \"\" async def", "is True # WHEN await scheduler.queue_task(sch_id) # THEN assert isinstance(scheduler._schedule_executions[sch_id],", "disabled\" assert (scheduler._schedules[sch_id]).id == sch_id assert (scheduler._schedules[sch_id]).enabled is 
False assert", "True) mocker.patch.object(scheduler, '_task_processes') log_exception = mocker.patch.object(scheduler._logger, \"exception\") random_schedule_id = uuid.uuid4()", "Readings to PI' in args @pytest.mark.asyncio async def test_cancel_task_exception(self, mocker):", "return m @pytest.allure.feature(\"unit\") @pytest.allure.story(\"scheduler\") class TestScheduler: async def scheduler_fixture(self, mocker):", "None assert scheduler._schedules is None assert scheduler._process_scripts is None assert", "return_value=asyncio.ensure_future(mock_task())) mocker.patch.object(scheduler, '_terminate_child_processes') mocker.patch.object(asyncio, 'create_subprocess_exec', return_value=_rv) await scheduler._get_schedules() schedule =", ">= 8: _rv = await mock_process() else: _rv = asyncio.ensure_future(mock_process())", "= MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.object(scheduler, '_schedule_first_task') await", "= log_info.call_args_list[1] assert \"Stopping process: Schedule '%s' process '%s' task", "target is an async function. 
if sys.version_info.major == 3 and", "log_debug = await self.scheduler_fixture(mocker) schedule_id = uuid.uuid4() schedule_row = scheduler._ScheduleRow(", "'OMF to PI north') == args args, kwargs = log_info.call_args_list[0]", "pytest.raises(NotReadyError) as excinfo: await scheduler.get_scheduled_processes() with pytest.raises(NotReadyError) as excinfo: await", "repeat=datetime.timedelta(seconds=30), repeat_seconds=30, time=None, day=None, exclusive=True, enabled=True) mocker.patch.object(scheduler, '_ready', True) mocker.patch.object(scheduler,", "= MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) log_debug = mocker.patch.object(scheduler._logger,", "return_value=asyncio.ensure_future(mock_task())) mocker.patch.object(scheduler, '_resume_check_schedules', return_value=asyncio.ensure_future(mock_task())) mocker.patch.object(scheduler, '_purge_tasks_task', return_value=asyncio.ensure_future(asyncio.sleep(.1))) mocker.patch.object(scheduler, '_scheduler_loop_task', return_value=asyncio.ensure_future(asyncio.sleep(.1)))", "None assert 2 == log_info.call_count # args, kwargs = log_info.call_args_list[0]", "async def test_delete_schedule_exception(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception,", "temp_schedule.repeat = 1234 await scheduler.save_schedule(temp_schedule) del temp_schedule assert str(ex).endswith('repeat must", "sleep above, no task processes should be left pending assert", "\"North Readings to OCS\", \"schedule_name\": \"OMF to OCS north\", \"schedule_type\":", "scheduler.start() # THEN assert scheduler._ready is True assert len(scheduler._storage_async.scheduled_processes) ==", "TODO: Remove after implementation of above test test__read_config() mocker.patch.object(scheduler, '_read_config',", "await scheduler.queue_task(uuid.uuid4()) with pytest.raises(NotReadyError) as excinfo: await 
scheduler.delete_schedule(uuid.uuid4()) with pytest.raises(NotReadyError)", "True assert schedule.process_name == \"purge\" @pytest.mark.asyncio async def test_get_schedule_exception(self, mocker):", "return_value=mock_coro()) # WHEN await scheduler.save_schedule(schedule, is_enabled_modified=True) # THEN assert len(scheduler._storage_async.schedules)", "None @pytest.mark.asyncio async def test__check_purge_tasks(self, mocker): # TODO: Mandatory -", "As part of scheduler._get_schedules(), scheduler._schedule_first_task() also gets executed, hence #", "name='Test Schedule', type=Schedule.Type.TIMED, day=1, time=datetime.time(), repeat=datetime.timedelta(seconds=30), repeat_seconds=30, exclusive=False, enabled=True, process_name='TestProcess')", "9999 m.terminate = lambda: True return m @pytest.allure.feature(\"unit\") @pytest.allure.story(\"scheduler\") class", "assert schedule.process_name == tasks[0].process_name assert tasks[0].reason is '' assert tasks[0].state", "scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) await scheduler._get_process_scripts() mocker.patch.object(scheduler, '_ready', True) #", "log_info.call_count calls = [call('No Task running for Schedule %s', '2b614d26-760f-11e7-b5a5-be2e44b06b34'),", "implemented in main Scheduler class.\") async def test__mark_tasks_interrupted(self, mocker): pass", "PI north\", type=Schedule.Type.INTERVAL, repeat=datetime.timedelta(seconds=30), repeat_seconds=30, time=None, day=None, exclusive=True, enabled=True) mocker.patch.object(scheduler,", "}, { \"id\": \"2176eb68-7303-11e7-8cf7-a6006ad3dba0\", \"process_name\": \"stats collector\", \"schedule_name\": \"stats collection\",", "is False assert scheduler._start_time is None calls = [call('Processing stop", "assert scheduler._purge_tasks_task is not None @pytest.mark.asyncio async def test__check_schedules(self, mocker):", "with pytest.raises(ScheduleNotFoundError) as excinfo: await 
scheduler.queue_task(uuid.uuid4()) @pytest.mark.asyncio async def test_delete_schedule(self,", "\"backup hourly\", \"schedule_type\": 3, \"schedule_interval\": \"01:00:00\", \"schedule_time\": \"\", \"schedule_day\": 0,", "assert (scheduler._schedules[sch_id]).enabled is True assert 1 == queue_task.call_count calls =", "\"COAP listener south\", \"schedule_type\": 1, \"schedule_interval\": \"00:00:00\", \"schedule_time\": \"\", \"schedule_day\":", "update_tbl(cls, table_name, payload): # Only valid for test_save_schedule_update if table_name", "Readings to PI\", name=\"OMF to PI north\", type=Schedule.Type.INTERVAL, repeat=datetime.timedelta(seconds=30), repeat_seconds=30,", "be left pending assert 0 == len(scheduler._task_processes) assert 0 ==", "mock_task_processes[mock_task_process.task_id] = mock_task_process mock_schedule_executions = dict() mock_schedule_execution = scheduler._ScheduleExecution() mock_schedule_executions[mock_schedule.id]", "kwargs0 = log_info.call_args_list[0] args1, kwargs1 = log_info.call_args_list[1] args2, kwargs2 =", "scheduler._read_storage() # THEN assert len(scheduler._storage_async.scheduled_processes) == len(scheduler._process_scripts) assert len(scheduler._storage_async.schedules) ==", "await scheduler._get_schedules() mocker.patch.object(scheduler, '_ready', True) # Confirm there are 14", "@pytest.mark.asyncio async def test_cancel_task_all_ok(self, mocker): # GIVEN scheduler, schedule, log_info,", "that cancel request has not been made assert scheduler._schedule_executions[schedule.id].task_processes[task_id].cancel_requested is", "-*- coding: utf-8 -*- # FLEDGE_BEGIN # See: http://fledge-iot.readthedocs.io/ #", "# WHEN # Check ELSE part mocker.patch.object(scheduler, '_scheduler_loop_sleep_task', None) scheduler._resume_check_schedules()", "'purge_tasks', return_value=asyncio.ensure_future(mock_task())) # WHEN scheduler._check_purge_tasks() # THEN assert scheduler._purge_tasks_task is", "= sch_execution.next_start_time # WHEN 
next_dt = datetime.datetime.fromtimestamp(sch_execution.next_start_time) next_dt += datetime.timedelta(seconds=sch.repeat_seconds)", "= mocker.patch.object(AuditLogger, 'information', return_value=asyncio.ensure_future(mock_task())) first_task = mocker.patch.object(scheduler, '_schedule_first_task') resume_sch =", "assert 0 == disable_schedule.call_count @pytest.mark.asyncio async def test_save_schedule_update_with_enable_modified(self, mocker): @asyncio.coroutine", "'type': Schedule.Type.INTERVAL, 'processName': 'backup', 'exclusive': True, 'repeat': 3600.0, 'enabled': True}})]", "30.0, 'enabled': True, 'exclusive': False}})] audit_logger.assert_has_calls(calls, any_order=True) assert 1 ==", "= scheduler._ScheduleRow( id=schedule_id, name='Test Schedule', type=Schedule.Type.TIMED, day=0, time=0, repeat=datetime.timedelta(seconds=30), repeat_seconds=30,", "= list(scheduler._schedule_executions[schedule.id].task_processes.keys())[0] # Confirm that cancel request has not been", "type=Schedule.Type.INTERVAL, repeat=datetime.timedelta(seconds=30), repeat_seconds=30, time=None, day=None, exclusive=True, enabled=True) mocker.patch.object(scheduler, '_ready', True)", "class MockStorageAsync(StorageClientAsync): schedules = [ { \"id\": \"cea17db8-6ccc-11e7-907b-a6006ad3dba0\", \"process_name\": \"purge\",", "hence # \"stat collector\" appears twice in this list. assert", "= scheduler._ScheduleRow( id=schedule_id, name='Test Schedule', type=Schedule.Type.INTERVAL, day=0, time=0, repeat=10, repeat_seconds=10,", "side_effect=Exception()) # WHEN # THEN task_id = uuid.uuid4() with pytest.raises(Exception)", "str(random_schedule_id) log_exception.assert_called_with(*log_params) @pytest.mark.asyncio async def test_queue_task(self, mocker): # GIVEN scheduler", "uuid.UUID(\"d1631422-9ec6-11e7-abc4-cec278b6b50a\") # backup await scheduler._get_schedules() # Confirm no. 
of schedules", "'%s' process '%s' task %s pid %s\\n%s\" in args assert", "== audit_logger.call_count calls =[call('SCHAD', {'schedule': {'name': 'Test Schedule', 'processName': 'TestProcess',", "temp_schedule.time = datetime.time() await scheduler.save_schedule(temp_schedule) del temp_schedule assert str(ex).endswith('day must", "}, ] scheduled_processes = [ { \"name\": \"purge\", \"script\": [", "Assert that there is no task queued for mock_schedule with", "scheduler.disable_schedule(uuid.uuid4()) with pytest.raises(NotReadyError) as excinfo: await scheduler.enable_schedule(uuid.uuid4()) with pytest.raises(NotReadyError) as", "copy.deepcopy(MockStorageAsync.schedules) new_schedules[5]['schedule_interval'] = test_interval mocker.patch.object(MockStorageAsync, 'schedules', new_schedules) # WHEN #", "queued await scheduler.queue_task(schedule.id) assert isinstance(scheduler._schedule_executions[schedule.id], scheduler._ScheduleExecution) # Confirm that no", "await self.scheduler_fixture(mocker) schedule_id = uuid.uuid4() # WHEN # THEN with", "= await scheduler.disable_schedule(sch_id) # THEN assert status is True assert", "scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) log_debug =", "mocker.patch.object(scheduler._logger, \"info\") mocker.patch.object(scheduler, '_schedule_first_task') await scheduler._get_schedules() schedule = scheduler._ScheduleRow( id=uuid.UUID(\"2b614d26-760f-11e7-b5a5-be2e44b06b34\"),", "collector\" appears twice in this list. 
assert 'stats collection' in", "task.end_time is not None assert task.exit_code is '0' @pytest.mark.skip(\"Need a", "mocker.patch.object(asyncio, 'create_subprocess_exec', return_value=_rv) mocker.patch.object(asyncio, 'ensure_future', return_value=asyncio.ensure_future(mock_task())) mocker.patch.object(scheduler, '_resume_check_schedules') mocker.patch.object(scheduler, '_process_scripts',", "= scheduler._schedule_row_to_schedule(schedule_id, schedule_row) # THEN assert isinstance(schedule, Schedule) assert schedule.schedule_id", "assert schedule.exclusive == schedule_row[7] assert schedule.enabled == schedule_row[8] assert schedule.process_name", "len(schedules) @pytest.mark.asyncio async def test_get_schedule(self, mocker): # GIVEN scheduler, schedule,", "m @pytest.allure.feature(\"unit\") @pytest.allure.story(\"scheduler\") class TestScheduler: async def scheduler_fixture(self, mocker): #", "@pytest.mark.asyncio async def test__check_purge_tasks(self, mocker): # TODO: Mandatory - Add", "WHEN await scheduler._wait_for_task_completion(mock_task_process) # THEN # After task completion, sleep", "9999)] log_info.assert_has_calls(calls, any_order=True) calls = [call('Database command: %s', 'scheduled_processes'), call('Database", "log_debug = await self.scheduler_fixture(mocker) # WHEN # THEN with pytest.raises(TaskNotFoundError)", "assert 1 == len(scheduler._schedule_executions[schedule.id].task_processes) # WHEN tasks = await scheduler.get_running_tasks()", "async def test__check_purge_tasks(self, mocker): # TODO: Mandatory - Add negative", "mocker.patch.object(scheduler._logger, \"exception\") mocker.patch.object(scheduler, '_scheduler_loop', return_value=asyncio.ensure_future(mock_task())) mocker.patch.object(scheduler, '_resume_check_schedules', return_value=asyncio.ensure_future(mock_task())) mocker.patch.object(scheduler, '_purge_tasks_task',", "= mocker.patch.object(scheduler._logger, \"exception\") log_error = mocker.patch.object(scheduler._logger, 
\"error\") log_debug = mocker.patch.object(scheduler._logger,", "calls = [call('Processing stop request'), call('Stopped')] log_info.assert_has_calls(calls, any_order=True) # TODO:", "queue_task = mocker.patch.object(scheduler, 'queue_task', return_value=asyncio.ensure_future(mock_task())) audit_logger = mocker.patch.object(AuditLogger, 'information', return_value=asyncio.ensure_future(mock_task()))", "there are 14 schedules assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) # WHEN", "core_management_port=None) log_info = mocker.patch.object(scheduler._logger, \"info\") current_time = time.time() curr_time =", "Changed in version 3.8: patch() now returns an AsyncMock if", "to PI\") mocker.patch.object(scheduler, '_wait_for_task_completion') # Confirm that task has not", "enabled=True, process_name='TestProcess') # WHEN schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) # THEN", "GIVEN scheduler = Scheduler() schedule_id = uuid.uuid4() schedule_row = scheduler._ScheduleRow(", "= await self.scheduler_fixture(mocker) sch_id = uuid.UUID(\"d1631422-9ec6-11e7-abc4-cec278b6b50a\") # backup queue_task =", "is one schedule less assert len(scheduler._storage_async.schedules) - 1 == len(scheduler._schedules)", "is True assert len(scheduler._storage_async.scheduled_processes) == len(scheduler._process_scripts) assert len(scheduler._storage_async.schedules) == len(scheduler._schedules)", "assert tasks[0].reason is '' assert tasks[0].state is not None assert", "assert (scheduler._schedules[sch_id]).id == sch_id assert (scheduler._schedules[sch_id]).enabled is True assert 1", "calls = [call(\"Enabled Schedule '%s/%s' process '%s'\\n\", 'backup hourly', 'd1631422-9ec6-11e7-abc4-cec278b6b50a',", "test_cancel_task_all_ok(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug", "\"schedule_interval\": \"00:00:30\", \"schedule_time\": \"\", \"schedule_day\": 0, \"exclusive\": \"t\", \"enabled\": \"t\"", 
"collection\", \"schedule_type\": 2, \"schedule_interval\": \"00:00:15\", \"schedule_time\": \"00:00:15\", \"schedule_day\": 3, \"exclusive\":", "'_resume_check_schedules') log_info = mocker.patch.object(scheduler._logger, \"info\") enable_schedule = mocker.patch.object(scheduler, \"enable_schedule\", return_value=mock_coro())", "scheduler._read_config() # THEN assert 1 == cr_cat.call_count assert 1 ==", "class TestScheduler: async def scheduler_fixture(self, mocker): # Changed in version", "= MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.multiple(scheduler, _ready=True, _paused=False) mocker.patch.object(scheduler, '_max_completed_task_age', datetime.datetime.now()) #", "'information', return_value=asyncio.ensure_future(mock_task())) # WHEN status, message = await scheduler.enable_schedule(sch_id) #", "= \"\" await scheduler.save_schedule(temp_schedule) del temp_schedule assert str(ex).endswith(\"name can not", "scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.time = 1234 await scheduler.save_schedule(temp_schedule) del temp_schedule assert", "kwargs = log_info.call_args_list[0] assert 'OMF to PI north' in args", "can't be used in 'await' expression\")] log_exception.assert_has_calls(calls) @pytest.mark.asyncio async def", "main Scheduler class.\") async def test__mark_tasks_interrupted(self, mocker): pass @pytest.mark.asyncio async", "THEN assert 1 == disable_schedule.call_count @pytest.mark.asyncio async def test_save_schedule_exception(self, mocker):", "THEN assert len(scheduler._storage_async.scheduled_processes) == len(processes) @pytest.mark.asyncio async def test_schedule_row_to_schedule(self, mocker):", "has been queued await scheduler.queue_task(schedule.id) assert isinstance(scheduler._schedule_executions[schedule.id], scheduler._ScheduleExecution) # Changed", "}, { \"name\": \"COAP\", \"script\": [ \"services/south\" ] }, {", "assert time_after_call > time_before_call assert 3 == 
log_info.call_count args0, kwargs0", "return_value=asyncio.ensure_future(asyncio.sleep(.1))) mocker.patch.object(scheduler, '_scheduler_loop_task', return_value=asyncio.ensure_future(asyncio.sleep(.1))) current_time = time.time() mocker.patch.multiple(scheduler, _core_management_port=9999, _core_management_host=\"0.0.0.0\",", "self.scheduler_fixture(mocker) # Assert that there is no North task queued", "started assert 1 == len(scheduler._schedule_executions[schedule.id].task_processes) assert 1 == log_info.call_count #", "= uuid.UUID(\"d1631422-9ec6-11e7-abc4-cec278b6b50a\") # backup # WHEN # THEN with pytest.raises(ScheduleNotFoundError)", "log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) sch_id = uuid.UUID(\"d1631422-9ec6-11e7-abc4-cec278b6b50a\") #", "status is True assert message == \"Schedule successfully enabled\" assert", "MockStorageAsync(core_management_host=None, core_management_port=None) await scheduler._get_process_scripts() mocker.patch.object(scheduler, '_ready', True) # WHEN processes", "mocker.patch.object(scheduler, '_ready', True) # Confirm there are 14 schedules assert", "assert tasks[0].state is not None assert tasks[0].cancel_requested is None assert", "%s\", str(random_schedule_id) log_exception.assert_called_with(*log_params) @pytest.mark.asyncio async def test_queue_task(self, mocker): # GIVEN", "True return m @pytest.allure.feature(\"unit\") @pytest.allure.story(\"scheduler\") class TestScheduler: async def scheduler_fixture(self,", "scheduler.purge_tasks() # THEN assert scheduler._purge_tasks_task is None assert scheduler._last_task_purge_time is", "= mocker.patch.object(scheduler._logger, \"debug\") log_info = mocker.patch.object(scheduler._logger, \"info\") current_time = time.time()", "}, { \"name\": \"stats collector\", \"script\": [ \"tasks/statistics\" ] },", "\"OMF to OCS north\", \"schedule_type\": 3, \"schedule_interval\": \"1 day 00:00:40\",", "assert scheduler._max_running_tasks is None assert 
scheduler._max_completed_task_age is None await scheduler._read_config()", "PI north\", \"schedule_type\": 3, \"schedule_interval\": \"00:00:30\", \"schedule_time\": \"\", \"schedule_day\": 0,", "False assert scheduler._paused is False assert scheduler._start_time is None calls", "- Add negative tests for full code coverage # GIVEN", "len(scheduler._schedule_executions[mock_schedule.id].task_processes) args, kwargs = log_info.call_args_list[0] assert 'OMF to PI north'", "\"purge\", \"schedule_name\": \"purge\", \"schedule_type\": 4, \"schedule_interval\": \"01:00:00\", \"schedule_time\": \"\", \"schedule_day\":", "= mocker.patch.object(scheduler._logger, \"info\") schedule_id = uuid.UUID(\"2b614d26-760f-11e7-b5a5-be2e44b06b34\") # OMF to PI", "North schedule_row = scheduler._ScheduleRow( id=schedule_id, name='Test Schedule', type=Schedule.Type.TIMED, day=1, time=datetime.time(),", "args1, kwargs1 = log_info.call_args_list[1] args2, kwargs2 = log_info.call_args_list[2] assert 'stats", "return scheduler, schedule, log_info, log_exception, log_error, log_debug @pytest.mark.asyncio async def", "PI north' in args assert 'North Readings to PI' in", "assert status is True assert message == \"Schedule successfully disabled\"", "scheduler._get_schedules() sch_id = uuid.UUID(\"2176eb68-7303-11e7-8cf7-a6006ad3dba0\") # stat collector sch = scheduler._schedules[sch_id]", "@pytest.mark.asyncio async def test_get_schedules(self, mocker): # GIVEN scheduler, schedule, log_info,", "def test__wait_for_task_completion(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage =", "# Changed in version 3.8: patch() now returns an AsyncMock", "+= datetime.timedelta(seconds=sch.repeat_seconds) scheduler._schedule_next_timed_task(sch, sch_execution, next_dt) time_after_call = sch_execution.next_start_time # THEN", "scheduler._get_schedules() mocker.patch.object(scheduler, '_ready', True) mocker.patch.object(scheduler, '_task_processes') log_info = 
mocker.patch.object(scheduler._logger, \"info\")", "scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.multiple(scheduler, _ready=True,", "MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.object(scheduler, '_schedule_first_task') await scheduler._get_schedules()", "await scheduler.enable_schedule(uuid.uuid4()) with pytest.raises(NotReadyError) as excinfo: await scheduler.queue_task(uuid.uuid4()) with pytest.raises(NotReadyError)", "json.loads(args[1]) assert payload == p @pytest.mark.asyncio async def test_get_tasks(self, mocker):", "> time_before_call assert 3 == log_info.call_count args0, kwargs0 = log_info.call_args_list[0]", "scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) mocker.patch.object(scheduler,", "dict() mock_task_process.process = await asyncio.create_subprocess_exec(\"sleep\", \".1\") mock_task_process.schedule = mock_schedule mock_task_id", "p @pytest.mark.asyncio async def test_cancel_task_all_ok(self, mocker): # GIVEN scheduler, schedule,", "14 schedules assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) # WHEN # Now", "= await self.scheduler_fixture(mocker) # Assert that there is no task", "schedule '%s' for execution\", 'OMF to PI north') == log_info.call_args_list[0]", "async def test__get_process_scripts(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage", "isinstance(scheduler._schedule_executions[sch_id], scheduler._ScheduleExecution) # log_params = \"Queued schedule '%s' for execution\",", "hourly', 'd1631422-9ec6-11e7-abc4-cec278b6b50a', 'backup')] log_info.assert_has_calls(calls, any_order=True) assert 1 == audit_logger.call_count calls", "\"rows\": 
MockStorageAsync.schedules } if table_name == 'scheduled_processes': return { \"count\":", "('00:25:61', True) ]) async def test__get_schedules(self, test_interval, is_exception, mocker): #", "args3 @pytest.mark.asyncio async def test__schedule_first_task(self, mocker): # TODO: Mandatory -", "= mocker.patch.object(scheduler._logger, \"exception\") new_schedules = copy.deepcopy(MockStorageAsync.schedules) new_schedules[5]['schedule_interval'] = test_interval mocker.patch.object(MockStorageAsync,", "assert len(scheduler._storage_async.schedules) == len(schedules) @pytest.mark.asyncio async def test_get_schedule(self, mocker): #", "test.\") @pytest.mark.asyncio async def test__terminate_child_processes(self, mocker): pass class MockStorage(StorageClientAsync): def", "assert task.end_time is not None assert task.exit_code is '0' @pytest.mark.skip(\"Need", "north\", \"schedule_type\": 3, \"schedule_interval\": \"1 day 00:00:40\", \"schedule_time\": \"\", \"schedule_day\":", "async def test_cancel_task_all_ok(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception,", "any_order=True) assert 1 == first_task.call_count assert 1 == resume_sch.call_count assert", "await scheduler.save_schedule(temp_schedule) del temp_schedule assert str(ex).endswith(\"name can not be empty\")", "task_id = list(scheduler._schedule_executions[schedule.id].task_processes.keys())[0] # WHEN tasks = await scheduler.get_tasks() #", "test_remove_service_from_task_processes(self): pass @pytest.mark.asyncio async def test_disable_schedule(self, mocker): # GIVEN scheduler", "args args, kwargs = log_info.call_args_list[1] assert \"Stopping process: Schedule '%s'", "THEN assert scheduler._check_processes_pending is False # WHEN # Check ELSE", "MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) log_debug = mocker.patch.object(scheduler._logger, \"debug\",", "None 
@pytest.mark.asyncio async def test_get_task(self, mocker): # GIVEN scheduler, schedule,", "__copyright__ = \"Copyright (c) 2017 OSIsoft, LLC\" __license__ = \"Apache", "3600.0, 'enabled': True}})] audit_logger.assert_has_calls(calls, any_order=True) @pytest.mark.asyncio async def test_enable_schedule_already_enabled(self, mocker):", "await scheduler.get_schedule(uuid.uuid4()) with pytest.raises(NotReadyError) as excinfo: await scheduler.save_schedule(Schedule(Schedule.Type.INTERVAL)) with pytest.raises(NotReadyError)", "== \"Schedule successfully enabled\" assert (scheduler._schedules[sch_id]).id == sch_id assert (scheduler._schedules[sch_id]).enabled", "is None assert task.start_time is not None assert task.end_time is", "await self.scheduler_fixture(mocker) sch_id = uuid.UUID(\"d1631422-9ec6-11e7-abc4-cec278b6b50a\") # backup queue_task = mocker.patch.object(scheduler,", "args assert 'OMF to PI north' in args assert 'North", "= mocker.patch.object(scheduler._logger, \"info\") current_time = time.time() mocker.patch.multiple(scheduler, _max_running_tasks=10, _start_time=current_time) await", "id=uuid.UUID(\"2b614d26-760f-11e7-b5a5-be2e44b06b34\"), process_name=\"North Readings to PI\", name=\"OMF to PI north\", type=Schedule.Type.INTERVAL,", "False), ('0 day 12:30:11', False), ('1 day 12:40:11', False), ('2", "%s', 'scheduled_processes'), call('Database command: %s', 'schedules')] log_debug.assert_has_calls(calls, any_order=True) @pytest.mark.asyncio async", "log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) schedule_id = uuid.uuid4() schedule_row", "schedules assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) mocker.patch.object(scheduler, '_ready', True) # WHEN", "tasks[0].cancel_requested is None assert tasks[0].start_time is not None assert tasks[0].end_time", "# Confirm that cancel request has not been made assert", "def test_enable_schedule_wrong_schedule_id(self, mocker): # GIVEN scheduler, schedule, 
log_info, log_exception, log_error,", "self.scheduler_fixture(mocker) sch_id = uuid.UUID(\"d1631422-9ec6-11e7-abc4-cec278b6b50a\") # backup await scheduler._get_schedules() # Confirm", "return_value=_rv) mocker.patch.object(asyncio, 'ensure_future', return_value=asyncio.ensure_future(mock_task())) mocker.patch.object(scheduler, '_resume_check_schedules') mocker.patch.object(scheduler, '_process_scripts', return_value=\"North Readings", "scheduler._last_task_purge_time is not None @pytest.mark.asyncio async def test__check_purge_tasks(self, mocker): #", "@pytest.mark.asyncio async def test__get_process_scripts_exception(self, mocker): # GIVEN scheduler = Scheduler()", "scheduler._schedule_executions is None assert scheduler._task_processes is None assert scheduler._schedules is", "'backup', 'exclusive': True, 'repeat': 3600.0, 'enabled': True}})] audit_logger.assert_has_calls(calls, any_order=True) @pytest.mark.asyncio", "assert (scheduler._schedules[sch_id]).enabled is False log_params = \"Schedule %s already disabled\",", "_ready=True, _paused=False) mocker.patch.object(scheduler, '_max_completed_task_age', datetime.datetime.now()) # WHEN await scheduler.purge_tasks() #", "\"id\": \"5d7fed92-fb9a-11e7-8c3f-9a214cf093ae\", \"process_name\": \"North Readings to OCS\", \"schedule_name\": \"OMF to", "sch_id = uuid.UUID(\"ada12840-68d3-11e7-907b-a6006ad3dba0\") #Coap await scheduler._get_schedules() mocker.patch.object(scheduler, '_ready', True) #", "uuid.UUID(\"ada12840-68d3-11e7-907b-a6006ad3dba0\") #Coap await scheduler._get_schedules() mocker.patch.object(scheduler, '_ready', True) # Confirm there", "def test__schedule_next_task(self, mocker): # TODO: Mandatory - Add negative tests", "\"max_running_tasks\": { \"description\": \"The maximum number of tasks that can", "assert isinstance(schedule, Schedule) assert schedule.schedule_id == schedule_id assert schedule.name ==", "audit_logger.assert_has_calls(calls, any_order=True) @pytest.mark.asyncio async def 
test_disable_schedule_wrong_schedule_id(self, mocker): # GIVEN scheduler", "len(scheduler._schedule_executions[schedule.id].task_processes) # WHEN tasks = await scheduler.get_running_tasks() # THEN assert", "'enabled': True, 'repeat': 30.0, 'exclusive': False, 'day': 1, 'time': '0:0:0',", "scheduler._get_schedules() mocker.patch.object(scheduler, '_ready', True) mocker.patch.object(scheduler, '_task_processes') audit_logger = mocker.patch.object(AuditLogger, 'information',", "2, \"schedule_interval\": \"00:00:15\", \"schedule_time\": \"00:00:15\", \"schedule_day\": 3, \"exclusive\": \"f\", \"enabled\":", "log_params = \"Schedule %s already enabled\", str(sch_id) log_info.assert_called_with(*log_params) @pytest.mark.asyncio async", "TODO: Mandatory - Add negative tests for full code coverage", "mock_schedule = scheduler._ScheduleRow( id=uuid.UUID(\"2b614d26-760f-11e7-b5a5-be2e44b06b34\"), process_name=\"North Readings to PI\", name=\"OMF to", "and sys.version_info.minor >= 8: _rv = await mock_process() else: _rv", "log_args = 'Query failed: %s', 'schedules' log_exception.assert_called_once_with(*log_args) @pytest.mark.asyncio async def", "scheduler._task_processes is None assert scheduler._schedules is None assert scheduler._process_scripts is", "scheduler._schedule_row_to_schedule(schedule_id, schedule_row) enable_schedule = mocker.patch.object(scheduler, \"enable_schedule\", return_value=mock_coro()) disable_schedule = mocker.patch.object(scheduler,", "\"id\": \"ada12840-68d3-11e7-907b-a6006ad3dba0\", \"process_name\": \"COAP\", \"schedule_name\": \"COAP listener south\", \"schedule_type\": 1,", "log_exception.call_count log_params = 'Attempt to delete an enabled Schedule %s.", "= mocker.patch.object(scheduler._logger, \"exception\") mocker.patch.object(scheduler, '_scheduler_loop', return_value=asyncio.ensure_future(mock_task())) mocker.patch.object(scheduler, '_resume_check_schedules', return_value=asyncio.ensure_future(mock_task())) 
mocker.patch.object(scheduler,", "first_task.call_count assert 1 == resume_sch.call_count assert 1 == enable_schedule.call_count assert", "assert 0 == disable_schedule.call_count # WHEN await scheduler.save_schedule(schedule, is_enabled_modified=False) #", "== log_info.call_count # assert call(\"Queued schedule '%s' for execution\", 'OMF", "'create_subprocess_exec', return_value=_rv) mocker.patch.object(asyncio, 'ensure_future', return_value=asyncio.ensure_future(mock_task())) mocker.patch.object(scheduler, '_resume_check_schedules') mocker.patch.object(scheduler, '_process_scripts', return_value=\"North", "'_schedule_first_task') mocker.patch.object(scheduler, '_scheduler_loop', return_value=asyncio.ensure_future(mock_task())) mocker.patch.multiple(scheduler, _core_management_port=9999, _core_management_host=\"0.0.0.0\", current_time=current_time - 3600)", "delete schedule await scheduler.delete_schedule(sch_id) # THEN # Now confirm there", "log_debug = mocker.patch.object(scheduler._logger, 'debug', side_effect=Exception()) sch_id = uuid.UUID(\"d1631422-9ec6-11e7-abc4-cec278b6b50a\") # backup", "WHEN await scheduler._start_task(schedule) # THEN # Confirm that task has", "scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) random_schedule_id", "\"id\": \"2176eb68-7303-11e7-8cf7-a6006ad3dba0\", \"process_name\": \"stats collector\", \"schedule_name\": \"stats collection\", \"schedule_type\": 2,", "pytest.raises(ValueError) as ex: temp_schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.exclusive = None", "sch_execution.next_start_time # THEN assert time_after_call > time_before_call assert 3 ==", "not None assert task.exit_code is '0' @pytest.mark.skip(\"Need a suitable fixture\")", "to PI')] log_info.assert_has_calls(calls) assert 1 == audit_logger.call_count calls = [call('SCHCH',", "# Assert that there is no North task queued for", "__author__ = \"<NAME>\" __copyright__ = 
\"Copyright (c) 2017 OSIsoft, LLC\"", "return_value=asyncio.ensure_future(mock_task())) # WHEN status, message = await scheduler.enable_schedule(sch_id) # THEN", "test_enable_schedule_already_enabled(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug", "= [call('Processing stop request'), call('Stopped')] log_info.assert_has_calls(calls, any_order=True) # TODO: Find", "False # WHEN # Check ELSE part mocker.patch.object(scheduler, '_scheduler_loop_sleep_task', None)", "scheduler.get_scheduled_processes() # THEN assert len(scheduler._storage_async.scheduled_processes) == len(processes) @pytest.mark.asyncio async def", "log_error, log_debug = await self.scheduler_fixture(mocker) schedule_id = uuid.UUID(\"cea17db8-6ccc-11e7-907b-a6006ad3dba0\") # purge", "@asyncio.coroutine def mock_coro(): return \"\" # GIVEN scheduler, schedule, log_info,", "for unit test.\") @pytest.mark.asyncio async def test__terminate_child_processes(self, mocker): pass class", "log_exception = mocker.patch.object(scheduler._logger, \"exception\") log_error = mocker.patch.object(scheduler._logger, \"error\") log_debug =", "is None assert scheduler._last_task_purge_time is not None @pytest.mark.asyncio async def", "# GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async", "MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.object(scheduler, '_schedule_first_task') # WHEN await scheduler._read_storage() # THEN", "True) ]) async def test__get_schedules(self, test_interval, is_exception, mocker): # GIVEN", "schedule_id = uuid.UUID(\"2b614d26-760f-11e7-b5a5-be2e44b06b34\") # OMF to PI North schedule_row =", "assert 'stats collection' in args3 @pytest.mark.asyncio async def test__get_process_scripts(self, mocker):", "= await asyncio.create_subprocess_exec(\"sleep\", \".1\") mock_task_process.schedule = mock_schedule mock_task_id = uuid.uuid4()", 
"== len(scheduler._schedules) @pytest.mark.asyncio async def test__get_schedules_exception(self, mocker): # GIVEN scheduler", "async def test_start(self, mocker): # TODO: Mandatory - Add negative", "repeat_seconds=30, time=None, day=None, exclusive=True, enabled=True) log_exception = mocker.patch.object(scheduler._logger, \"exception\") log_error", "1} @classmethod async def delete_from_tbl(cls, table_name, condition=None): pass @classmethod async", "\"exit_code\"], \"where\": {\"column\": \"id\", \"condition\": \"=\", \"value\": str(task_id)}} args, kwargs", "\"\" await scheduler.save_schedule(temp_schedule) del temp_schedule assert str(ex).endswith(\"name can not be", "WHEN await scheduler.purge_tasks() # THEN assert scheduler._purge_tasks_task is None assert", "\"value\": str(Scheduler._DEFAULT_MAX_COMPLETED_TASK_AGE_DAYS) }, } # Changed in version 3.8: patch()", "\"integer\", \"default\": str(Scheduler._DEFAULT_MAX_COMPLETED_TASK_AGE_DAYS), \"value\": str(Scheduler._DEFAULT_MAX_COMPLETED_TASK_AGE_DAYS) }, } # Changed in", "= scheduler._ScheduleRow( id=schedule_id, name='Test Schedule', type=Schedule.Type.TIMED, day=1, time=datetime.time(), repeat=datetime.timedelta(seconds=30), repeat_seconds=30,", "PI north', '2b614d26-760f-11e7-b5a5-be2e44b06b34', 'North Readings to PI')] log_info.assert_has_calls(calls) assert 1", "True) mocker.patch.object(scheduler, '_resume_check_schedules') # WHEN # THEN with pytest.raises(ScheduleNotFoundError) as", "log_exception.call_args assert 'Query failed: %s' == args[0] p = json.loads(args[1])", "# WHEN tasks = await scheduler.get_running_tasks() # THEN assert 1", "\"end_time\", \"format\": \"YYYY-MM-DD HH24:MI:SS.MS\", \"column\": \"end_time\"}, \"reason\", \"exit_code\"], \"where\": {\"column\":", "import Scheduler, AuditLogger, ConfigurationManager from fledge.services.core.scheduler.entities import * from fledge.services.core.scheduler.exceptions", "def test__check_schedules(self, mocker): # TODO: Mandatory - Add negative 
tests", "mocker.patch.object(scheduler._logger, \"info\") sch_id = uuid.UUID(\"d1631422-9ec6-11e7-abc4-cec278b6b50a\") # backup # WHEN status,", "= MockStorageAsync(core_management_host=None, core_management_port=None) log_info = mocker.patch.object(scheduler._logger, \"info\") mocker.patch.object(scheduler, '_schedule_first_task') await", "between 1 and 7') @pytest.mark.asyncio @pytest.mark.skip(reason=\"To be done\") async def", "'repeat': 30.0, 'enabled': True, 'exclusive': False}})] audit_logger.assert_has_calls(calls, any_order=True) assert 1", "# Now queue task and assert that the North task", "\"exclusive\": \"t\", \"enabled\": \"f\" }, ] scheduled_processes = [ {", "'schedules' log_exception.assert_called_once_with(*log_args) @pytest.mark.asyncio async def test__read_storage(self, mocker): # GIVEN scheduler", "== len(scheduler._process_scripts) assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) @pytest.mark.asyncio @pytest.mark.skip(\"_mark_tasks_interrupted() not implemented", "north' in args2 @pytest.mark.asyncio @pytest.mark.skip(\"_scheduler_loop() not suitable for unit testing.", "await self.scheduler_fixture(mocker) # WHEN # THEN with pytest.raises(TaskNotFoundError) as excinfo:", "THEN # After task completion, sleep above, no task processes", "mocker.patch.object(scheduler, '_purge_tasks_task', return_value=asyncio.ensure_future(asyncio.sleep(.1))) mocker.patch.object(scheduler, '_scheduler_loop_task', return_value=asyncio.ensure_future(asyncio.sleep(.1))) current_time = time.time() mocker.patch.multiple(scheduler,", "= mock_schedule mock_task_process = scheduler._TaskProcess() mock_task_processes = dict() mock_task_process.process =", "= scheduler._schedule_row_to_schedule(schedule_id, schedule_row) # WHEN await scheduler.save_schedule(schedule, is_enabled_modified=True) # THEN", "North # WHEN status, message = await scheduler.disable_schedule(sch_id) # THEN", "kwargs = log_info.call_args_list[0] # assert (\"Queued schedule 
'%s' for execution\",", "scheduler.save_schedule(schedule, is_enabled_modified=True) # THEN assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) assert 1", "def _get_storage_service(self, host, port): return { \"id\": uuid.uuid4(), \"name\": \"Fledge", "def test_delete_schedule(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error,", "as ex: temp_schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.name = \"\" await", "excinfo: assert scheduler._schedule_executions[schedule.id] is True # Now queue task and", "@pytest.mark.asyncio async def test_save_schedule_new_with_enable_modified(self, mocker): @asyncio.coroutine def mock_coro(): return \"\"", "pytest.raises(NotReadyError) as excinfo: await scheduler.enable_schedule(uuid.uuid4()) with pytest.raises(NotReadyError) as excinfo: await", "= MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.object(scheduler, '_schedule_first_task') # WHEN await scheduler._read_storage() #", "tasks[0].process_name assert tasks[0].reason is '' assert tasks[0].state is not None", "== len(scheduler._schedules) @pytest.mark.asyncio @pytest.mark.skip(\"_mark_tasks_interrupted() not implemented in main Scheduler class.\")", "kwargs = log_info.call_args_list[1] assert \"Stopping process: Schedule '%s' process '%s'", "= test_interval mocker.patch.object(MockStorageAsync, 'schedules', new_schedules) # WHEN # THEN if", "30.0, 'exclusive': False, 'day': 1, 'time': '0:0:0', 'processName': 'TestProcess', 'type':", "should be left pending assert 0 == len(scheduler._task_processes) assert 0", "log_info = mocker.patch.object(scheduler._logger, \"info\") enable_schedule = mocker.patch.object(scheduler, \"enable_schedule\", return_value=mock_coro()) disable_schedule", "schedule_row) # WHEN await scheduler.save_schedule(schedule, is_enabled_modified=True) # THEN assert len(scheduler._storage_async.schedules)", "core_management_port=None) 
scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) cr_cat = mocker.patch.object(ConfigurationManager, \"create_category\", return_value=asyncio.ensure_future(mock_task()))", "process '%s' task %s pid %s\\n%s\" in args assert 'OMF", "log_exception.assert_called_with(*log_params) @pytest.mark.asyncio async def test_queue_task(self, mocker): # GIVEN scheduler =", "# THEN with pytest.raises(ScheduleNotFoundError) as excinfo: await scheduler.delete_schedule(uuid.uuid4()) @pytest.mark.asyncio async", "mocker.patch.object(scheduler, '_schedule_next_task') mocker.patch.multiple(scheduler, _schedules=mock_schedules, _task_processes=mock_task_processes, _schedule_executions=mock_schedule_executions) mocker.patch.object(scheduler, '_process_scripts', return_value=\"North Readings", "= sch_execution.next_start_time # THEN assert time_after_call > time_before_call assert 3", "_scheduler_loop_task calls = [call('An exception was raised by Scheduler._purge_tasks %s',", "log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) sch_id = uuid.UUID(\"ada12840-68d3-11e7-907b-a6006ad3dba0\") #Coap", "THEN assert schedule.process_name == tasks[0].process_name assert tasks[0].reason is '' assert", "to OCS\", \"script\": [ \"tasks/north\", \"--stream_id\", \"4\", \"--debug_level\", \"1\" ]", "scheduler._get_schedules() sch_id = uuid.UUID(\"cea17db8-6ccc-11e7-907b-a6006ad3dba0\") # backup mocker.patch.object(scheduler, '_ready', True) mocker.patch.object(scheduler,", "side_effect=Exception()) log_exception = mocker.patch.object(scheduler._logger, \"exception\") mocker.patch.object(scheduler, '_schedule_first_task', side_effect=Exception()) # WHEN", "test__scheduler_loop(self, mocker): pass @pytest.mark.asyncio async def test__schedule_next_timed_task(self, mocker): # TODO:", "is None assert scheduler._ready is False assert scheduler._paused is False", "del temp_schedule assert str(ex).endswith('time must be of type 
datetime.time') with", "\"http\" } class MockStorageAsync(StorageClientAsync): schedules = [ { \"id\": \"cea17db8-6ccc-11e7-907b-a6006ad3dba0\",", "= MockStorageAsync(core_management_host=None, core_management_port=None) log_info = mocker.patch.object(scheduler._logger, \"info\") mock_schedules = dict()", "calls =[call('SCHAD', {'schedule': {'name': 'Test Schedule', 'processName': 'TestProcess', 'type': Schedule.Type.INTERVAL,", "@pytest.mark.asyncio async def test__schedule_next_task(self, mocker): # TODO: Mandatory - Add", "log_info = mocker.patch.object(scheduler._logger, \"info\") sch_id = uuid.UUID(\"2b614d26-760f-11e7-b5a5-be2e44b06b34\") # OMF to", "= 0 temp_schedule.time = datetime.time() await scheduler.save_schedule(temp_schedule) del temp_schedule assert", "schedule_id = uuid.uuid4() # WHEN # THEN with pytest.raises(ScheduleNotFoundError): schedule", "1 == len(scheduler._schedules) @pytest.mark.asyncio async def test_delete_schedule_enabled_schedule(self, mocker): # GIVEN", "3 == log_info.call_count args0, kwargs0 = log_info.call_args_list[0] args1, kwargs1 =", "== len(scheduler._schedules) assert 1 == audit_logger.call_count calls =[call('SCHAD', {'schedule': {'name':", "str(Scheduler._DEFAULT_MAX_COMPLETED_TASK_AGE_DAYS) }, } # Changed in version 3.8: patch() now", "scheduler._check_schedules() # THEN assert earliest_start_time is not None assert 3", "is False assert 2 == log_info.call_count calls = [call('No Task", "log_error, log_debug = await self.scheduler_fixture(mocker) # Assert that there is", "test_disable_schedule(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None,", "time.time() mocker.patch.multiple(scheduler, _max_running_tasks=10, _start_time=current_time-3600) await scheduler._get_schedules() sch_id = uuid.UUID(\"2176eb68-7303-11e7-8cf7-a6006ad3dba0\") #", "\"state\", {\"alias\": \"start_time\", \"format\": \"YYYY-MM-DD HH24:MI:SS.MS\", \"column\": \"start_time\"}, {\"alias\": 
\"end_time\",", "= uuid.uuid4() mock_task_process.task_id = mock_task_id mock_task_processes[mock_task_process.task_id] = mock_task_process mock_schedule_executions =", "scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.object(scheduler, '_schedule_first_task') mocker.patch.object(scheduler, '_ready', True) mocker.patch.object(scheduler,", "\"schedule_interval\": \"01:00:00\", \"schedule_time\": \"\", \"schedule_day\": 0, \"exclusive\": \"t\", \"enabled\": \"f\"", "True assert message == \"Schedule successfully disabled\" assert (scheduler._schedules[sch_id]).id ==", "\"Stopping process: Schedule '%s' process '%s' task %s pid %s\\n%s\"", "calls = [call('SCHCH', {'schedule': {'name': 'Test Schedule', 'enabled': True, 'repeat':", "not None assert 2 == log_info.call_count # args, kwargs =", "tasks[0].start_time is not None assert tasks[0].end_time is None assert tasks[0].exit_code", "[ { \"name\": \"purge\", \"script\": [ \"tasks/purge\" ] }, {", "args0 assert 'COAP listener south' in args1 assert 'OMF to", "[call('Database command: %s', 'scheduled_processes'), call('Database command: %s', 'schedules')] log_debug.assert_has_calls(calls, any_order=True)", "%s' == args[0] p = json.loads(args[1]) assert payload == p", "scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) cr_cat = mocker.patch.object(ConfigurationManager, \"create_category\", return_value=asyncio.ensure_future(mock_task())) get_cat", "await scheduler.get_task(task_id) # THEN assert schedule.process_name == task.process_name assert task.reason", "\"tasks/north\", \"--stream_id\", \"4\", \"--debug_level\", \"1\" ] }, ] tasks =", "left pending assert 0 == len(scheduler._task_processes) assert 0 == len(scheduler._schedule_executions[mock_schedule.id].task_processes)", "\"stat collector\" appears twice in this list. 
assert 'stats collection'", "scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) # WHEN", "\"create_category\", return_value=asyncio.ensure_future(mock_task())) get_cat = mocker.patch.object(ConfigurationManager, \"get_category_all_items\", return_value=_rv) # WHEN assert", "scheduler.get_schedules() # THEN assert len(scheduler._storage_async.schedules) == len(schedules) @pytest.mark.asyncio async def", "scheduler._ScheduleExecution) # Confirm that no task has started yet assert", "not None assert tasks[0].end_time is None assert tasks[0].exit_code is None", "insert_into_tbl(cls, table_name, payload): pass @classmethod async def update_tbl(cls, table_name, payload):", "no task queued for this schedule at first with pytest.raises(KeyError)", "(scheduler._schedules[sch_id]).id == sch_id assert (scheduler._schedules[sch_id]).enabled is False log_params = \"Schedule", "THEN assert 1 == cr_cat.call_count assert 1 == get_cat.call_count assert", "= await scheduler.stop() # THEN assert retval is True assert", "collector\", \"script\": [ \"tasks/statistics\" ] }, { \"name\": \"backup\", \"script\":", "TestScheduler: async def scheduler_fixture(self, mocker): # Changed in version 3.8:", "mocker.patch.object(scheduler, '_resume_check_schedules') mocker.patch.object(scheduler, '_process_scripts', return_value=\"North Readings to PI\") mocker.patch.object(scheduler, '_wait_for_task_completion')", "mocker.patch.object(scheduler._logger, \"info\") schedule_id = uuid.UUID(\"2b614d26-760f-11e7-b5a5-be2e44b06b34\") # OMF to PI North", "scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.object(scheduler, '_schedule_first_task') await scheduler._get_schedules() mocker.patch.object(scheduler, '_ready',", "'_ready', True) # WHEN # Now delete schedule await scheduler.delete_schedule(sch_id)", 
"log_exception.assert_called_with(*log_params) @pytest.mark.asyncio async def test_delete_schedule_exception(self, mocker): # GIVEN scheduler, schedule,", "queue task and assert that the North task has been", "fledge.services.core.scheduler.exceptions import * from fledge.common.storage_client.storage_client import StorageClientAsync __author__ = \"<NAME>\"", "False), ('1 day 12:40:11', False), ('2 days', True), ('2 days", "\"Fledge Storage\", \"type\": \"Storage\", \"service_port\": 9999, \"management_port\": 9999, \"address\": \"0.0.0.0\",", "None await scheduler.save_schedule(temp_schedule) del temp_schedule assert str(ex).endswith('exclusive can not be", "current_time = time.time() mocker.patch.multiple(scheduler, _max_running_tasks=10, _start_time=current_time) await scheduler._get_schedules() mocker.patch.object(scheduler, '_start_task',", "mocker.patch.object(scheduler, 'purge_tasks', return_value=asyncio.ensure_future(mock_task())) # WHEN scheduler._check_purge_tasks() # THEN assert scheduler._purge_tasks_task", "== disable_schedule.call_count # WHEN await scheduler.save_schedule(schedule, is_enabled_modified=False) # THEN assert", "Now confirm there is one schedule less assert len(scheduler._storage_async.schedules) -", "# WHEN # THEN if is_exception is True: with pytest.raises(Exception):", "WHEN retval = await scheduler.stop() # THEN assert retval is", "fledge.services.core.scheduler.entities import * from fledge.services.core.scheduler.exceptions import * from fledge.common.storage_client.storage_client import", "GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async =", "WHEN assert scheduler._max_running_tasks is None assert scheduler._max_completed_task_age is None await", "test_queue_task_schedule_not_found(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None,", "# 0 for Interval Schedule assert schedule.repeat 
== schedule_row[5] assert", "log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) audit_logger = mocker.patch.object(AuditLogger, 'information',", "== datetime.timedelta(0, 3600) assert schedule.exclusive is True assert schedule.enabled is", "\"purge\", \"script\": [ \"tasks/purge\" ] }, { \"name\": \"stats collector\",", "await scheduler.get_task(uuid.uuid4()) @pytest.mark.asyncio async def test_get_task_exception(self, mocker): # GIVEN scheduler,", "days', True), ('12:30:11', False), ('0 day 12:30:11', False), ('1 day", "current_time=current_time - 3600) # TODO: Remove after implementation of above", "\"\", \"schedule_day\": 0, \"exclusive\": \"t\", \"enabled\": \"t\" }, { \"id\":", "mocker.patch.object(scheduler, '_schedule_first_task') # WHEN await scheduler._read_storage() # THEN assert len(scheduler._storage_async.scheduled_processes)", "as excinfo: await scheduler.disable_schedule(uuid.uuid4()) with pytest.raises(NotReadyError) as excinfo: await scheduler.enable_schedule(uuid.uuid4())", "of above test test__read_config() mocker.patch.object(scheduler, '_read_config', return_value=asyncio.ensure_future(mock_task())) assert scheduler._ready is", "to PI\", \"schedule_name\": \"OMF to PI north\", \"schedule_type\": 3, \"schedule_interval\":", "== cr_cat.call_count assert 1 == get_cat.call_count assert scheduler._max_running_tasks is not", "json.loads(args[1]) assert payload == p @pytest.mark.asyncio async def test_cancel_task_all_ok(self, mocker):", "pytest.raises(NotReadyError) as excinfo: await scheduler.start() with pytest.raises(NotReadyError) as excinfo: await", "MagicMock can't be used in 'await' expression\"), call('An exception was", "0, \"exclusive\": \"t\", \"enabled\": \"f\" }, { \"id\": \"ada12840-68d3-11e7-907b-a6006ad3dba0\", \"process_name\":", "PI north') == args args, kwargs = log_info.call_args_list[0] assert \"Process", "await scheduler._get_schedules() assert len(scheduler._storage_async.schedules) == 
len(scheduler._schedules) @pytest.mark.asyncio async def test__get_schedules_exception(self,", "} # Changed in version 3.8: patch() now returns an", "scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) schedule_id", "Schedule.Type.INTERVAL, 'repeat': 30.0, 'enabled': True, 'exclusive': False}})] audit_logger.assert_has_calls(calls, any_order=True) assert", "execution\", 'OMF to PI north') == log_info.call_args_list[0] args, kwargs =", "schedule_row) # WHEN await scheduler.save_schedule(schedule) # THEN assert len(scheduler._storage_async.schedules) +", "scheduler.delete_schedule(sch_id) # THEN # Now confirm there is one schedule", "2.0\" __version__ = \"${VERSION}\" async def mock_task(): return \"\" async", "assert str(ex).endswith(\"name can not be empty\") with pytest.raises(ValueError) as ex:", "assert scheduler._ready is False # WHEN await scheduler.start() # THEN", "def test_enable_schedule_already_enabled(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error,", "# THEN assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) assert 1 == audit_logger.call_count", "== len(schedules) @pytest.mark.asyncio async def test_get_schedule(self, mocker): # GIVEN scheduler,", "assert 2 == log_info.call_count # args, kwargs = log_info.call_args_list[0] #", "str(ex).endswith('repeat must be of type datetime.timedelta') with pytest.raises(ValueError) as ex:", "# THEN assert scheduler._check_processes_pending is False # WHEN # Check", "@pytest.mark.asyncio async def test_get_schedule_exception(self, mocker): # GIVEN scheduler, schedule, log_info,", "# WHEN await scheduler.save_schedule(schedule, is_enabled_modified=True) # THEN assert len(scheduler._storage_async.schedules) +", "as ex: temp_schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.repeat = 1234 await", "@pytest.mark.asyncio async def test_enable_schedule(self, mocker): # GIVEN 
scheduler, schedule, log_info,", "queue_task.call_count calls = [call(\"Enabled Schedule '%s/%s' process '%s'\\n\", 'backup hourly',", "north') == args args, kwargs = log_info.call_args_list[0] assert \"Process started:", "log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) # WHEN #", "Scheduler._scheduler_loop %s', \"object MagicMock can't be used in 'await' expression\")]", "north\", type=Schedule.Type.INTERVAL, repeat=datetime.timedelta(seconds=30), repeat_seconds=30, time=None, day=None, exclusive=True, enabled=True) mock_schedules[mock_schedule.id] =", "}, { \"name\": \"backup\", \"script\": [ \"tasks/backup_postgres\" ] }, {", "core_management_port=None) # WHEN await scheduler._get_process_scripts() # THEN assert len(scheduler._storage_async.scheduled_processes) ==", "\"YYYY-MM-DD HH24:MI:SS.MS\"}, {\"alias\": \"end_time\", \"column\": \"end_time\", \"format\": \"YYYY-MM-DD HH24:MI:SS.MS\"}, \"reason\",", "_max_running_tasks=10, _start_time=current_time-3600) await scheduler._get_schedules() sch_id = uuid.UUID(\"2176eb68-7303-11e7-8cf7-a6006ad3dba0\") # stat collector", "\"exception\") random_schedule_id = uuid.uuid4() # WHEN await scheduler.disable_schedule(random_schedule_id) # THEN", "in args2 @pytest.mark.asyncio async def test__schedule_next_task(self, mocker): # TODO: Mandatory", "async def test_disable_schedule_wrong_schedule_id(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage", "None assert tasks[0].exit_code is '0' @pytest.mark.asyncio async def test_get_tasks_exception(self, mocker):", "in args assert 'North Readings to PI' in args @pytest.mark.asyncio", "# THEN assert len(scheduler._storage_async.schedules) == len(schedules) @pytest.mark.asyncio async def test_get_schedule(self,", "mocker.patch.object(AuditLogger, 'information', return_value=asyncio.ensure_future(mock_task())) first_task = mocker.patch.object(scheduler, '_schedule_first_task') resume_sch = mocker.patch.object(scheduler,", 
"'_purge_tasks_task', return_value=asyncio.ensure_future(asyncio.sleep(.1))) mocker.patch.object(scheduler, '_scheduler_loop_task', return_value=asyncio.ensure_future(asyncio.sleep(.1))) current_time = time.time() mocker.patch.multiple(scheduler, _core_management_port=9999,", "# WHEN # THEN task_id = uuid.uuid4() with pytest.raises(Exception) as", "uuid.UUID(\"d1631422-9ec6-11e7-abc4-cec278b6b50a\") # backup # WHEN # THEN with pytest.raises(ScheduleNotFoundError) as", "9999, \"management_port\": 9999, \"address\": \"0.0.0.0\", \"protocol\": \"http\" } @classmethod async", "= mocker.patch.object(AuditLogger, 'information', return_value=asyncio.ensure_future(mock_task())) # WHEN status, message = await", "# As part of scheduler._get_schedules(), scheduler._schedule_first_task() also gets executed, hence", "now returns an AsyncMock if the target is an async", "scheduler.save_schedule(temp_schedule) del temp_schedule assert str(ex).endswith('exclusive can not be None') with", "log_error, log_debug = await self.scheduler_fixture(mocker) log_debug = mocker.patch.object(scheduler._logger, 'debug', side_effect=Exception())", "\"process_name\": \"backup\", \"schedule_name\": \"backup hourly\", \"schedule_type\": 3, \"schedule_interval\": \"01:00:00\", \"schedule_time\":", "scheduler.enable_schedule(uuid.uuid4()) with pytest.raises(NotReadyError) as excinfo: await scheduler.queue_task(uuid.uuid4()) with pytest.raises(NotReadyError) as", "__license__ = \"Apache 2.0\" __version__ = \"${VERSION}\" async def mock_task():", "THEN with pytest.raises(TaskNotRunningError) as excinfo: await scheduler.cancel_task(uuid.uuid4()) @pytest.mark.asyncio async def", "def test__get_schedules(self, test_interval, is_exception, mocker): # GIVEN scheduler = Scheduler()", "of scheduler._get_schedules(), scheduler._schedule_first_task() also gets executed, hence # \"stat collector\"", "Schedule.Type.INTERVAL, 'processName': 'backup', 'exclusive': True, 'repeat': 3600.0, 'enabled': True}})] 
audit_logger.assert_has_calls(calls,", "scheduler.get_task(task_id) # THEN assert schedule.process_name == task.process_name assert task.reason is", "log_error = mocker.patch.object(scheduler._logger, \"error\") log_debug = mocker.patch.object(scheduler._logger, \"debug\") log_info =", "await scheduler.delete_schedule(sch_id) # THEN # Now confirm there is one", "Storage\", \"type\": \"Storage\", \"service_port\": 9999, \"management_port\": 9999, \"address\": \"0.0.0.0\", \"protocol\":", "\"5d7fed92-fb9a-11e7-8c3f-9a214cf093ae\", \"process_name\": \"North Readings to OCS\", \"schedule_name\": \"OMF to OCS", "== len(scheduler._schedule_executions[schedule.id].task_processes) assert 1 == log_info.call_count # assert call(\"Queued schedule", "assert scheduler._max_running_tasks is not None assert scheduler._max_completed_task_age is not None", "%s\", str(random_schedule_id) log_exception.assert_called_with(*log_params) @pytest.mark.asyncio async def test_disable_schedule_already_disabled(self, mocker): # GIVEN", "\"debug\", side_effect=Exception()) log_exception = mocker.patch.object(scheduler._logger, \"exception\") mocker.patch.object(scheduler, '_schedule_first_task', side_effect=Exception()) #", "await scheduler._wait_for_task_completion(mock_task_process) # THEN # After task completion, sleep above,", "async def test_disable_schedule(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage", "def test__read_config(self, mocker): async def get_cat(): return { \"max_running_tasks\": {", "scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) audit_logger", "(scheduler._schedules[sch_id]).enabled is True assert 1 == queue_task.call_count calls = [call(\"Enabled", "not be empty\") with pytest.raises(ValueError) as ex: temp_schedule = scheduler._schedule_row_to_schedule(schedule_id,", "schedule = await scheduler.get_schedule(schedule_id) @pytest.mark.asyncio async def test_save_schedule_new(self, mocker): 
@asyncio.coroutine", "excinfo: await scheduler.cancel_task(uuid.uuid4()) @pytest.mark.asyncio async def test_not_ready_and_paused(self, mocker): # GIVEN", "core_management_port=None) mocker.patch.multiple(scheduler, _ready=True, _paused=False) mocker.patch.object(scheduler, '_max_completed_task_age', datetime.datetime.now()) # WHEN await", "pytest.raises(NotReadyError) as excinfo: await scheduler.queue_task(uuid.uuid4()) with pytest.raises(NotReadyError) as excinfo: await", "(scheduler._schedules[sch_id]).id == sch_id assert (scheduler._schedules[sch_id]).enabled is False assert 2 ==", "str(ex).endswith('time must be of type datetime.time') with pytest.raises(ValueError) as ex:", "the tasks table that do not have a status of", "assert retval is True assert scheduler._schedule_executions is None assert scheduler._task_processes", "def test_enable_schedule(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error,", "time_before_call assert 4 == log_info.call_count args0, kwargs0 = log_info.call_args_list[0] args1,", "@pytest.mark.asyncio async def test__resume_check_schedules(self, mocker): # GIVEN scheduler = Scheduler()", "assert str(ex).endswith('day must be between 1 and 7') @pytest.mark.asyncio @pytest.mark.skip(reason=\"To", "log_debug = await self.scheduler_fixture(mocker) # Assert that there is no", "next_dt = datetime.datetime.fromtimestamp(sch_execution.next_start_time) next_dt += datetime.timedelta(seconds=sch.repeat_seconds) scheduler._schedule_next_timed_task(sch, sch_execution, next_dt) time_after_call", "0 for Interval Schedule assert schedule_row[4] is 0 # 0", "uuid.uuid4() # WHEN # THEN with pytest.raises(ScheduleNotFoundError): schedule = await", "schedule.repeat == datetime.timedelta(0, 3600) assert schedule.exclusive is True assert schedule.enabled", "# THEN with pytest.raises(ValueError) as ex: temp_schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row)", "== \"Schedule successfully disabled\" assert 
(scheduler._schedules[sch_id]).id == sch_id assert (scheduler._schedules[sch_id]).enabled", "None await scheduler._read_config() # THEN assert 1 == cr_cat.call_count assert", "\"f\", \"enabled\": \"t\" }, { \"id\": \"d1631422-9ec6-11e7-abc4-cec278b6b50a\", \"process_name\": \"backup\", \"schedule_name\":", "during System tests.\") async def test__scheduler_loop(self, mocker): pass @pytest.mark.asyncio async", "test_save_schedule_new_with_enable_modified(self, mocker): @asyncio.coroutine def mock_coro(): return \"\" # GIVEN scheduler,", "True # Now queue task and assert that the North", "WHEN # THEN with pytest.raises(Exception): await scheduler._get_process_scripts() log_args = 'Query", "async def test__terminate_child_processes(self, mocker): pass class MockStorage(StorageClientAsync): def __init__(self, core_management_host=None,", "is not None assert 3 == log_info.call_count args0, kwargs0 =", "schedule.process_name == task.process_name assert task.reason is '' assert task.state is", "mocker.patch.object(scheduler, '_ready', True) mocker.patch.object(scheduler, '_resume_check_schedules') # Assert that there is", "log_debug = mocker.patch.object(scheduler._logger, 'debug', side_effect=Exception()) # WHEN # THEN task_id", "== len(scheduler._schedules) assert 1 == audit_logger.call_count calls = [call('SCHCH', {'schedule':", "= uuid.UUID(\"cea17db8-6ccc-11e7-907b-a6006ad3dba0\") # backup mocker.patch.object(scheduler, '_ready', True) mocker.patch.object(scheduler, '_resume_check_schedules') #", "= 'Query failed: %s', 'schedules' log_exception.assert_called_once_with(*log_args) @pytest.mark.asyncio async def test__read_storage(self,", "= [ { \"name\": \"purge\", \"script\": [ \"tasks/purge\" ] },", "args0, kwargs0 = log_info.call_args_list[0] args1, kwargs1 = log_info.call_args_list[1] args2, kwargs2", "# TODO: Find why these exceptions are being raised despite", "assert scheduler._ready is False assert scheduler._paused is False assert scheduler._start_time", 
"assert 0 == len(scheduler._schedule_executions[schedule.id].task_processes) await scheduler._start_task(schedule) # Confirm that task", "= sch_execution.next_start_time # THEN assert time_after_call > time.mktime(curr_time.timetuple()) assert 4", "that task has started assert 1 == len(scheduler._schedule_executions[schedule.id].task_processes) task_id =", "already disabled\", str(sch_id) log_info.assert_called_with(*log_params) @pytest.mark.asyncio async def test_enable_schedule(self, mocker): #", "assert schedule.schedule_type == Schedule.Type.MANUAL assert schedule.repeat == datetime.timedelta(0, 3600) assert", "not been made assert scheduler._schedule_executions[schedule.id].task_processes[task_id].cancel_requested is None # WHEN await", "assert schedule.process_name == task.process_name assert task.reason is '' assert task.state", "True assert len(scheduler._storage_async.scheduled_processes) == len(scheduler._process_scripts) assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) calls", "has started yet assert 0 == len(scheduler._schedule_executions[schedule.id].task_processes) await scheduler._start_task(schedule) #", "is False # WHEN # Check ELSE part mocker.patch.object(scheduler, '_scheduler_loop_sleep_task',", "OCS\", \"script\": [ \"tasks/north\", \"--stream_id\", \"4\", \"--debug_level\", \"1\" ] },", "} @classmethod async def insert_into_tbl(cls, table_name, payload): pass @classmethod async", "= \"Apache 2.0\" __version__ = \"${VERSION}\" async def mock_task(): return", "THEN assert retval is True assert scheduler._schedule_executions is None assert", "task = await scheduler.get_task(task_id) # THEN assert schedule.process_name == task.process_name", "\"type\": \"Storage\", \"service_port\": 9999, \"management_port\": 9999, \"address\": \"0.0.0.0\", \"protocol\": \"http\"", "test_start(self, mocker): # TODO: Mandatory - Add negative tests for", "temp_schedule assert str(ex).endswith('time must be of type datetime.time') with 
pytest.raises(ValueError)", "calls = [call('Starting'), call('Starting Scheduler: Management port received is %d',", "'0' @pytest.mark.skip(\"Need a suitable fixture\") @pytest.mark.asyncio async def test_get_task_not_found(self, mocker):", "north\", \"schedule_type\": 3, \"schedule_interval\": \"00:00:30\", \"schedule_time\": \"\", \"schedule_day\": 0, \"exclusive\":", "scheduler.save_schedule(temp_schedule) del temp_schedule assert str(ex).endswith('repeat must be of type datetime.timedelta')", "# WHEN # THEN with pytest.raises(Exception): await scheduler._get_schedules() log_args =", "time_before_call = sch_execution.next_start_time # WHEN next_dt = datetime.datetime.fromtimestamp(sch_execution.next_start_time) next_dt +=", "mocker.patch.object(scheduler._logger, 'debug', side_effect=Exception()) # WHEN # THEN task_id = uuid.uuid4()", "MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.object(scheduler, '_schedule_first_task') # log_info = mocker.patch.object(scheduler._logger, \"info\") await", "# WHEN # Now delete schedule await scheduler.delete_schedule(sch_id) # THEN", "this list. assert 'stats collection' in args3 @pytest.mark.asyncio async def", "as excinfo: await scheduler.cancel_task(uuid.uuid4()) @pytest.mark.skip(\"_terminate_child_processes() not fit for unit test.\")", "return_value=\"North Readings to PI\") mocker.patch.object(scheduler, '_wait_for_task_completion') # Confirm that task", "retval is True assert scheduler._schedule_executions is None assert scheduler._task_processes is", "\"schedule_day\": 0, \"exclusive\": \"t\", \"enabled\": \"f\" }, { \"id\": \"ada12840-68d3-11e7-907b-a6006ad3dba0\",", "an AsyncMock if the target is an async function. 
if", "cr_cat.call_count assert 1 == get_cat.call_count assert scheduler._max_running_tasks is not None", "def test__mark_tasks_interrupted(self, mocker): pass @pytest.mark.asyncio async def test__read_config(self, mocker): async", "\"type\": \"integer\", \"default\": str(Scheduler._DEFAULT_MAX_COMPLETED_TASK_AGE_DAYS), \"value\": str(Scheduler._DEFAULT_MAX_COMPLETED_TASK_AGE_DAYS) }, } # Changed", "# WHEN # Now delete schedule with pytest.raises(RuntimeWarning): await scheduler.delete_schedule(sch_id)", "process_name='TestProcess') schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) # WHEN await scheduler.save_schedule(schedule, is_enabled_modified=True)", "\"process_name\": \"North Readings to PI\", \"state\": 1, \"start_time\": \"2018-02-06 13:28:14.477868\",", "\"schedule_day\": 0, \"exclusive\": \"t\", \"enabled\": \"t\" }, { \"id\": \"2176eb68-7303-11e7-8cf7-a6006ad3dba0\",", "True assert message == \"Schedule is already enabled\" assert (scheduler._schedules[sch_id]).id", "scheduler.save_schedule(temp_schedule) del temp_schedule assert str(ex).endswith('day must be between 1 and", "WHEN # Now delete schedule with pytest.raises(RuntimeWarning): await scheduler.delete_schedule(sch_id) #", "of running\", \"type\": \"integer\", \"default\": str(Scheduler._DEFAULT_MAX_COMPLETED_TASK_AGE_DAYS), \"value\": str(Scheduler._DEFAULT_MAX_COMPLETED_TASK_AGE_DAYS) }, }", "temp_schedule.name = None await scheduler.save_schedule(temp_schedule) del temp_schedule assert str(ex).endswith(\"name can", "mocker.patch.object(scheduler, '_task_processes') log_info = mocker.patch.object(scheduler._logger, \"info\") sch_id = uuid.UUID(\"d1631422-9ec6-11e7-abc4-cec278b6b50a\") #", "scheduler.delete_schedule(uuid.uuid4()) with pytest.raises(NotReadyError) as excinfo: await scheduler.get_running_tasks() with pytest.raises(NotReadyError) as", "await scheduler.get_schedules() with pytest.raises(NotReadyError) as excinfo: await scheduler.get_schedule(uuid.uuid4()) with 
pytest.raises(NotReadyError)", "log_debug = mocker.patch.object(scheduler._logger, \"debug\") log_info = mocker.patch.object(scheduler._logger, \"info\") current_time =", "after implementation of above test test__read_config() mocker.patch.object(scheduler, '_read_config', return_value=asyncio.ensure_future(mock_task())) assert", "scheduler._resume_check_schedules() # THEN assert scheduler._check_processes_pending is False # WHEN #", "temp_schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.time = 1234 await scheduler.save_schedule(temp_schedule) del", "Readings to PI' in args args, kwargs = log_info.call_args_list[1] assert", "log_exception.assert_called_once_with(*log_args) @pytest.mark.asyncio @pytest.mark.parametrize(\"test_interval, is_exception\", [ ('\"Blah\" 0 days', True), ('12:30:11',", "{ \"description\": \"The maximum age, in days (based on the", "already disabled\".format(str(sch_id)) assert (scheduler._schedules[sch_id]).id == sch_id assert (scheduler._schedules[sch_id]).enabled is False", "args, kwargs = log_info.call_args_list[0] assert 'OMF to PI north' in", "# WHEN await scheduler.queue_task(sch_id) # THEN assert isinstance(scheduler._schedule_executions[sch_id], scheduler._ScheduleExecution) #", "mocker.patch.object(scheduler, '_resume_check_schedules') mocker.patch.object(scheduler, '_schedule_next_task') mocker.patch.multiple(scheduler, _schedules=mock_schedules, _task_processes=mock_task_processes, _schedule_executions=mock_schedule_executions) mocker.patch.object(scheduler, '_process_scripts',", "disabled\".format(str(sch_id)) assert (scheduler._schedules[sch_id]).id == sch_id assert (scheduler._schedules[sch_id]).enabled is False log_params", "1 == len(scheduler._schedule_executions[schedule.id].task_processes) assert 1 == log_info.call_count # assert call(\"Queued", "mocker.patch.object(scheduler, \"disable_schedule\", return_value=mock_coro()) # WHEN await scheduler.save_schedule(schedule, 
is_enabled_modified=True) # THEN", "return_value=mock_coro()) schedule_id = uuid.uuid4() schedule_row = scheduler._ScheduleRow( id=schedule_id, name='Test Schedule',", "process_name='TestProcess') # WHEN # THEN with pytest.raises(ValueError) as ex: temp_schedule", "\"exclusive\": \"t\", \"enabled\": \"t\" }, { \"id\": \"5d7fed92-fb9a-11e7-8c3f-9a214cf093ae\", \"process_name\": \"North", "mock_task_process mocker.patch.object(scheduler, '_resume_check_schedules') mocker.patch.object(scheduler, '_schedule_next_task') mocker.patch.multiple(scheduler, _schedules=mock_schedules, _task_processes=mock_task_processes, _schedule_executions=mock_schedule_executions) mocker.patch.object(scheduler,", "= None await scheduler.save_schedule(temp_schedule) del temp_schedule assert str(ex).endswith(\"name can not", "PI\", \"state\": 1, \"start_time\": \"2018-02-06 13:28:14.477868\", \"end_time\": \"2018-02-06 13:28:14.856375\", \"exit_code\":", "1, \"start_time\": \"2018-02-06 13:28:14.477868\", \"end_time\": \"2018-02-06 13:28:14.856375\", \"exit_code\": \"0\", \"reason\":", "scheduler._resume_check_schedules() # THEN assert scheduler._check_processes_pending is True @pytest.mark.asyncio async def", "sch_id = uuid.UUID(\"cea17db8-6ccc-11e7-907b-a6006ad3dba0\") # backup mocker.patch.object(scheduler, '_ready', True) mocker.patch.object(scheduler, '_resume_check_schedules')", "\"name\": \"purge\", \"script\": [ \"tasks/purge\" ] }, { \"name\": \"stats", "\"service_port\": 9999, \"management_port\": 9999, \"address\": \"0.0.0.0\", \"protocol\": \"http\" } @classmethod", "1 == audit_logger.call_count calls = [call('SCHCH', {'schedule': {'name': 'OMF to", "test__mark_tasks_interrupted(self, mocker): pass @pytest.mark.asyncio async def test__read_config(self, mocker): async def", "await scheduler._check_schedules() # THEN assert earliest_start_time is not None assert", "assert str(ex).endswith('repeat must be of type datetime.timedelta') with pytest.raises(ValueError) as", "day=0, 
time=0, repeat=10, repeat_seconds=10, exclusive=False, enabled=True, process_name='TestProcess') # WHEN schedule", "Schedule.Type.INTERVAL, 'exclusive': True, 'processName': 'North Readings to PI'}})] audit_logger.assert_has_calls(calls, any_order=True)", "call('Stopped')] log_info.assert_has_calls(calls, any_order=True) # TODO: Find why these exceptions are", "\"enable_schedule\", return_value=mock_coro()) disable_schedule = mocker.patch.object(scheduler, \"disable_schedule\", return_value=mock_coro()) # WHEN await", "len(scheduler._schedules) # WHEN # Now delete schedule with pytest.raises(RuntimeWarning): await", "core_management_port=None) mocker.patch.object(scheduler, '_schedule_first_task') # WHEN await scheduler._read_storage() # THEN assert", "for Interval Schedule assert schedule_row[4] is 0 # 0 for", "pytest.raises(RuntimeWarning): await scheduler.delete_schedule(sch_id) # THEN # Now confirm no schedule", "mocker): async def get_cat(): return { \"max_running_tasks\": { \"description\": \"The", "audit_logger.assert_has_calls(calls, any_order=True) @pytest.mark.asyncio async def test_enable_schedule_already_enabled(self, mocker): # GIVEN scheduler,", "time_after_call = sch_execution.next_start_time # THEN assert time_after_call > time_before_call assert", "are 14 schedules assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) # WHEN #", "_rv = await mock_process() else: _rv = asyncio.ensure_future(mock_process()) scheduler =", "None await scheduler.save_schedule(temp_schedule) del temp_schedule assert str(ex).endswith(\"name can not be", "def test_get_task_exception(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error,", "pass @pytest.mark.asyncio async def test__read_config(self, mocker): async def get_cat(): return", "== log_info.call_count args0, kwargs0 = log_info.call_args_list[0] args1, kwargs1 = log_info.call_args_list[1]", "str(sch_id) log_info.assert_called_with(*log_params) @pytest.mark.asyncio 
async def test_enable_schedule(self, mocker): # GIVEN scheduler,", "= MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.object(scheduler, '_schedule_first_task') mocker.patch.object(scheduler, '_ready', True) mocker.patch.object(scheduler, '_resume_check_schedules')", "MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) log_debug = mocker.patch.object(scheduler._logger, \"debug\")", "a rows \" \"in the tasks table that do not", "mocker.patch.object(scheduler._logger, \"info\") await scheduler._get_schedules() sch_id = uuid.UUID(\"cea17db8-6ccc-11e7-907b-a6006ad3dba0\") # backup mocker.patch.object(scheduler,", "asyncio.ensure_future(mock_process()) mocker.patch.object(asyncio, 'create_subprocess_exec', return_value=_rv) mocker.patch.object(asyncio, 'ensure_future', return_value=asyncio.ensure_future(mock_task())) mocker.patch.object(scheduler, '_resume_check_schedules') mocker.patch.object(scheduler,", "WHEN await scheduler.queue_task(sch_id) # THEN assert isinstance(scheduler._schedule_executions[sch_id], scheduler._ScheduleExecution) # log_params", "to PI north\", type=Schedule.Type.INTERVAL, repeat=datetime.timedelta(seconds=30), repeat_seconds=30, time=None, day=None, exclusive=True, enabled=True)", "_schedule_executions=mock_schedule_executions) mocker.patch.object(scheduler, '_process_scripts', return_value=\"North Readings to PI\") # WHEN await", "8: _rv = await get_cat() else: _rv = asyncio.ensure_future(get_cat()) #", "time_after_call > time_before_call assert 4 == log_info.call_count args0, kwargs0 =", "status of running\", \"type\": \"integer\", \"default\": str(Scheduler._DEFAULT_MAX_COMPLETED_TASK_AGE_DAYS), \"value\": str(Scheduler._DEFAULT_MAX_COMPLETED_TASK_AGE_DAYS) },", "is not None @pytest.mark.asyncio async def test_start(self, mocker): # TODO:", "pytest.raises(ScheduleNotFoundError) as excinfo: await 
scheduler.delete_schedule(uuid.uuid4()) @pytest.mark.asyncio async def test_delete_schedule_not_found(self, mocker):", "await scheduler.save_schedule(schedule, is_enabled_modified=False) # THEN assert 1 == disable_schedule.call_count @pytest.mark.asyncio", "repeat_seconds=30, exclusive=False, enabled=True, process_name='TestProcess') # WHEN # THEN with pytest.raises(ValueError)", "[\"id\", \"process_name\", \"schedule_name\", \"state\", {\"alias\": \"start_time\", \"column\": \"start_time\", \"format\": \"YYYY-MM-DD", "has started assert 1 == len(scheduler._schedule_executions[schedule.id].task_processes) # WHEN tasks =", "'await' expression\"), call('An exception was raised by Scheduler._scheduler_loop %s', \"object", "100} args, kwargs = log_exception.call_args assert 'Query failed: %s' ==", "isinstance(schedule, Schedule) assert schedule.schedule_id == schedule_row[0] assert schedule.name == schedule_row[1]", "\"Schedule %s already disabled\", str(sch_id) log_info.assert_called_with(*log_params) @pytest.mark.asyncio async def test_enable_schedule(self,", "Readings to PI\", \"schedule_name\": \"OMF to PI north\", \"schedule_type\": 3,", "mocker.patch.object(scheduler, '_schedule_first_task') mocker.patch.object(scheduler, '_scheduler_loop', return_value=asyncio.ensure_future(mock_task())) mocker.patch.multiple(scheduler, _core_management_port=9999, _core_management_host=\"0.0.0.0\", current_time=current_time -", "uuid.uuid4() schedule_row = scheduler._ScheduleRow( id=schedule_id, name='Test Schedule', type=Schedule.Type.INTERVAL, day=0, time=0,", "mocker.patch.object(scheduler, '_paused', True) # WHEN # THEN with pytest.raises(NotReadyError) as", "not None assert tasks[0].cancel_requested is None assert tasks[0].start_time is not", "await scheduler.get_running_tasks() # THEN assert 1 == len(tasks) assert schedule.process_name", "mocker.patch.object(scheduler, '_scheduler_loop_sleep_task', None) scheduler._resume_check_schedules() # THEN assert 
scheduler._check_processes_pending is True", "== 3 and sys.version_info.minor >= 8: _rv = await mock_process()", "# THEN # After task completion, sleep above, no task", "exception was raised by Scheduler._scheduler_loop %s', \"object MagicMock can't be", "{ \"name\": \"stats collector\", \"script\": [ \"tasks/statistics\" ] }, {", "{'name': 'Test Schedule', 'processName': 'TestProcess', 'type': Schedule.Type.INTERVAL, 'repeat': 30.0, 'enabled':", "table_name == 'tasks': return { \"count\": len(MockStorageAsync.tasks), \"rows\": MockStorageAsync.tasks }", "temp_schedule assert str(ex).endswith('repeat must be of type datetime.timedelta') with pytest.raises(ValueError)", "\"schedule_type\": 1, \"schedule_interval\": \"00:00:00\", \"schedule_time\": \"\", \"schedule_day\": 0, \"exclusive\": \"t\",", "log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) # WHEN schedules", "args assert 'North Readings to PI' in args args, kwargs", "HH24:MI:SS.MS\", \"column\": \"start_time\"}, {\"alias\": \"end_time\", \"format\": \"YYYY-MM-DD HH24:MI:SS.MS\", \"column\": \"end_time\"},", "assert task.cancel_requested is None assert task.start_time is not None assert", "log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) schedule_id = uuid.uuid4()", "is False log_params = \"Schedule %s already disabled\", str(sch_id) log_info.assert_called_with(*log_params)", "== \"schedules\": return {\"count\": 1} @classmethod async def delete_from_tbl(cls, table_name,", "mocker.patch.object(AuditLogger, 'information', return_value=asyncio.ensure_future(mock_task())) # WHEN status, message = await scheduler.enable_schedule(sch_id)", "task.start_time is not None assert task.end_time is not None assert", "exclusive=True, enabled=True) mocker.patch.object(scheduler, '_ready', True) mocker.patch.object(scheduler, '_resume_check_schedules') # Assert that", "name='Test Schedule', type=Schedule.Type.INTERVAL, day=0, time=0, 
repeat=datetime.timedelta(seconds=30), repeat_seconds=30, exclusive=False, enabled=True, process_name='TestProcess')", "sch_id = uuid.UUID(\"ada12840-68d3-11e7-907b-a6006ad3dba0\") #Coap mocker.patch.object(scheduler, 'queue_task', return_value=asyncio.ensure_future(mock_task())) # WHEN status,", "PI north\", type=Schedule.Type.INTERVAL, repeat=datetime.timedelta(seconds=30), repeat_seconds=30, time=None, day=None, exclusive=True, enabled=True) log_exception", "PI north' in args2 # As part of scheduler._get_schedules(), scheduler._schedule_first_task()", "\"debug\", side_effect=Exception()) log_exception = mocker.patch.object(scheduler._logger, \"exception\") # WHEN # THEN", "excinfo: await scheduler.get_schedules() with pytest.raises(NotReadyError) as excinfo: await scheduler.get_schedule(uuid.uuid4()) with", "{ \"id\": \"ada12840-68d3-11e7-907b-a6006ad3dba0\", \"process_name\": \"COAP\", \"schedule_name\": \"COAP listener south\", \"schedule_type\":", "\"1 day 00:00:40\", \"schedule_time\": \"\", \"schedule_day\": 0, \"exclusive\": \"t\", \"enabled\":", "failed: %s', 'schedules' log_exception.assert_called_once_with(*log_args) @pytest.mark.asyncio async def test__read_storage(self, mocker): #", "= scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.exclusive = None await scheduler.save_schedule(temp_schedule) del temp_schedule", "log_debug = await self.scheduler_fixture(mocker) schedule_id = uuid.UUID(\"cea17db8-6ccc-11e7-907b-a6006ad3dba0\") # purge schedule", "\"address\": \"0.0.0.0\", \"protocol\": \"http\" } @classmethod async def insert_into_tbl(cls, table_name,", "if table_name == 'scheduled_processes': return { \"count\": len(MockStorageAsync.scheduled_processes), \"rows\": MockStorageAsync.scheduled_processes", "\"id\": \"d1631422-9ec6-11e7-abc4-cec278b6b50a\", \"process_name\": \"backup\", \"schedule_name\": \"backup hourly\", \"schedule_type\": 3, \"schedule_interval\":", "mocker): # GIVEN scheduler = Scheduler() 
scheduler._storage = MockStorage(core_management_host=None, core_management_port=None)", "0 == len(scheduler._schedule_executions[schedule.id].task_processes) await scheduler._start_task(schedule) # Confirm that task has", "the task has been queued await scheduler.queue_task(schedule.id) assert isinstance(scheduler._schedule_executions[schedule.id], scheduler._ScheduleExecution)", "3, \"exclusive\": \"f\", \"enabled\": \"t\" }, { \"id\": \"d1631422-9ec6-11e7-abc4-cec278b6b50a\", \"process_name\":", "PI north') == log_info.call_args_list[0] args, kwargs = log_info.call_args_list[0] assert \"Process", "test_get_running_tasks(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug", "THEN assert time_after_call > time.mktime(curr_time.timetuple()) assert 4 == log_info.call_count args0,", "\"Schedule %s already enabled\", str(sch_id) log_info.assert_called_with(*log_params) @pytest.mark.asyncio async def test_enable_schedule_wrong_schedule_id(self,", "def test_start(self, mocker): # TODO: Mandatory - Add negative tests", "port received is %d', 9999)] log_info.assert_has_calls(calls, any_order=True) calls = [call('Database", "WHEN await scheduler.save_schedule(schedule, is_enabled_modified=True) # THEN assert len(scheduler._storage_async.schedules) + 1", "core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.object(scheduler, '_schedule_first_task') mocker.patch.object(scheduler, '_ready', True)", "await scheduler._get_process_scripts() log_args = 'Query failed: %s', 'scheduled_processes' log_exception.assert_called_once_with(*log_args) @pytest.mark.asyncio", "time_before_call = sch_execution.next_start_time # WHEN scheduler._schedule_next_task(sch) time_after_call = sch_execution.next_start_time #", "type=Schedule.Type.TIMED, day=1, time=datetime.time(), repeat=datetime.timedelta(seconds=30), repeat_seconds=30, exclusive=False, enabled=True, 
process_name='TestProcess') schedule =", "True) # WHEN processes = await scheduler.get_scheduled_processes() # THEN assert", "'queue_task', return_value=asyncio.ensure_future(mock_task())) # WHEN status, message = await scheduler.enable_schedule(sch_id) #", "test_disable_schedule_already_disabled(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None,", "audit_logger = mocker.patch.object(AuditLogger, 'information', return_value=asyncio.ensure_future(mock_task())) # WHEN status, message =", "# assert call(\"Queued schedule '%s' for execution\", 'OMF to PI", "-*- # FLEDGE_BEGIN # See: http://fledge-iot.readthedocs.io/ # FLEDGE_END import asyncio", "{ \"max_running_tasks\": { \"description\": \"The maximum number of tasks that", "== enable_schedule.call_count assert 0 == disable_schedule.call_count # WHEN await scheduler.save_schedule(schedule,", "Readings to PI\", \"state\": 1, \"start_time\": \"2018-02-06 13:28:14.477868\", \"end_time\": \"2018-02-06", "# THEN if is_exception is True: with pytest.raises(Exception): await scheduler._get_schedules()", "assert status is True assert message == \"Schedule is already", "import copy import pytest from fledge.services.core.scheduler.scheduler import Scheduler, AuditLogger, ConfigurationManager", "assert status is True assert message == \"Schedule {} already", "WHEN schedules = await scheduler.get_schedules() # THEN assert len(scheduler._storage_async.schedules) ==", "log_exception = mocker.patch.object(scheduler._logger, \"exception\") new_schedules = copy.deepcopy(MockStorageAsync.schedules) new_schedules[5]['schedule_interval'] = test_interval", "'%s/%s' process '%s'\\n\", 'OMF to PI north', '2b614d26-760f-11e7-b5a5-be2e44b06b34', 'North Readings", "WHEN with pytest.raises(Exception) as excinfo: tasks = await scheduler.get_tasks() #", "= \"<NAME>\" __copyright__ = \"Copyright (c) 2017 OSIsoft, LLC\" __license__", "@pytest.mark.asyncio async def 
test_cancel_task_exception(self, mocker): # GIVEN scheduler, schedule, log_info,", "= scheduler._schedule_row_to_schedule(schedule_id, schedule_row) # WHEN await scheduler.save_schedule(schedule) # THEN assert", "= log_info.call_args_list[2] args3, kwargs3 = log_info.call_args_list[3] assert 'stats collection' in", "scheduler._get_schedules() mocker.patch.object(scheduler, '_ready', True) mocker.patch.object(scheduler, '_task_processes') log_exception = mocker.patch.object(scheduler._logger, \"exception\")", "True log_params = \"Schedule %s already enabled\", str(sch_id) log_info.assert_called_with(*log_params) @pytest.mark.asyncio", "0 days', True), ('12:30:11', False), ('0 day 12:30:11', False), ('1", "in main Scheduler class.\") async def test__mark_tasks_interrupted(self, mocker): pass @pytest.mark.asyncio", "IF part mocker.patch.object(scheduler, '_scheduler_loop_sleep_task', asyncio.Task(asyncio.sleep(5))) scheduler._resume_check_schedules() # THEN assert scheduler._check_processes_pending", "== len(scheduler._schedule_executions[schedule.id].task_processes) task_id = list(scheduler._schedule_executions[schedule.id].task_processes.keys())[0] # WHEN task = await", "port): return { \"id\": uuid.uuid4(), \"name\": \"Fledge Storage\", \"type\": \"Storage\",", "# WHEN # THEN with pytest.raises(ScheduleNotFoundError) as excinfo: await scheduler.queue_task(uuid.uuid4())", "mocker.patch.object(scheduler, '_ready', True) mocker.patch.object(scheduler, '_paused', False) mocker.patch.object(scheduler, '_process_scripts', return_value=\"North Readings", "None assert 3 == log_info.call_count args0, kwargs0 = log_info.call_args_list[0] args1,", "def test__schedule_first_task(self, mocker): # TODO: Mandatory - Add negative tests", "log_exception.assert_called_with(*log_params) @pytest.mark.asyncio async def test_disable_schedule_already_disabled(self, mocker): # GIVEN scheduler =", "# WHEN await scheduler._start_task(schedule) # THEN # Confirm that task", "call('Database 
command: %s', 'schedules')] log_debug.assert_has_calls(calls, any_order=True) @pytest.mark.asyncio async def test_stop(self,", "the target is an async function. if sys.version_info.major == 3", "None assert scheduler._process_scripts is None assert scheduler._ready is False assert", "THEN # Confirm that task has started assert 1 ==", "await scheduler.get_scheduled_processes() with pytest.raises(NotReadyError) as excinfo: await scheduler.get_schedules() with pytest.raises(NotReadyError)", "\"f\" }, { \"id\": \"ada12840-68d3-11e7-907b-a6006ad3dba0\", \"process_name\": \"COAP\", \"schedule_name\": \"COAP listener", "\"count\": len(MockStorageAsync.tasks), \"rows\": MockStorageAsync.tasks } @classmethod async def query_tbl(cls, table_name,", "scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) # WHEN await scheduler._get_process_scripts() # THEN", "mocker.patch.object(ConfigurationManager, \"get_category_all_items\", return_value=_rv) # WHEN assert scheduler._max_running_tasks is None assert", "# THEN assert 1 == len(tasks) assert schedule.process_name == tasks[0].process_name", "args[0] p = json.loads(args[1]) assert payload == p @pytest.mark.asyncio async", "pytest.raises(KeyError) as excinfo: assert scheduler._schedule_executions[schedule.id] is True # Now queue", "mocker.patch.object(scheduler, '_read_config', return_value=asyncio.ensure_future(mock_task())) assert scheduler._ready is False # WHEN await", "len(scheduler._storage_async.schedules) == len(scheduler._schedules) assert 1 == log_exception.call_count log_params = 'Attempt", "2017 OSIsoft, LLC\" __license__ = \"Apache 2.0\" __version__ = \"${VERSION}\"", "pytest.raises(NotReadyError) as excinfo: await scheduler.cancel_task(uuid.uuid4()) @pytest.mark.skip(\"_terminate_child_processes() not fit for unit", "MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, 
core_management_port=None) await scheduler._get_process_scripts() mocker.patch.object(scheduler, '_ready',", "'_paused', True) # WHEN # THEN with pytest.raises(NotReadyError) as excinfo:", "assert 1 == enable_schedule.call_count assert 0 == disable_schedule.call_count # WHEN", "'_task_processes') log_info = mocker.patch.object(scheduler._logger, \"info\") sch_id = uuid.UUID(\"d1631422-9ec6-11e7-abc4-cec278b6b50a\") # backup", "sch_id = uuid.UUID(\"d1631422-9ec6-11e7-abc4-cec278b6b50a\") # backup queue_task = mocker.patch.object(scheduler, 'queue_task', return_value=asyncio.ensure_future(mock_task()))", "MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) # WHEN await scheduler._get_process_scripts()", "can not be empty\") with pytest.raises(ValueError) as ex: temp_schedule =", "mocker.patch.object(scheduler, '_scheduler_loop_task', return_value=asyncio.ensure_future(asyncio.sleep(.1))) current_time = time.time() mocker.patch.multiple(scheduler, _core_management_port=9999, _core_management_host=\"0.0.0.0\", _start_time=current_time", "is '0' @pytest.mark.asyncio async def test_get_tasks_exception(self, mocker): # GIVEN scheduler,", "in 'await' expression\"), call('An exception was raised by Scheduler._scheduler_loop %s',", "= copy.deepcopy(MockStorageAsync.schedules) new_schedules[5]['schedule_interval'] = test_interval mocker.patch.object(MockStorageAsync, 'schedules', new_schedules) # WHEN", "scheduler.enable_schedule(random_schedule_id) # THEN log_params = \"No such Schedule %s\", str(random_schedule_id)", "await scheduler._get_schedules() schedule = scheduler._ScheduleRow( id=uuid.UUID(\"2b614d26-760f-11e7-b5a5-be2e44b06b34\"), process_name=\"North Readings to PI\",", "status is True assert message == \"Schedule is already enabled\"", "mocker.patch.multiple(scheduler, _purge_tasks_task=None, _last_task_purge_time=None) mocker.patch.object(scheduler, 
'purge_tasks', return_value=asyncio.ensure_future(mock_task())) # WHEN scheduler._check_purge_tasks() #", "\"debug\") log_info = mocker.patch.object(scheduler._logger, \"info\") current_time = time.time() mocker.patch.object(scheduler, '_schedule_first_task')", "ex: temp_schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.exclusive = None await scheduler.save_schedule(temp_schedule)", "MockStorageAsync.schedules } if table_name == 'scheduled_processes': return { \"count\": len(MockStorageAsync.scheduled_processes),", "started: Schedule '%s' process '%s' task %s pid %s, %s", "test_save_schedule_new(self, mocker): @asyncio.coroutine def mock_coro(): return \"\" # GIVEN scheduler,", "backup # WHEN # THEN with pytest.raises(ScheduleNotFoundError) as excinfo: await", "call(\"Queued schedule '%s' for execution\", 'OMF to PI north') ==", "== schedule_row[0] assert schedule.name == schedule_row[1] assert schedule.schedule_type == schedule_row[2]", "'time': '0:0:0', 'processName': 'TestProcess', 'type': Schedule.Type.TIMED}})] audit_logger.assert_has_calls(calls, any_order=True) assert 1", "await scheduler.enable_schedule(sch_id) # THEN assert status is True assert message", "\"where\": {\"column\": \"id\", \"condition\": \"=\", \"value\": str(task_id)}} args, kwargs =", "North task has been queued await scheduler.queue_task(schedule.id) assert isinstance(scheduler._schedule_executions[schedule.id], scheduler._ScheduleExecution)", "async def query_tbl(cls, table_name, query=None): if table_name == 'schedules': return", "such Schedule %s\", str(random_schedule_id) log_exception.assert_called_with(*log_params) @pytest.mark.asyncio async def test_queue_task(self, mocker):", "else: _rv = asyncio.ensure_future(mock_process()) mocker.patch.object(asyncio, 'create_subprocess_exec', return_value=_rv) mocker.patch.object(asyncio, 'ensure_future', return_value=asyncio.ensure_future(mock_task()))", "log_params = 'Attempt to delete an enabled 
Schedule %s. Not", "None) scheduler._resume_check_schedules() # THEN assert scheduler._check_processes_pending is True @pytest.mark.asyncio async", "await scheduler.get_schedules() # THEN assert len(scheduler._storage_async.schedules) == len(schedules) @pytest.mark.asyncio async", "to PI north') == log_info.call_args_list[0] args, kwargs = log_info.call_args_list[0] assert", "\"cea17db8-6ccc-11e7-907b-a6006ad3dba0\", \"process_name\": \"purge\", \"schedule_name\": \"purge\", \"schedule_type\": 4, \"schedule_interval\": \"01:00:00\", \"schedule_time\":", "assert 1 == resume_sch.call_count assert 1 == enable_schedule.call_count assert 0", "scheduler.get_schedule(schedule_id) @pytest.mark.asyncio async def test_save_schedule_new(self, mocker): @asyncio.coroutine def mock_coro(): return", "\"info\") mock_schedules = dict() mock_schedule = scheduler._ScheduleRow( id=uuid.UUID(\"2b614d26-760f-11e7-b5a5-be2e44b06b34\"), process_name=\"North Readings", "scheduler._get_process_scripts() # THEN assert len(scheduler._storage_async.scheduled_processes) == len(scheduler._process_scripts) @pytest.mark.asyncio async def", "await self.scheduler_fixture(mocker) audit_logger = mocker.patch.object(AuditLogger, 'information', return_value=asyncio.ensure_future(mock_task())) first_task = mocker.patch.object(scheduler,", "System tests.\") async def test__scheduler_loop(self, mocker): pass @pytest.mark.asyncio async def", "'ensure_future', return_value=asyncio.ensure_future(mock_task())) mocker.patch.object(scheduler, '_resume_check_schedules') mocker.patch.object(scheduler, '_process_scripts', return_value=\"North Readings to PI\")", "mocker.patch.object(scheduler, '_schedule_first_task') await scheduler._get_schedules() mocker.patch.object(scheduler, '_ready', True) mocker.patch.object(scheduler, '_task_processes') audit_logger", "mocker.patch.object(scheduler, '_ready', True) mocker.patch.object(scheduler, '_task_processes') audit_logger = mocker.patch.object(AuditLogger, 
'information', return_value=asyncio.ensure_future(mock_task()))", "'North Readings to PI' in args args, kwargs = log_info.call_args_list[1]", "'enabled': False, 'type': Schedule.Type.INTERVAL, 'exclusive': True, 'processName': 'North Readings to", "log_info.call_args_list[0] assert 'OMF to PI north' in args assert 'North", "also gets executed, hence # \"stat collector\" appears twice in", "# Only valid for test_save_schedule_update if table_name == \"schedules\": return", "async def delete_from_tbl(cls, table_name, condition=None): pass @classmethod async def query_tbl_with_payload(cls,", "schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) mocker.patch.object(scheduler, '_ready',", "assert 1 == first_task.call_count assert 1 == resume_sch.call_count assert 1", "task queued for mock_schedule with pytest.raises(KeyError) as excinfo: assert scheduler._schedule_executions[schedule.id]", "appears twice in this list. assert 'stats collection' in args3", "datetime.timedelta(seconds=sch.repeat_seconds) scheduler._schedule_next_timed_task(sch, sch_execution, next_dt) time_after_call = sch_execution.next_start_time # THEN assert", "[call('Starting'), call('Starting Scheduler: Management port received is %d', 9999)] log_info.assert_has_calls(calls,", "FLEDGE_BEGIN # See: http://fledge-iot.readthedocs.io/ # FLEDGE_END import asyncio import datetime", "2 == log_info.call_count # args, kwargs = log_info.call_args_list[0] # assert", "len(scheduler._schedules) assert 1 == audit_logger.call_count calls =[call('SCHAD', {'schedule': {'name': 'Test", "time=0, repeat=datetime.timedelta(seconds=30), repeat_seconds=30, exclusive=False, enabled=True, process_name='TestProcess') schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row)", "and 7') @pytest.mark.asyncio @pytest.mark.skip(reason=\"To be done\") async def test_remove_service_from_task_processes(self): pass", "assert 'OMF to PI north' in args2 @pytest.mark.asyncio 
@pytest.mark.skip(\"_scheduler_loop() not", "with pytest.raises(ValueError) as ex: temp_schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.day =", "WHEN schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) # THEN assert isinstance(schedule, Schedule)", "host, port): return { \"id\": uuid.uuid4(), \"name\": \"Fledge Storage\", \"type\":", "log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) schedule_id = uuid.uuid4() #", "\"\" } ] def __init__(self, core_management_host=None, core_management_port=None): super().__init__(core_management_host, core_management_port) def", "of tasks that can be running at any given time\",", "is True # Now queue task and assert that the", "any_order=True) assert 1 == audit_logger.call_count calls = [call('SCHCH', {'schedule': {'name':", "table_name, query_payload): if table_name == 'tasks': return { \"count\": len(MockStorageAsync.tasks),", "assert tasks[0].end_time is None assert tasks[0].exit_code is None @pytest.mark.asyncio async", "False), ('00:25:61', True) ]) async def test__get_schedules(self, test_interval, is_exception, mocker):", "'TestProcess', 'type': Schedule.Type.INTERVAL, 'repeat': 30.0, 'enabled': True, 'exclusive': False}})] audit_logger.assert_has_calls(calls,", "scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.object(scheduler, '_schedule_first_task') # log_info = mocker.patch.object(scheduler._logger,", "== log_exception.call_count log_params = 'Attempt to delete an enabled Schedule", "\"script\": [ \"tasks/backup_postgres\" ] }, { \"name\": \"COAP\", \"script\": [", "datetime.datetime.fromtimestamp(sch_execution.next_start_time) next_dt += datetime.timedelta(seconds=sch.repeat_seconds) scheduler._schedule_next_timed_task(sch, sch_execution, next_dt) time_after_call = sch_execution.next_start_time", "scheduler.queue_task(schedule.id) assert 
isinstance(scheduler._schedule_executions[schedule.id], scheduler._ScheduleExecution) # Changed in version 3.8: patch()", "task_id = uuid.uuid4() with pytest.raises(Exception) as excinfo: await scheduler.get_task(task_id) #", "sch_id assert (scheduler._schedules[sch_id]).enabled is False assert 2 == log_info.call_count calls", "assert scheduler._schedule_executions[schedule.id].task_processes[task_id].cancel_requested is None # WHEN await scheduler.cancel_task(task_id) # THEN", "mocker.patch.object(scheduler, '_max_completed_task_age', datetime.datetime.now()) # WHEN await scheduler.purge_tasks() # THEN assert", "= mocker.patch.object(scheduler._logger, 'debug', side_effect=Exception()) # WHEN # THEN task_id =", "scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) # WHEN # Check IF part", "utf-8 -*- # FLEDGE_BEGIN # See: http://fledge-iot.readthedocs.io/ # FLEDGE_END import", "scheduler._ScheduleExecution() mock_schedule_executions[mock_schedule.id] = mock_schedule_execution mock_schedule_executions[mock_schedule.id].task_processes[mock_task_id] = mock_task_process mocker.patch.object(scheduler, '_resume_check_schedules') mocker.patch.object(scheduler,", "\"2b614d26-760f-11e7-b5a5-be2e44b06b34\", \"process_name\": \"North Readings to PI\", \"schedule_name\": \"OMF to PI", "core_management_port=None) log_info = mocker.patch.object(scheduler._logger, \"info\") log_exception = mocker.patch.object(scheduler._logger, \"exception\") mocker.patch.object(scheduler,", "schedule_row[7] assert schedule.enabled == schedule_row[8] assert schedule.process_name == schedule_row[9] @pytest.mark.asyncio", "= mock_schedule mock_task_id = uuid.uuid4() mock_task_process.task_id = mock_task_id mock_task_processes[mock_task_process.task_id] =", "excinfo: await scheduler.get_task(task_id) # THEN payload = {\"return\": [\"id\", \"process_name\",", "# FLEDGE_BEGIN # See: http://fledge-iot.readthedocs.io/ # FLEDGE_END import asyncio import", 
"tasks = await scheduler.get_running_tasks() # THEN assert 1 == len(tasks)", "schedule_row) temp_schedule.exclusive = None await scheduler.save_schedule(temp_schedule) del temp_schedule assert str(ex).endswith('exclusive", "status, message = await scheduler.disable_schedule(sch_id) # THEN assert status is", "m.terminate = lambda: True return m @pytest.allure.feature(\"unit\") @pytest.allure.story(\"scheduler\") class TestScheduler:", "\"Schedule {} already disabled\".format(str(sch_id)) assert (scheduler._schedules[sch_id]).id == sch_id assert (scheduler._schedules[sch_id]).enabled", "scheduler._get_schedules() # Confirm no. of schedules assert len(scheduler._storage_async.schedules) == len(scheduler._schedules)", "pytest.raises(ValueError) as ex: temp_schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.name = None", "'enabled': True}})] audit_logger.assert_has_calls(calls, any_order=True) @pytest.mark.asyncio async def test_enable_schedule_already_enabled(self, mocker): #", "not implemented in main Scheduler class.\") async def test__mark_tasks_interrupted(self, mocker):", "len(scheduler._storage_async.schedules) == len(scheduler._schedules) calls = [call('Starting'), call('Starting Scheduler: Management port", "Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) log_debug", "day=None, exclusive=True, enabled=True) log_exception = mocker.patch.object(scheduler._logger, \"exception\") log_error = mocker.patch.object(scheduler._logger,", "MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) # WHEN # Check", "\"info\") enable_schedule = mocker.patch.object(scheduler, \"enable_schedule\", return_value=mock_coro()) disable_schedule = mocker.patch.object(scheduler, 
\"disable_schedule\",", "(scheduler._schedules[sch_id]).enabled is True log_params = \"Schedule %s already enabled\", str(sch_id)", "assert schedule.name == schedule_row[1] assert schedule.schedule_type == schedule_row[2] assert schedule_row[3]", "Confirm there are 14 schedules assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) #", "async def test_get_task_not_found(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception,", "args2 @pytest.mark.asyncio async def test__schedule_next_task(self, mocker): # TODO: Mandatory -", "log_info.call_args_list[0] args1, kwargs1 = log_info.call_args_list[1] args2, kwargs2 = log_info.call_args_list[2] args3,", "args2, kwargs2 = log_info.call_args_list[2] args3, kwargs3 = log_info.call_args_list[3] assert 'stats", "tasks[0].process_name assert tasks[0].reason is None assert tasks[0].state == Task.State.RUNNING assert", "HH24:MI:SS.MS\"}, \"reason\", \"exit_code\"], \"limit\": 100} args, kwargs = log_exception.call_args assert", "as excinfo: await scheduler.queue_task(uuid.uuid4()) with pytest.raises(NotReadyError) as excinfo: await scheduler.delete_schedule(uuid.uuid4())", "else: _rv = asyncio.ensure_future(mock_process()) scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None,", "ex: temp_schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.repeat = 1234 await scheduler.save_schedule(temp_schedule)", "def test_get_task_not_found(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error,", "by Scheduler._purge_tasks %s', \"object MagicMock can't be used in 'await'", "WHEN status, message = await scheduler.disable_schedule(sch_id) # THEN assert status", "maximum number of tasks that can be running at any", "# THEN with pytest.raises(ScheduleNotFoundError) as excinfo: await scheduler.queue_task(uuid.uuid4()) @pytest.mark.asyncio async", "schedule_id assert schedule.name == \"purge\" assert schedule.schedule_type == 
Schedule.Type.MANUAL assert", "assert len(scheduler._storage_async.scheduled_processes) == len(scheduler._process_scripts) assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) @pytest.mark.asyncio @pytest.mark.skip(\"_mark_tasks_interrupted()", "status is True assert message == \"Schedule {} already disabled\".format(str(sch_id))", "in args0 assert 'COAP listener south' in args1 assert 'OMF", "def test_save_schedule_new(self, mocker): @asyncio.coroutine def mock_coro(): return \"\" # GIVEN", "mocker.patch.object(scheduler, '_task_processes') log_exception = mocker.patch.object(scheduler._logger, \"exception\") random_schedule_id = uuid.uuid4() #", "return_value=asyncio.ensure_future(mock_task())) audit_logger = mocker.patch.object(AuditLogger, 'information', return_value=asyncio.ensure_future(mock_task())) # WHEN status, message", "mocker.patch.object(scheduler, '_ready', True) # WHEN # Now delete schedule await", "== 'schedules': return { \"count\": len(MockStorageAsync.schedules), \"rows\": MockStorageAsync.schedules } if", "@pytest.mark.asyncio async def test_get_task_not_found(self, mocker): # GIVEN scheduler, schedule, log_info,", "\"schedule_name\": \"COAP listener south\", \"schedule_type\": 1, \"schedule_interval\": \"00:00:00\", \"schedule_time\": \"\",", "\"debug\") log_info = mocker.patch.object(scheduler._logger, \"info\") return scheduler, schedule, log_info, log_exception,", "\"purge\" assert schedule.schedule_type == Schedule.Type.MANUAL assert schedule.repeat == datetime.timedelta(0, 3600)", "= [call('SCHCH', {'schedule': {'name': 'Test Schedule', 'enabled': True, 'repeat': 30.0,", "MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) cr_cat = mocker.patch.object(ConfigurationManager, \"create_category\",", "= mocker.patch.object(scheduler._logger, \"exception\") mocker.patch.object(scheduler, '_schedule_first_task', 
side_effect=Exception()) # WHEN # THEN", "= list(scheduler._schedule_executions[schedule.id].task_processes.keys())[0] # WHEN task = await scheduler.get_task(task_id) # THEN", "test_get_schedule(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug", "schedule_row) temp_schedule.name = None await scheduler.save_schedule(temp_schedule) del temp_schedule assert str(ex).endswith(\"name", "patch() now returns an AsyncMock if the target is an", "= MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) cr_cat = mocker.patch.object(ConfigurationManager,", "Will be tested during System tests.\") async def test__scheduler_loop(self, mocker):", "Schedule.Type.MANUAL assert schedule.repeat == datetime.timedelta(0, 3600) assert schedule.exclusive is True", "async def test_stop(self, mocker): # TODO: Mandatory - Add negative", "@pytest.mark.asyncio async def test_get_tasks(self, mocker): # GIVEN scheduler, schedule, log_info,", "{ \"name\": \"backup\", \"script\": [ \"tasks/backup_postgres\" ] }, { \"name\":", "tasks[0].state is not None assert tasks[0].cancel_requested is None assert tasks[0].start_time", "pytest.raises(NotReadyError) as excinfo: await scheduler.save_schedule(Schedule(Schedule.Type.INTERVAL)) with pytest.raises(NotReadyError) as excinfo: await", "with pytest.raises(NotReadyError) as excinfo: await scheduler.get_running_tasks() with pytest.raises(NotReadyError) as excinfo:", "@pytest.mark.asyncio @pytest.mark.skip(\"_mark_tasks_interrupted() not implemented in main Scheduler class.\") async def", "import * from fledge.services.core.scheduler.exceptions import * from fledge.common.storage_client.storage_client import StorageClientAsync", "= sch_execution.next_start_time # WHEN scheduler._schedule_next_task(sch) time_after_call = sch_execution.next_start_time # THEN", "made assert 
scheduler._schedule_executions[schedule.id].task_processes[task_id].cancel_requested is None # WHEN await scheduler.cancel_task(task_id) #", "assert (scheduler._schedules[sch_id]).enabled is False assert 2 == log_info.call_count calls =", "task has started yet assert 0 == len(scheduler._schedule_executions[schedule.id].task_processes) await scheduler._start_task(schedule)", "\"schedule_type\": 4, \"schedule_interval\": \"01:00:00\", \"schedule_time\": \"\", \"schedule_day\": 0, \"exclusive\": \"t\",", "scheduler.queue_task(uuid.uuid4()) @pytest.mark.asyncio async def test_delete_schedule(self, mocker): # GIVEN scheduler, schedule,", "(scheduler._schedules[sch_id]).id == sch_id assert (scheduler._schedules[sch_id]).enabled is True log_params = \"Schedule", "schedule.exclusive == schedule_row[7] assert schedule.enabled == schedule_row[8] assert schedule.process_name ==", "None @pytest.mark.asyncio async def test_start(self, mocker): # TODO: Mandatory -", "log_info.call_count # args, kwargs = log_info.call_args_list[0] # assert (\"Queued schedule", "enabled=True) mocker.patch.object(scheduler, '_ready', True) mocker.patch.object(scheduler, '_resume_check_schedules') # Assert that there", "queued for mock_schedule with pytest.raises(KeyError) as excinfo: assert scheduler._schedule_executions[schedule.id] is", "scheduler.get_task(task_id) # THEN payload = {\"return\": [\"id\", \"process_name\", \"schedule_name\", \"state\",", "assert 1 == disable_schedule.call_count @pytest.mark.asyncio async def test_save_schedule_exception(self, mocker): #", "await scheduler._get_schedules() sch_id = uuid.UUID(\"2176eb68-7303-11e7-8cf7-a6006ad3dba0\") # stat collector sch =", "\"schedule_name\": \"OMF to PI north\", \"schedule_type\": 3, \"schedule_interval\": \"00:00:30\", \"schedule_time\":", "\"\" # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug =", "Schedule', type=Schedule.Type.INTERVAL, day=0, time=0, repeat=10, repeat_seconds=10, exclusive=False, 
enabled=True, process_name='TestProcess') #", "assert 1 == disable_schedule.call_count @pytest.mark.asyncio async def test_save_schedule_update(self, mocker): @asyncio.coroutine", "mocker.patch.object(scheduler._logger, \"exception\") random_schedule_id = uuid.uuid4() # WHEN await scheduler.disable_schedule(random_schedule_id) #", "pytest.raises(Exception) as excinfo: tasks = await scheduler.get_tasks() # THEN payload", "mocker): pass class MockStorage(StorageClientAsync): def __init__(self, core_management_host=None, core_management_port=None): super().__init__(core_management_host, core_management_port)", "enable_schedule = mocker.patch.object(scheduler, \"enable_schedule\", return_value=mock_coro()) disable_schedule = mocker.patch.object(scheduler, \"disable_schedule\", return_value=mock_coro())", "scheduler._schedules is None assert scheduler._process_scripts is None assert scheduler._ready is", "as excinfo: await scheduler.save_schedule(Schedule(Schedule.Type.INTERVAL)) with pytest.raises(NotReadyError) as excinfo: await scheduler.disable_schedule(uuid.uuid4())", "THEN with pytest.raises(Exception): await scheduler._get_schedules() log_args = 'Query failed: %s',", "async def test_save_schedule_update_with_enable_modified(self, mocker): @asyncio.coroutine def mock_coro(): return \"\" #", "async def test__mark_tasks_interrupted(self, mocker): pass @pytest.mark.asyncio async def test__read_config(self, mocker):", "%d', 9999)] log_info.assert_has_calls(calls, any_order=True) calls = [call('Database command: %s', 'scheduled_processes'),", "fit for unit test.\") @pytest.mark.asyncio async def test__terminate_child_processes(self, mocker): pass", "Readings to PI\", \"script\": [ \"tasks/north\", \"--stream_id\", \"1\", \"--debug_level\", \"1\"", "\"condition\": \"=\", \"value\": str(task_id)}} args, kwargs = log_exception.call_args assert 'Query", "= [call('SCHCH', {'schedule': {'name': 'backup hourly', 'type': Schedule.Type.INTERVAL, 'processName': 'backup',", 
"Schedule '%s/%s' process '%s'\\n\", 'OMF to PI north', '2b614d26-760f-11e7-b5a5-be2e44b06b34', 'North", "North task queued for schedule with pytest.raises(KeyError) as excinfo: assert", "'North Readings to PI' in args @pytest.mark.asyncio async def test_cancel_task_exception(self,", "mocker.patch.object(scheduler._logger, \"info\") log_exception = mocker.patch.object(scheduler._logger, \"exception\") mocker.patch.object(scheduler, '_scheduler_loop', return_value=asyncio.ensure_future(mock_task())) mocker.patch.object(scheduler,", "scheduler._schedule_row_to_schedule(schedule_id, schedule_row) # WHEN await scheduler.save_schedule(schedule) # THEN assert len(scheduler._storage_async.schedules)", "scheduler._schedule_executions[sch_id] time_before_call = sch_execution.next_start_time # WHEN next_dt = datetime.datetime.fromtimestamp(sch_execution.next_start_time) next_dt", "'schedules', new_schedules) # WHEN # THEN if is_exception is True:", "mocker.patch.object(scheduler, '_schedule_first_task') mocker.patch.object(scheduler, '_ready', True) mocker.patch.object(scheduler, '_resume_check_schedules') # WHEN #", "PI North # WHEN status, message = await scheduler.disable_schedule(sch_id) #", "assert payload == p @pytest.mark.asyncio async def test_get_tasks(self, mocker): #", "north', '2b614d26-760f-11e7-b5a5-be2e44b06b34', 'North Readings to PI')] log_info.assert_has_calls(calls) assert 1 ==", "3, \"schedule_interval\": \"1 day 00:00:40\", \"schedule_time\": \"\", \"schedule_day\": 0, \"exclusive\":", "log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) audit_logger = mocker.patch.object(AuditLogger,", "After task completion, sleep above, no task processes should be", "1 == audit_logger.call_count calls = [call('SCHCH', {'schedule': {'name': 'Test Schedule',", "async def test_cancel_task_exception(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception,", "mocker.patch.object(scheduler, '_scheduler_loop', 
return_value=asyncio.ensure_future(mock_task())) mocker.patch.multiple(scheduler, _core_management_port=9999, _core_management_host=\"0.0.0.0\", current_time=current_time - 3600) #", "Readings to PI\") mocker.patch.object(scheduler, '_wait_for_task_completion', return_value=asyncio.ensure_future(mock_task())) mocker.patch.object(scheduler, '_terminate_child_processes') mocker.patch.object(asyncio, 'create_subprocess_exec',", "\"end_time\"}, \"reason\", \"exit_code\"], \"where\": {\"column\": \"id\", \"condition\": \"=\", \"value\": str(task_id)}}", "\"integer\", \"default\": str(Scheduler._DEFAULT_MAX_RUNNING_TASKS), \"value\": str(Scheduler._DEFAULT_MAX_RUNNING_TASKS) }, \"max_completed_task_age_days\": { \"description\": \"The", "that there is no task queued for schedule with pytest.raises(KeyError)", "await scheduler._get_schedules() log_args = 'Query failed: %s', 'schedules' log_exception.assert_called_once_with(*log_args) @pytest.mark.asyncio", "{\"column\": \"id\", \"condition\": \"=\", \"value\": str(task_id)}} args, kwargs = log_exception.call_args", "status, message = await scheduler.enable_schedule(sch_id) # THEN assert status is", "{ \"id\": \"2176eb68-7303-11e7-8cf7-a6006ad3dba0\", \"process_name\": \"stats collector\", \"schedule_name\": \"stats collection\", \"schedule_type\":", "def test_save_schedule_update_with_enable_modified(self, mocker): @asyncio.coroutine def mock_coro(): return \"\" # GIVEN", "return_value=asyncio.ensure_future(mock_task())) # WHEN scheduler._check_purge_tasks() # THEN assert scheduler._purge_tasks_task is not", "%s already disabled\", str(sch_id) log_info.assert_called_with(*log_params) @pytest.mark.asyncio async def test_enable_schedule(self, mocker):", "\"exclusive\": \"t\", \"enabled\": \"t\" }, { \"id\": \"2176eb68-7303-11e7-8cf7-a6006ad3dba0\", \"process_name\": \"stats", "schedules assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) # WHEN # Now delete", "\"limit\": 100} args, kwargs = 
log_exception.call_args assert 'Query failed: %s'", "def test_get_tasks_exception(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error,", "log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) random_schedule_id = uuid.uuid4() #", "to PI\") # WHEN await scheduler._wait_for_task_completion(mock_task_process) # THEN # After", "scheduler._get_schedules(), scheduler._schedule_first_task() also gets executed, hence # \"stat collector\" appears", "schedules = await scheduler.get_schedules() # THEN assert len(scheduler._storage_async.schedules) == len(schedules)", "= MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.multiple(scheduler, _purge_tasks_task=None, _last_task_purge_time=None)", "log_debug = await self.scheduler_fixture(mocker) # WHEN # THEN with pytest.raises(TaskNotRunningError)", "schedule_row[2] assert schedule_row[3] is 0 # 0 for Interval Schedule", "args @pytest.mark.asyncio async def test_purge_tasks(self, mocker): # TODO: Mandatory -", "self.scheduler_fixture(mocker) log_debug = mocker.patch.object(scheduler._logger, 'debug', side_effect=Exception()) sch_id = uuid.UUID(\"d1631422-9ec6-11e7-abc4-cec278b6b50a\") #", "fixture\") @pytest.mark.asyncio async def test_get_task_not_found(self, mocker): # GIVEN scheduler, schedule,", "log_args = 'Query failed: %s', 'scheduled_processes' log_exception.assert_called_once_with(*log_args) @pytest.mark.asyncio @pytest.mark.parametrize(\"test_interval, is_exception\",", "assert schedule_row[4] is 0 # 0 for Interval Schedule assert", "args, kwargs = log_info.call_args_list[0] assert \"Process started: Schedule '%s' process", "= 1234 await scheduler.save_schedule(temp_schedule) del temp_schedule assert str(ex).endswith('repeat must be", "schedule_row) temp_schedule.day = 0 temp_schedule.time = datetime.time() await 
scheduler.save_schedule(temp_schedule) del", "self.scheduler_fixture(mocker) sch_id = uuid.UUID(\"ada12840-68d3-11e7-907b-a6006ad3dba0\") #Coap await scheduler._get_schedules() mocker.patch.object(scheduler, '_ready', True)", "request'), call('Stopped')] log_info.assert_has_calls(calls, any_order=True) # TODO: Find why these exceptions", "'_max_completed_task_age', datetime.datetime.now()) # WHEN await scheduler.purge_tasks() # THEN assert scheduler._purge_tasks_task", "as excinfo: await scheduler.get_task(task_id) # THEN payload = {\"return\": [\"id\",", "def query_tbl_with_payload(cls, table_name, query_payload): if table_name == 'tasks': return {", "assert 1 == audit_logger.call_count calls = [call('SCHCH', {'schedule': {'name': 'Test", "is not None assert task.cancel_requested is None assert task.start_time is", "# Check ELSE part mocker.patch.object(scheduler, '_scheduler_loop_sleep_task', None) scheduler._resume_check_schedules() # THEN", "scheduler.enable_schedule(sch_id) # THEN assert status is True assert message ==", "del temp_schedule assert str(ex).endswith('repeat must be of type datetime.timedelta') with", "assert call(\"Queued schedule '%s' for execution\", 'OMF to PI north')", "is True assert message == \"Schedule is already enabled\" assert", "\"COAP\", \"script\": [ \"services/south\" ] }, { \"name\": \"North Readings", "HH24:MI:SS.MS\", \"column\": \"end_time\"}, \"reason\", \"exit_code\"], \"where\": {\"column\": \"id\", \"condition\": \"=\",", "{\"alias\": \"start_time\", \"format\": \"YYYY-MM-DD HH24:MI:SS.MS\", \"column\": \"start_time\"}, {\"alias\": \"end_time\", \"format\":", "is True assert message == \"Schedule {} already disabled\".format(str(sch_id)) assert", "mocker.patch.object(scheduler, '_resume_check_schedules') # WHEN # THEN with pytest.raises(ScheduleNotFoundError) as excinfo:", "kwargs2 = log_info.call_args_list[2] args3, kwargs3 = log_info.call_args_list[3] assert 'stats collection'", "core_management_port=None) # WHEN # Check 
IF part mocker.patch.object(scheduler, '_scheduler_loop_sleep_task', asyncio.Task(asyncio.sleep(5)))", "scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.repeat = 1234 await scheduler.save_schedule(temp_schedule) del temp_schedule assert", "@pytest.mark.asyncio async def test_enable_schedule_wrong_schedule_id(self, mocker): # GIVEN scheduler, schedule, log_info,", "= [call(\"Enabled Schedule '%s/%s' process '%s'\\n\", 'backup hourly', 'd1631422-9ec6-11e7-abc4-cec278b6b50a', 'backup')]", "await scheduler.get_tasks() # THEN payload = {\"return\": [\"id\", \"process_name\", \"schedule_name\",", "pass @classmethod async def update_tbl(cls, table_name, payload): # Only valid", "datetime.timedelta(0, 3600) assert schedule.exclusive is True assert schedule.enabled is True", "# THEN assert scheduler._ready is True assert len(scheduler._storage_async.scheduled_processes) == len(scheduler._process_scripts)", "def test_delete_schedule_enabled_schedule(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error,", "mocker.patch.object(asyncio, 'ensure_future', return_value=asyncio.ensure_future(mock_task())) mocker.patch.object(scheduler, '_resume_check_schedules') mocker.patch.object(scheduler, '_process_scripts', return_value=\"North Readings to", "mocking _purge_tasks_task, _scheduler_loop_task calls = [call('An exception was raised by", "Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) await", "\"column\": \"start_time\"}, {\"alias\": \"end_time\", \"format\": \"YYYY-MM-DD HH24:MI:SS.MS\", \"column\": \"end_time\"}, \"reason\",", "'_ready', True) mocker.patch.object(scheduler, '_task_processes') log_exception = mocker.patch.object(scheduler._logger, \"exception\") random_schedule_id =", "log_error, log_debug = await self.scheduler_fixture(mocker) # WHEN # THEN with", "await 
scheduler.save_schedule(schedule, is_enabled_modified=True) # THEN assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) assert", "'COAP listener south' in args1 assert 'OMF to PI north'", "\"Schedule successfully enabled\" assert (scheduler._schedules[sch_id]).id == sch_id assert (scheduler._schedules[sch_id]).enabled is", "is True assert schedule.process_name == \"purge\" @pytest.mark.asyncio async def test_get_schedule_exception(self,", "test_get_tasks_exception(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug", "\"--stream_id\", \"1\", \"--debug_level\", \"1\" ] }, { \"name\": \"North Readings", "schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) schedule_id =", "str(sch_id) log_exception.assert_called_with(*log_params) @pytest.mark.asyncio async def test_delete_schedule_exception(self, mocker): # GIVEN scheduler,", "schedule_id = uuid.UUID(\"cea17db8-6ccc-11e7-907b-a6006ad3dba0\") # purge schedule # WHEN schedule =", "\"<NAME>\" __copyright__ = \"Copyright (c) 2017 OSIsoft, LLC\" __license__ =", "def test_disable_schedule_already_disabled(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage =", "[ \"tasks/purge\" ] }, { \"name\": \"stats collector\", \"script\": [", "= [ { \"id\": \"259b8570-65c1-4b92-8c62-e9642631a600\", \"process_name\": \"North Readings to PI\",", "schedule_row = scheduler._ScheduleRow( id=schedule_id, name='Test Schedule', type=Schedule.Type.TIMED, day=0, time=0, repeat=datetime.timedelta(seconds=30),", "mock_task_process mock_schedule_executions = dict() mock_schedule_execution = scheduler._ScheduleExecution() mock_schedule_executions[mock_schedule.id] = mock_schedule_execution", "'tasks': return { \"count\": len(MockStorageAsync.tasks), \"rows\": MockStorageAsync.tasks } @classmethod async", "mocker.patch.object(scheduler, '_process_scripts', return_value=\"North Readings to PI\") # WHEN await 
scheduler._wait_for_task_completion(mock_task_process)", "{\"alias\": \"end_time\", \"format\": \"YYYY-MM-DD HH24:MI:SS.MS\", \"column\": \"end_time\"}, \"reason\", \"exit_code\"], \"where\":", "#Coap mocker.patch.object(scheduler, 'queue_task', return_value=asyncio.ensure_future(mock_task())) # WHEN status, message = await", "is False assert scheduler._paused is False assert scheduler._start_time is None", "[call('Processing stop request'), call('Stopped')] log_info.assert_has_calls(calls, any_order=True) # TODO: Find why", "await scheduler._get_schedules() sch_id = uuid.UUID(\"cea17db8-6ccc-11e7-907b-a6006ad3dba0\") # backup mocker.patch.object(scheduler, '_ready', True)", "pending assert 0 == len(scheduler._task_processes) assert 0 == len(scheduler._schedule_executions[mock_schedule.id].task_processes) args,", "== len(scheduler._schedule_executions[schedule.id].task_processes) task_id = list(scheduler._schedule_executions[schedule.id].task_processes.keys())[0] # WHEN tasks = await", "THEN assert isinstance(schedule, Schedule) assert schedule.schedule_id == schedule_id assert schedule.name", "test_enable_schedule_wrong_schedule_id(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug", "http://fledge-iot.readthedocs.io/ # FLEDGE_END import asyncio import datetime import uuid import", "] }, { \"name\": \"North Readings to OCS\", \"script\": [", "1, \"schedule_interval\": \"00:00:00\", \"schedule_time\": \"\", \"schedule_day\": 0, \"exclusive\": \"t\", \"enabled\":", "== \"purge\" assert schedule.schedule_type == Schedule.Type.MANUAL assert schedule.repeat == datetime.timedelta(0,", "\"info\") current_time = time.time() mocker.patch.multiple(scheduler, _max_running_tasks=10, _start_time=current_time) await scheduler._get_schedules() mocker.patch.object(scheduler,", "log_debug = await self.scheduler_fixture(mocker) log_debug = mocker.patch.object(scheduler._logger, 'debug', side_effect=Exception()) #", "scheduler.start() with 
pytest.raises(NotReadyError) as excinfo: await scheduler.get_scheduled_processes() with pytest.raises(NotReadyError) as", "'_schedule_first_task') mocker.patch.object(scheduler, '_ready', True) mocker.patch.object(scheduler, '_paused', False) mocker.patch.object(scheduler, '_process_scripts', return_value=\"North", "str(Scheduler._DEFAULT_MAX_RUNNING_TASKS), \"value\": str(Scheduler._DEFAULT_MAX_RUNNING_TASKS) }, \"max_completed_task_age_days\": { \"description\": \"The maximum age,", "== enable_schedule.call_count assert 0 == disable_schedule.call_count @pytest.mark.asyncio async def test_save_schedule_update_with_enable_modified(self,", "class.\") async def test__mark_tasks_interrupted(self, mocker): pass @pytest.mark.asyncio async def test__read_config(self,", "Confirm no. of schedules assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) mocker.patch.object(scheduler, '_ready',", "%s', \"object MagicMock can't be used in 'await' expression\")] log_exception.assert_has_calls(calls)", "await scheduler.enable_schedule(random_schedule_id) # THEN log_params = \"No such Schedule %s\",", "scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) log_debug = mocker.patch.object(scheduler._logger, \"debug\") log_info =", "}, { \"id\": \"5d7fed92-fb9a-11e7-8c3f-9a214cf093ae\", \"process_name\": \"North Readings to OCS\", \"schedule_name\":", "\"1\", \"--debug_level\", \"1\" ] }, { \"name\": \"North Readings to", "# WHEN await scheduler.save_schedule(schedule) # THEN assert len(scheduler._storage_async.schedules) + 1", "as ex: temp_schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.time = 1234 await", "= MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.object(scheduler, '_schedule_first_task') log_exception", "schedule_id = uuid.uuid4() schedule_row 
= scheduler._ScheduleRow( id=schedule_id, name='Test Schedule', type=Schedule.Type.TIMED,", "audit_logger.assert_has_calls(calls, any_order=True) assert 1 == first_task.call_count assert 1 == resume_sch.call_count", "OCS north\", \"schedule_type\": 3, \"schedule_interval\": \"1 day 00:00:40\", \"schedule_time\": \"\",", "scheduler._get_schedules() schedule = scheduler._ScheduleRow( id=uuid.UUID(\"2b614d26-760f-11e7-b5a5-be2e44b06b34\"), process_name=\"North Readings to PI\", name=\"OMF", "'_ready', True) # WHEN processes = await scheduler.get_scheduled_processes() # THEN", "{'schedule': {'name': 'Test Schedule', 'enabled': True, 'repeat': 30.0, 'exclusive': False,", "mocker.patch.object(scheduler, \"enable_schedule\", return_value=mock_coro()) disable_schedule = mocker.patch.object(scheduler, \"disable_schedule\", return_value=mock_coro()) schedule_id =", "== audit_logger.call_count calls = [call('SCHCH', {'schedule': {'name': 'OMF to PI", "as excinfo: await scheduler.get_schedules() with pytest.raises(NotReadyError) as excinfo: await scheduler.get_schedule(uuid.uuid4())", "args1 assert 'OMF to PI north' in args2 @pytest.mark.asyncio async", "WHEN scheduler._schedule_first_task(sch, current_time) time_after_call = sch_execution.next_start_time # THEN assert time_after_call", "age, in days (based on the start time), for a", "== enable_schedule.call_count assert 0 == disable_schedule.call_count @pytest.mark.asyncio async def test_save_schedule_new_with_enable_modified(self,", "def mock_coro(): return \"\" # GIVEN scheduler, schedule, log_info, log_exception,", "== resume_sch.call_count assert 1 == enable_schedule.call_count assert 0 == disable_schedule.call_count", "temp_schedule assert str(ex).endswith('exclusive can not be None') with pytest.raises(ValueError) as", "is None assert scheduler._process_scripts is None assert scheduler._ready is False", "Confirm that task has started assert 1 == len(scheduler._schedule_executions[schedule.id].task_processes) #", "json 
from unittest.mock import MagicMock, call import sys import copy", "running tasks\\n%s\" in args assert 'OMF to PI north' in", "{'schedule': {'name': 'OMF to PI north', 'repeat': 30.0, 'enabled': False,", "pytest.raises(KeyError) as excinfo: assert scheduler._schedule_executions[sch_id] is True # WHEN await", "'type': Schedule.Type.INTERVAL, 'exclusive': True, 'processName': 'North Readings to PI'}})] audit_logger.assert_has_calls(calls,", "in args @pytest.mark.asyncio async def test_cancel_task_exception(self, mocker): # GIVEN scheduler,", "'_schedule_first_task') await scheduler._get_schedules() mocker.patch.object(scheduler, '_ready', True) mocker.patch.object(scheduler, '_task_processes') log_exception =", "\"schedule_interval\": \"1 day 00:00:40\", \"schedule_time\": \"\", \"schedule_day\": 0, \"exclusive\": \"t\",", "with pytest.raises(NotReadyError) as excinfo: await scheduler.save_schedule(Schedule(Schedule.Type.INTERVAL)) with pytest.raises(NotReadyError) as excinfo:", "scheduler._check_processes_pending is False # WHEN # Check ELSE part mocker.patch.object(scheduler,", "# Now confirm no schedule is deleted assert len(scheduler._storage_async.schedules) ==", "{ \"id\": \"2b614d26-760f-11e7-b5a5-be2e44b06b34\", \"process_name\": \"North Readings to PI\", \"schedule_name\": \"OMF", "'debug', side_effect=Exception()) sch_id = uuid.UUID(\"d1631422-9ec6-11e7-abc4-cec278b6b50a\") # backup # WHEN #", "'processName': 'backup', 'exclusive': True, 'repeat': 3600.0, 'enabled': True}})] audit_logger.assert_has_calls(calls, any_order=True)", "'_wait_for_task_completion') # Confirm that task has not started yet assert", "excinfo: await scheduler.disable_schedule(uuid.uuid4()) with pytest.raises(NotReadyError) as excinfo: await scheduler.enable_schedule(uuid.uuid4()) with", "'%s' for execution\", 'purge' # log_info.assert_called_with(*log_params) @pytest.mark.asyncio async def test_queue_task_schedule_not_found(self,", "mocker.patch.multiple(scheduler, 
_core_management_port=9999, _core_management_host=\"0.0.0.0\", _start_time=current_time - 3600, _paused=False, _task_processes={}) # WHEN", "mock_schedules = dict() mock_schedule = scheduler._ScheduleRow( id=uuid.UUID(\"2b614d26-760f-11e7-b5a5-be2e44b06b34\"), process_name=\"North Readings to", "WHEN await scheduler.enable_schedule(random_schedule_id) # THEN log_params = \"No such Schedule", "that task has not started yet assert 0 == len(scheduler._schedule_executions[schedule.id].task_processes)", "\"schedule_name\", \"state\", {\"alias\": \"start_time\", \"column\": \"start_time\", \"format\": \"YYYY-MM-DD HH24:MI:SS.MS\"}, {\"alias\":", "= mocker.patch.object(scheduler._logger, \"info\") current_time = time.time() mocker.patch.multiple(scheduler, _max_running_tasks=10, _start_time=current_time-3600) await", "MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.object(scheduler, '_schedule_first_task') log_exception = mocker.patch.object(scheduler._logger, \"exception\") new_schedules =", "not started yet assert 0 == len(scheduler._schedule_executions[schedule.id].task_processes) # WHEN await", "\"North Readings to PI\", \"state\": 1, \"start_time\": \"2018-02-06 13:28:14.477868\", \"end_time\":", "# WHEN await scheduler.cancel_task(task_id) # THEN assert scheduler._schedule_executions[schedule.id].task_processes[task_id].cancel_requested is not", "] }, { \"name\": \"COAP\", \"script\": [ \"services/south\" ] },", "exclusive=False, enabled=True, process_name='TestProcess') # WHEN schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) #", "for mock_schedule with pytest.raises(KeyError) as excinfo: assert scheduler._schedule_executions[schedule.id] is True", "new_schedules = copy.deepcopy(MockStorageAsync.schedules) new_schedules[5]['schedule_interval'] = test_interval mocker.patch.object(MockStorageAsync, 'schedules', new_schedules) #", "await self.scheduler_fixture(mocker) # Assert that there is no North task", "[ 
\"tasks/backup_postgres\" ] }, { \"name\": \"COAP\", \"script\": [ \"services/south\"", "is an async function. if sys.version_info.major == 3 and sys.version_info.minor", "_paused=False) mocker.patch.object(scheduler, '_max_completed_task_age', datetime.datetime.now()) # WHEN await scheduler.purge_tasks() # THEN", "scheduler.cancel_task(uuid.uuid4()) @pytest.mark.asyncio async def test_not_ready_and_paused(self, mocker): # GIVEN scheduler, schedule,", "= await self.scheduler_fixture(mocker) schedule_id = uuid.UUID(\"cea17db8-6ccc-11e7-907b-a6006ad3dba0\") # purge schedule #", "async def test__read_config(self, mocker): async def get_cat(): return { \"max_running_tasks\":", "scheduler._schedule_first_task(sch, current_time) time_after_call = sch_execution.next_start_time # THEN assert time_after_call >", "_start_time=current_time - 3600, _paused=False, _task_processes={}) # WHEN retval = await", "as excinfo: await scheduler.queue_task(uuid.uuid4()) @pytest.mark.asyncio async def test_delete_schedule(self, mocker): #", "processes = await scheduler.get_scheduled_processes() # THEN assert len(scheduler._storage_async.scheduled_processes) == len(processes)", "Confirm that task has started assert 1 == len(scheduler._schedule_executions[schedule.id].task_processes) assert", "OSIsoft, LLC\" __license__ = \"Apache 2.0\" __version__ = \"${VERSION}\" async", "# THEN assert 1 == disable_schedule.call_count @pytest.mark.asyncio async def test_save_schedule_update(self,", "@pytest.mark.asyncio async def test__start_task(self, mocker): # TODO: Mandatory - Add", "# THEN assert time_after_call > time.mktime(curr_time.timetuple()) assert 4 == log_info.call_count", "number of tasks that can be running at any given", "schedule with pytest.raises(KeyError) as excinfo: assert scheduler._schedule_executions[schedule.id] is True #", "mocker.patch.object(scheduler._logger, \"exception\") new_schedules = copy.deepcopy(MockStorageAsync.schedules) new_schedules[5]['schedule_interval'] = 
test_interval mocker.patch.object(MockStorageAsync, 'schedules',", "\"backup\", \"script\": [ \"tasks/backup_postgres\" ] }, { \"name\": \"COAP\", \"script\":", "] }, { \"name\": \"backup\", \"script\": [ \"tasks/backup_postgres\" ] },", "assert tasks[0].start_time is not None assert tasks[0].end_time is not None", "'scheduled_processes' log_exception.assert_called_once_with(*log_args) @pytest.mark.asyncio @pytest.mark.parametrize(\"test_interval, is_exception\", [ ('\"Blah\" 0 days', True),", "disable_schedule.call_count @pytest.mark.asyncio async def test_save_schedule_update_with_enable_modified(self, mocker): @asyncio.coroutine def mock_coro(): return", "time.time() mocker.patch.object(scheduler, '_schedule_first_task') mocker.patch.object(scheduler, '_scheduler_loop', return_value=asyncio.ensure_future(mock_task())) mocker.patch.multiple(scheduler, _core_management_port=9999, _core_management_host=\"0.0.0.0\", current_time=current_time", "await scheduler._get_schedules() mocker.patch.object(scheduler, '_ready', True) mocker.patch.object(scheduler, '_task_processes') log_exception = mocker.patch.object(scheduler._logger,", "@pytest.mark.asyncio async def test_save_schedule_update(self, mocker): @asyncio.coroutine def mock_coro(): return \"\"", "log_debug = await self.scheduler_fixture(mocker) log_debug = mocker.patch.object(scheduler._logger, 'debug', side_effect=Exception()) sch_id", "test__start_task(self, mocker): # TODO: Mandatory - Add negative tests for", "# WHEN await scheduler._wait_for_task_completion(mock_task_process) # THEN # After task completion,", "AuditLogger, ConfigurationManager from fledge.services.core.scheduler.entities import * from fledge.services.core.scheduler.exceptions import *", "scheduler._schedule_executions[sch_id] time_before_call = sch_execution.next_start_time # WHEN scheduler._schedule_next_task(sch) time_after_call = sch_execution.next_start_time", "await self.scheduler_fixture(mocker) schedule_id = 
uuid.UUID(\"cea17db8-6ccc-11e7-907b-a6006ad3dba0\") # purge schedule # WHEN", "fledge.services.core.scheduler.scheduler import Scheduler, AuditLogger, ConfigurationManager from fledge.services.core.scheduler.entities import * from", "= mock_task_process mocker.patch.object(scheduler, '_resume_check_schedules') mocker.patch.object(scheduler, '_schedule_next_task') mocker.patch.multiple(scheduler, _schedules=mock_schedules, _task_processes=mock_task_processes, _schedule_executions=mock_schedule_executions)", "mocker.patch.object(scheduler, '_schedule_first_task') await scheduler._get_schedules() mocker.patch.object(scheduler, '_ready', True) mocker.patch.object(scheduler, '_task_processes') log_exception", "core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) log_info = mocker.patch.object(scheduler._logger, \"info\") mocker.patch.object(scheduler,", "excinfo: tasks = await scheduler.get_tasks() # THEN payload = {\"return\":", "_start_time=current_time-3600) await scheduler._get_schedules() sch_id = uuid.UUID(\"2176eb68-7303-11e7-8cf7-a6006ad3dba0\") # stat collector sch", "%s. Not deleted.', str(sch_id) log_exception.assert_called_with(*log_params) @pytest.mark.asyncio async def test_delete_schedule_exception(self, mocker):", "enabled Schedule %s. 
Not deleted.', str(sch_id) log_exception.assert_called_with(*log_params) @pytest.mark.asyncio async def", "schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) # Assert", "test_get_task(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug", "as excinfo: await scheduler.get_scheduled_processes() with pytest.raises(NotReadyError) as excinfo: await scheduler.get_schedules()", "\"name\": \"stats collector\", \"script\": [ \"tasks/statistics\" ] }, { \"name\":", "mocker.patch.object(scheduler, '_resume_check_schedules') # Assert that there is no task queued", "schedule_row = scheduler._ScheduleRow( id=schedule_id, name='Test Schedule', type=Schedule.Type.INTERVAL, day=0, time=0, repeat=datetime.timedelta(seconds=30),", "# WHEN # Check IF part mocker.patch.object(scheduler, '_scheduler_loop_sleep_task', asyncio.Task(asyncio.sleep(5))) scheduler._resume_check_schedules()", "WHEN # THEN task_id = uuid.uuid4() with pytest.raises(Exception) as excinfo:", "= mocker.patch.object(scheduler._logger, \"debug\", side_effect=Exception()) log_exception = mocker.patch.object(scheduler._logger, \"exception\") mocker.patch.object(scheduler, '_schedule_first_task',", "None assert task.end_time is not None assert task.exit_code is '0'", "that there is no task queued for mock_schedule with pytest.raises(KeyError)", "await scheduler.get_scheduled_processes() # THEN assert len(scheduler._storage_async.scheduled_processes) == len(processes) @pytest.mark.asyncio async", "async def test_get_schedules(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception,", "table_name == 'schedules': return { \"count\": len(MockStorageAsync.schedules), \"rows\": MockStorageAsync.schedules }", "backup # WHEN status, message = await scheduler.disable_schedule(sch_id) # THEN", "[call(\"Enabled Schedule '%s/%s' process '%s'\\n\", 'backup hourly', 'd1631422-9ec6-11e7-abc4-cec278b6b50a', 'backup')] 
log_info.assert_has_calls(calls,", "assert task.exit_code is '0' @pytest.mark.skip(\"Need a suitable fixture\") @pytest.mark.asyncio async", "if table_name == 'schedules': return { \"count\": len(MockStorageAsync.schedules), \"rows\": MockStorageAsync.schedules", "full code coverage # GIVEN scheduler = Scheduler() scheduler._storage =", "= mocker.patch.object(scheduler, \"enable_schedule\", return_value=mock_coro()) disable_schedule = mocker.patch.object(scheduler, \"disable_schedule\", return_value=mock_coro()) #", "PI North schedule_row = scheduler._ScheduleRow( id=schedule_id, name='Test Schedule', type=Schedule.Type.TIMED, day=1,", "not None assert 3 == log_info.call_count args0, kwargs0 = log_info.call_args_list[0]", "\"--debug_level\", \"1\" ] }, { \"name\": \"North Readings to OCS\",", "sys.version_info.minor >= 8: _rv = await get_cat() else: _rv =", "from fledge.services.core.scheduler.scheduler import Scheduler, AuditLogger, ConfigurationManager from fledge.services.core.scheduler.entities import *", "True) mocker.patch.object(scheduler, '_paused', False) mocker.patch.object(scheduler, '_process_scripts', return_value=\"North Readings to PI\")", "\"script\": [ \"tasks/statistics\" ] }, { \"name\": \"backup\", \"script\": [", "# THEN assert len(scheduler._storage_async.scheduled_processes) == len(scheduler._process_scripts) assert len(scheduler._storage_async.schedules) == len(scheduler._schedules)", "\"schedules\": return {\"count\": 1} @classmethod async def delete_from_tbl(cls, table_name, condition=None):", "'_process_scripts', return_value=\"North Readings to PI\") mocker.patch.object(scheduler, '_wait_for_task_completion') # Confirm that", "repeat_seconds=30, exclusive=False, enabled=True, process_name='TestProcess') schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) enable_schedule =", "id=schedule_id, name='Test Schedule', type=Schedule.Type.TIMED, day=1, time=datetime.time(), repeat=datetime.timedelta(seconds=30), 
repeat_seconds=30, exclusive=False, enabled=True,", "hourly\", \"schedule_type\": 3, \"schedule_interval\": \"01:00:00\", \"schedule_time\": \"\", \"schedule_day\": 0, \"exclusive\":", "\"exception\") log_error = mocker.patch.object(scheduler._logger, \"error\") log_debug = mocker.patch.object(scheduler._logger, \"debug\") log_info", "mocker.patch.object(scheduler, '_ready', True) # WHEN processes = await scheduler.get_scheduled_processes() #", "rows \" \"in the tasks table that do not have", "'%s' task %s pid %s, %s running tasks\\n%s\" in args", "scheduler._get_schedules() assert 1 == log_exception.call_count else: await scheduler._get_schedules() assert len(scheduler._storage_async.schedules)", "tasks[0].exit_code is '0' @pytest.mark.asyncio async def test_get_tasks_exception(self, mocker): # GIVEN", "9999, \"address\": \"0.0.0.0\", \"protocol\": \"http\" } @classmethod async def insert_into_tbl(cls,", "first_task = mocker.patch.object(scheduler, '_schedule_first_task') resume_sch = mocker.patch.object(scheduler, '_resume_check_schedules') log_info =", "enabled=True) mock_schedules[mock_schedule.id] = mock_schedule mock_task_process = scheduler._TaskProcess() mock_task_processes = dict()", "@classmethod async def insert_into_tbl(cls, table_name, payload): pass @classmethod async def", "\"name\": \"North Readings to OCS\", \"script\": [ \"tasks/north\", \"--stream_id\", \"4\",", "next_dt) time_after_call = sch_execution.next_start_time # THEN assert time_after_call > time_before_call", "to PI North schedule_row = scheduler._ScheduleRow( id=schedule_id, name='Test Schedule', type=Schedule.Type.TIMED,", "async def test_disable_schedule_already_disabled(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage", "to PI\", name=\"OMF to PI north\", type=Schedule.Type.INTERVAL, repeat=datetime.timedelta(seconds=30), repeat_seconds=30, time=None,", "temp_schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.repeat = 1234 await 
scheduler.save_schedule(temp_schedule) del", "has started assert 1 == len(scheduler._schedule_executions[schedule.id].task_processes) task_id = list(scheduler._schedule_executions[schedule.id].task_processes.keys())[0] #", "self.scheduler_fixture(mocker) schedule_id = uuid.uuid4() schedule_row = scheduler._ScheduleRow( id=schedule_id, name='Test Schedule',", "time=0, repeat=10, repeat_seconds=10, exclusive=False, enabled=True, process_name='TestProcess') # WHEN schedule =", "log_exception, log_error, log_debug @pytest.mark.asyncio async def test__resume_check_schedules(self, mocker): # GIVEN", "return_value=mock_coro()) disable_schedule = mocker.patch.object(scheduler, \"disable_schedule\", return_value=mock_coro()) schedule_id = uuid.uuid4() schedule_row", "delete schedule with pytest.raises(RuntimeWarning): await scheduler.delete_schedule(sch_id) # THEN # Now", "\"schedule_day\": 0, \"exclusive\": \"t\", \"enabled\": \"f\" }, ] scheduled_processes =", "def test_cancel_task_all_ok(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error,", "kwargs1 = log_info.call_args_list[1] args2, kwargs2 = log_info.call_args_list[2] args3, kwargs3 =", "kwargs = log_exception.call_args assert 'Query failed: %s' == args[0] p", "was raised by Scheduler._purge_tasks %s', \"object MagicMock can't be used", "north' in args2 # As part of scheduler._get_schedules(), scheduler._schedule_first_task() also", "log_debug = await self.scheduler_fixture(mocker) sch_id = uuid.UUID(\"d1631422-9ec6-11e7-abc4-cec278b6b50a\") # backup queue_task", "def test__check_purge_tasks(self, mocker): # TODO: Mandatory - Add negative tests", "\"id\": uuid.uuid4(), \"name\": \"Fledge Storage\", \"type\": \"Storage\", \"service_port\": 9999, \"management_port\":", "yet assert 0 == len(scheduler._schedule_executions[schedule.id].task_processes) await scheduler._start_task(schedule) # Confirm that", "is no task queued for this schedule at first with", "def get_cat(): return { 
\"max_running_tasks\": { \"description\": \"The maximum number", "'0:0:0', 'processName': 'TestProcess', 'type': Schedule.Type.TIMED}})] audit_logger.assert_has_calls(calls, any_order=True) assert 1 ==", "MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.object(scheduler, '_schedule_first_task') # log_info", "None calls = [call('Processing stop request'), call('Stopped')] log_info.assert_has_calls(calls, any_order=True) #", "{\"alias\": \"start_time\", \"column\": \"start_time\", \"format\": \"YYYY-MM-DD HH24:MI:SS.MS\"}, {\"alias\": \"end_time\", \"column\":", "1 == enable_schedule.call_count assert 0 == disable_schedule.call_count # WHEN await", "@pytest.mark.asyncio async def test_get_schedule(self, mocker): # GIVEN scheduler, schedule, log_info,", "pytest.raises(ValueError) as ex: temp_schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.day = 0", "False assert 2 == log_info.call_count calls = [call('No Task running", "1 == resume_sch.call_count assert 1 == enable_schedule.call_count assert 0 ==", "was raised by Scheduler._scheduler_loop %s', \"object MagicMock can't be used", "excinfo: await scheduler.queue_task(uuid.uuid4()) @pytest.mark.asyncio async def test_delete_schedule(self, mocker): # GIVEN", "'0' @pytest.mark.asyncio async def test_get_tasks_exception(self, mocker): # GIVEN scheduler, schedule,", "for schedule with pytest.raises(KeyError) as excinfo: assert scheduler._schedule_executions[schedule.id] is True", "executed, hence # \"stat collector\" appears twice in this list.", "an async function. 
if sys.version_info.major == 3 and sys.version_info.minor >=", "\"error\") log_debug = mocker.patch.object(scheduler._logger, \"debug\") log_info = mocker.patch.object(scheduler._logger, \"info\") return", "days (based on the start time), for a rows \"", "\"0.0.0.0\", \"protocol\": \"http\" } @classmethod async def insert_into_tbl(cls, table_name, payload):", "= mocker.patch.object(scheduler, 'queue_task', return_value=asyncio.ensure_future(mock_task())) audit_logger = mocker.patch.object(AuditLogger, 'information', return_value=asyncio.ensure_future(mock_task())) #", "raised despite mocking _purge_tasks_task, _scheduler_loop_task calls = [call('An exception was", "enabled\" assert (scheduler._schedules[sch_id]).id == sch_id assert (scheduler._schedules[sch_id]).enabled is True assert", "= scheduler._schedules[sch_id] sch_execution = scheduler._schedule_executions[sch_id] time_before_call = sch_execution.next_start_time # WHEN", "1 == len(scheduler._schedule_executions[schedule.id].task_processes) task_id = list(scheduler._schedule_executions[schedule.id].task_processes.keys())[0] # WHEN task =", "dict() mock_schedule = scheduler._ScheduleRow( id=uuid.UUID(\"2b614d26-760f-11e7-b5a5-be2e44b06b34\"), process_name=\"North Readings to PI\", name=\"OMF", "\"id\": \"cea17db8-6ccc-11e7-907b-a6006ad3dba0\", \"process_name\": \"purge\", \"schedule_name\": \"purge\", \"schedule_type\": 4, \"schedule_interval\": \"01:00:00\",", "def test_queue_task(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage =", "= await self.scheduler_fixture(mocker) # WHEN schedules = await scheduler.get_schedules() #", "\"t\", \"enabled\": \"t\" }, { \"id\": \"2176eb68-7303-11e7-8cf7-a6006ad3dba0\", \"process_name\": \"stats collector\",", "MockStorageAsync(core_management_host=None, core_management_port=None) log_debug = mocker.patch.object(scheduler._logger, \"debug\", side_effect=Exception()) log_exception = mocker.patch.object(scheduler._logger,", "await scheduler._get_schedules() 
# Confirm no. of schedules assert len(scheduler._storage_async.schedules) ==", "core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.object(scheduler, '_schedule_first_task') # WHEN await", "return_value=asyncio.ensure_future(mock_task())) mocker.patch.multiple(scheduler, _core_management_port=9999, _core_management_host=\"0.0.0.0\", current_time=current_time - 3600) # TODO: Remove", "assert 1 == queue_task.call_count calls = [call(\"Enabled Schedule '%s/%s' process", "version 3.8: patch() now returns an AsyncMock if the target", "mock_process() else: _rv = asyncio.ensure_future(mock_process()) scheduler = Scheduler() scheduler._storage =", "Readings to PI'}})] audit_logger.assert_has_calls(calls, any_order=True) @pytest.mark.asyncio async def test_disable_schedule_wrong_schedule_id(self, mocker):", "\"max_completed_task_age_days\": { \"description\": \"The maximum age, in days (based on", "scheduler.get_schedule(schedule_id) # THEN assert isinstance(schedule, Schedule) assert schedule.schedule_id == schedule_id", "lambda: True return m @pytest.allure.feature(\"unit\") @pytest.allure.story(\"scheduler\") class TestScheduler: async def", "assert 0 == len(scheduler._task_processes) assert 0 == len(scheduler._schedule_executions[mock_schedule.id].task_processes) args, kwargs", "== schedule_row[8] assert schedule.process_name == schedule_row[9] @pytest.mark.asyncio async def test_get_schedules(self,", "len(tasks) assert schedule.process_name == tasks[0].process_name assert tasks[0].reason is None assert", "list(scheduler._schedule_executions[schedule.id].task_processes.keys())[0] # Confirm that cancel request has not been made", "def test_not_ready_and_paused(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error,", "mocker.patch.object(scheduler, '_process_scripts', return_value=\"North Readings to PI\") mocker.patch.object(scheduler, 
'_wait_for_task_completion') # Confirm", "tests for full code coverage # GIVEN scheduler = Scheduler()", "TODO: Find why these exceptions are being raised despite mocking", "scheduler.cancel_task(uuid.uuid4()) @pytest.mark.skip(\"_terminate_child_processes() not fit for unit test.\") @pytest.mark.asyncio async def", "'repeat': 30.0, 'exclusive': False, 'day': 1, 'time': '0:0:0', 'processName': 'TestProcess',", "\"00:00:00\", \"schedule_time\": \"\", \"schedule_day\": 0, \"exclusive\": \"t\", \"enabled\": \"t\" },", "await scheduler.queue_task(uuid.uuid4()) @pytest.mark.asyncio async def test_delete_schedule(self, mocker): # GIVEN scheduler,", "'repeat': 3600.0, 'enabled': True}})] audit_logger.assert_has_calls(calls, any_order=True) @pytest.mark.asyncio async def test_enable_schedule_already_enabled(self,", "in args1 assert 'OMF to PI north' in args2 #", "task_id = list(scheduler._schedule_executions[schedule.id].task_processes.keys())[0] # WHEN task = await scheduler.get_task(task_id) #", "suitable for unit testing. Will be tested during System tests.\")", "'stats collection' in args3 @pytest.mark.asyncio async def test__schedule_first_task(self, mocker): #", "Readings to PI')] log_info.assert_has_calls(calls) assert 1 == audit_logger.call_count calls =", "# backup # WHEN # THEN with pytest.raises(ScheduleNotFoundError) as excinfo:", "= scheduler._schedules[sch_id] sch_execution = scheduler._schedule_executions[sch_id] # WHEN scheduler._schedule_first_task(sch, current_time) time_after_call", "@pytest.mark.asyncio @pytest.mark.skip(\"_scheduler_loop() not suitable for unit testing. 
Will be tested", "= [call('No Task running for Schedule %s', '2b614d26-760f-11e7-b5a5-be2e44b06b34'), call(\"Disabled Schedule", "call('An exception was raised by Scheduler._scheduler_loop %s', \"object MagicMock can't", "excinfo: assert scheduler._schedule_executions[sch_id] is True # WHEN await scheduler.queue_task(sch_id) #", "has not been made assert scheduler._schedule_executions[schedule.id].task_processes[task_id].cancel_requested is None # WHEN", "{\"alias\": \"end_time\", \"column\": \"end_time\", \"format\": \"YYYY-MM-DD HH24:MI:SS.MS\"}, \"reason\", \"exit_code\"], \"limit\":", "is None assert tasks[0].state == Task.State.RUNNING assert tasks[0].cancel_requested is None", "at any given time\", \"type\": \"integer\", \"default\": str(Scheduler._DEFAULT_MAX_RUNNING_TASKS), \"value\": str(Scheduler._DEFAULT_MAX_RUNNING_TASKS)", "assert schedule_row[3] is 0 # 0 for Interval Schedule assert", "= mocker.patch.object(scheduler._logger, 'debug', side_effect=Exception()) sch_id = uuid.UUID(\"d1631422-9ec6-11e7-abc4-cec278b6b50a\") # backup #", "\"exception\") new_schedules = copy.deepcopy(MockStorageAsync.schedules) new_schedules[5]['schedule_interval'] = test_interval mocker.patch.object(MockStorageAsync, 'schedules', new_schedules)", "schedule.schedule_type == Schedule.Type.MANUAL assert schedule.repeat == datetime.timedelta(0, 3600) assert schedule.exclusive", "def test_queue_task_schedule_not_found(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage =", "len(scheduler._storage_async.scheduled_processes) == len(scheduler._process_scripts) assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) calls = [call('Starting'),", "schedule.name == schedule_row[1] assert schedule.schedule_type == schedule_row[2] assert schedule_row[3] is", "\"reason\": \"\" } ] def __init__(self, core_management_host=None, core_management_port=None): super().__init__(core_management_host, core_management_port)", "excinfo: await 
scheduler.enable_schedule(uuid.uuid4()) with pytest.raises(NotReadyError) as excinfo: await scheduler.queue_task(uuid.uuid4()) with", "\"enabled\": \"f\" }, { \"id\": \"ada12840-68d3-11e7-907b-a6006ad3dba0\", \"process_name\": \"COAP\", \"schedule_name\": \"COAP", "mocker.patch.object(scheduler, '_wait_for_task_completion', return_value=asyncio.ensure_future(mock_task())) mocker.patch.object(scheduler, '_terminate_child_processes') mocker.patch.object(asyncio, 'create_subprocess_exec', return_value=_rv) await scheduler._get_schedules()", "pytest from fledge.services.core.scheduler.scheduler import Scheduler, AuditLogger, ConfigurationManager from fledge.services.core.scheduler.entities import", "self.scheduler_fixture(mocker) random_schedule_id = uuid.uuid4() # WHEN await scheduler.enable_schedule(random_schedule_id) # THEN", "log_exception.call_count else: await scheduler._get_schedules() assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) @pytest.mark.asyncio async", "scheduler.delete_schedule(uuid.uuid4()) @pytest.mark.asyncio async def test_delete_schedule_not_found(self, mocker): # GIVEN scheduler, schedule,", "asyncio.create_subprocess_exec(\"sleep\", \".1\") mock_task_process.schedule = mock_schedule mock_task_id = uuid.uuid4() mock_task_process.task_id =", "assert (scheduler._schedules[sch_id]).enabled is True log_params = \"Schedule %s already enabled\",", "mocker.patch.object(scheduler, '_terminate_child_processes') mocker.patch.object(asyncio, 'create_subprocess_exec', return_value=_rv) await scheduler._get_schedules() schedule = scheduler._ScheduleRow(", "ex: temp_schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.name = None await scheduler.save_schedule(temp_schedule)", "= await self.scheduler_fixture(mocker) # WHEN # THEN with pytest.raises(TaskNotRunningError) as", "core_management_host=None, core_management_port=None): super().__init__(core_management_host, core_management_port) def 
_get_storage_service(self, host, port): return {", "schedule_row) temp_schedule.time = 1234 await scheduler.save_schedule(temp_schedule) del temp_schedule assert str(ex).endswith('time", "# WHEN await scheduler.purge_tasks() # THEN assert scheduler._purge_tasks_task is None", "assert isinstance(schedule, Schedule) assert schedule.schedule_id == schedule_row[0] assert schedule.name ==", "return_value=asyncio.ensure_future(asyncio.sleep(.1))) current_time = time.time() mocker.patch.multiple(scheduler, _core_management_port=9999, _core_management_host=\"0.0.0.0\", _start_time=current_time - 3600,", "'exclusive': False}})] audit_logger.assert_has_calls(calls, any_order=True) assert 1 == first_task.call_count assert 1", "'_read_config', return_value=asyncio.ensure_future(mock_task())) assert scheduler._ready is False # WHEN await scheduler.start()", "to PI' in args args, kwargs = log_info.call_args_list[1] assert \"Stopping", "8: _rv = await mock_process() else: _rv = asyncio.ensure_future(mock_process()) scheduler", "args2 @pytest.mark.asyncio @pytest.mark.skip(\"_scheduler_loop() not suitable for unit testing. 
Will be", "log_debug = await self.scheduler_fixture(mocker) sch_id = uuid.UUID(\"ada12840-68d3-11e7-907b-a6006ad3dba0\") #Coap await scheduler._get_schedules()", "with pytest.raises(Exception) as excinfo: tasks = await scheduler.get_tasks() # THEN", "assert scheduler._process_scripts is None assert scheduler._ready is False assert scheduler._paused", "with pytest.raises(KeyError) as excinfo: assert scheduler._schedule_executions[schedule.id] is True # Now", "= log_info.call_args_list[2] assert 'stats collection' in args0 assert 'COAP listener", "id=schedule_id, name='Test Schedule', type=Schedule.Type.INTERVAL, day=0, time=0, repeat=10, repeat_seconds=10, exclusive=False, enabled=True,", "async def test_get_schedule_exception(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception,", "= MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.object(scheduler, '_schedule_first_task') #", "1 == cr_cat.call_count assert 1 == get_cat.call_count assert scheduler._max_running_tasks is", "len(scheduler._schedule_executions[schedule.id].task_processes) # WHEN await scheduler._start_task(schedule) # THEN # Confirm that", "async def query_tbl_with_payload(cls, table_name, query_payload): if table_name == 'tasks': return", "core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.multiple(scheduler, _ready=True, _paused=False) mocker.patch.object(scheduler, '_max_completed_task_age',", "# See: http://fledge-iot.readthedocs.io/ # FLEDGE_END import asyncio import datetime import", "async def test__get_schedules(self, test_interval, is_exception, mocker): # GIVEN scheduler =", "is None # WHEN await scheduler.cancel_task(task_id) # THEN assert scheduler._schedule_executions[schedule.id].task_processes[task_id].cancel_requested", "{\"return\": [\"id\", \"process_name\", 
\"schedule_name\", \"state\", {\"alias\": \"start_time\", \"format\": \"YYYY-MM-DD HH24:MI:SS.MS\",", "WHEN # THEN with pytest.raises(Exception): await scheduler._get_schedules() log_args = 'Query", "'_scheduler_loop', return_value=asyncio.ensure_future(mock_task())) mocker.patch.multiple(scheduler, _core_management_port=9999, _core_management_host=\"0.0.0.0\", current_time=current_time - 3600) # TODO:", "= asyncio.ensure_future(mock_process()) scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async", "sch_execution.next_start_time # WHEN scheduler._schedule_next_task(sch) time_after_call = sch_execution.next_start_time # THEN assert", "\"The maximum number of tasks that can be running at", "THEN assert status is True assert message == \"Schedule {}", "unit testing. Will be tested during System tests.\") async def", "\"get_category_all_items\", return_value=_rv) # WHEN assert scheduler._max_running_tasks is None assert scheduler._max_completed_task_age", "with pytest.raises(NotReadyError) as excinfo: await scheduler.cancel_task(uuid.uuid4()) @pytest.mark.skip(\"_terminate_child_processes() not fit for", "= MagicMock() m.pid = 9999 m.terminate = lambda: True return", "\"No such Schedule %s\", str(random_schedule_id) log_exception.assert_called_with(*log_params) @pytest.mark.asyncio async def test_queue_task(self,", "scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) log_info = mocker.patch.object(scheduler._logger, \"info\") current_time =", "await self.scheduler_fixture(mocker) schedule_id = uuid.uuid4() schedule_row = scheduler._ScheduleRow( id=schedule_id, name='Test", "None') with pytest.raises(ValueError) as ex: temp_schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.time", "random_schedule_id = uuid.uuid4() # WHEN await scheduler.disable_schedule(random_schedule_id) # THEN log_params", "\"enabled\": 
\"t\" }, { \"id\": \"d1631422-9ec6-11e7-abc4-cec278b6b50a\", \"process_name\": \"backup\", \"schedule_name\": \"backup", "scheduler._ScheduleRow( id=schedule_id, name='Test Schedule', type=Schedule.Type.INTERVAL, day=0, time=0, repeat=10, repeat_seconds=10, exclusive=False,", "temp_schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.exclusive = None await scheduler.save_schedule(temp_schedule) del", "assert scheduler._check_processes_pending is False # WHEN # Check ELSE part", "core_management_port=None) log_info = mocker.patch.object(scheduler._logger, \"info\") mocker.patch.object(scheduler, '_schedule_first_task') await scheduler._get_schedules() schedule", "= \"Copyright (c) 2017 OSIsoft, LLC\" __license__ = \"Apache 2.0\"", "\"t\" }, { \"id\": \"2b614d26-760f-11e7-b5a5-be2e44b06b34\", \"process_name\": \"North Readings to PI\",", "is not None assert task.end_time is not None assert task.exit_code", "core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) # WHEN await scheduler._get_process_scripts() #", "('0 day 12:30:11', False), ('1 day 12:40:11', False), ('2 days',", "Scheduler, AuditLogger, ConfigurationManager from fledge.services.core.scheduler.entities import * from fledge.services.core.scheduler.exceptions import", "mocker.patch.object(scheduler, '_ready', True) mocker.patch.object(scheduler, '_task_processes') log_exception = mocker.patch.object(scheduler._logger, \"exception\") random_schedule_id", "('12:30:11', False), ('0 day 12:30:11', False), ('1 day 12:40:11', False),", "= log_info.call_args_list[0] assert \"Process started: Schedule '%s' process '%s' task", "import json from unittest.mock import MagicMock, call import sys import", "uuid.UUID(\"cea17db8-6ccc-11e7-907b-a6006ad3dba0\") # purge schedule # WHEN schedule = await scheduler.get_schedule(schedule_id)", "time=datetime.time(), repeat=datetime.timedelta(seconds=30), repeat_seconds=30, 
exclusive=False, enabled=True, process_name='TestProcess') schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row)", "] }, { \"name\": \"stats collector\", \"script\": [ \"tasks/statistics\" ]", "repeat=datetime.timedelta(seconds=30), repeat_seconds=30, time=None, day=None, exclusive=True, enabled=True) log_exception = mocker.patch.object(scheduler._logger, \"exception\")", "assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) @pytest.mark.asyncio @pytest.mark.skip(\"_mark_tasks_interrupted() not implemented in main", "'_ready', True) mocker.patch.object(scheduler, '_resume_check_schedules') # Assert that there is no", "assert scheduler._paused is False assert scheduler._start_time is None calls =", "exclusive=False, enabled=True, process_name='TestProcess') # WHEN # THEN with pytest.raises(ValueError) as", "is 0 # 0 for Interval Schedule assert schedule_row[4] is", "is not None assert task.exit_code is '0' @pytest.mark.skip(\"Need a suitable", "\"d1631422-9ec6-11e7-abc4-cec278b6b50a\", \"process_name\": \"backup\", \"schedule_name\": \"backup hourly\", \"schedule_type\": 3, \"schedule_interval\": \"01:00:00\",", "len(scheduler._schedule_executions[schedule.id].task_processes) task_id = list(scheduler._schedule_executions[schedule.id].task_processes.keys())[0] # WHEN task = await scheduler.get_task(task_id)", "to delete an enabled Schedule %s. 
Not deleted.', str(sch_id) log_exception.assert_called_with(*log_params)", "test_delete_schedule_not_found(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug", "= log_info.call_args_list[0] assert 'OMF to PI north' in args assert", "assert 4 == log_info.call_count args0, kwargs0 = log_info.call_args_list[0] args1, kwargs1", "for execution\", 'OMF to PI north') == args args, kwargs", "resume_sch = mocker.patch.object(scheduler, '_resume_check_schedules') log_info = mocker.patch.object(scheduler._logger, \"info\") enable_schedule =", "assert scheduler._max_completed_task_age is None await scheduler._read_config() # THEN assert 1", "log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) schedule_id = uuid.UUID(\"cea17db8-6ccc-11e7-907b-a6006ad3dba0\") #", "pass @pytest.mark.asyncio async def test_disable_schedule(self, mocker): # GIVEN scheduler =", "return_value=asyncio.ensure_future(mock_task())) log_info = mocker.patch.object(scheduler._logger, \"info\") sch_id = uuid.UUID(\"2b614d26-760f-11e7-b5a5-be2e44b06b34\") # OMF", "def test_delete_schedule_exception(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error,", "running at any given time\", \"type\": \"integer\", \"default\": str(Scheduler._DEFAULT_MAX_RUNNING_TASKS), \"value\":", "WHEN # THEN with pytest.raises(ScheduleNotFoundError) as excinfo: await scheduler.delete_schedule(uuid.uuid4()) @pytest.mark.asyncio", "}, { \"id\": \"d1631422-9ec6-11e7-abc4-cec278b6b50a\", \"process_name\": \"backup\", \"schedule_name\": \"backup hourly\", \"schedule_type\":", "\"exclusive\": \"f\", \"enabled\": \"t\" }, { \"id\": \"d1631422-9ec6-11e7-abc4-cec278b6b50a\", \"process_name\": \"backup\",", "StorageClientAsync __author__ = \"<NAME>\" __copyright__ = \"Copyright (c) 2017 OSIsoft,", "WHEN tasks = await scheduler.get_running_tasks() # THEN assert 1 ==", "\"schedule_interval\": \"00:00:00\", \"schedule_time\": \"\", \"schedule_day\": 0, 
\"exclusive\": \"t\", \"enabled\": \"t\"", "if table_name == 'tasks': return { \"count\": len(MockStorageAsync.tasks), \"rows\": MockStorageAsync.tasks", "test__read_config() mocker.patch.object(scheduler, '_read_config', return_value=asyncio.ensure_future(mock_task())) assert scheduler._ready is False # WHEN", "'schedules')] log_debug.assert_has_calls(calls, any_order=True) @pytest.mark.asyncio async def test_stop(self, mocker): # TODO:", "False log_params = \"Schedule %s already disabled\", str(sch_id) log_info.assert_called_with(*log_params) @pytest.mark.asyncio", "async def update_tbl(cls, table_name, payload): # Only valid for test_save_schedule_update", "@pytest.mark.asyncio async def test__get_schedules_exception(self, mocker): # GIVEN scheduler = Scheduler()", "# THEN assert time_after_call > time_before_call assert 4 == log_info.call_count", "THEN # Now confirm there is one schedule less assert", "= mocker.patch.object(scheduler._logger, 'debug', side_effect=Exception()) # WHEN with pytest.raises(Exception) as excinfo:", "'_task_processes') log_exception = mocker.patch.object(scheduler._logger, \"exception\") random_schedule_id = uuid.uuid4() # WHEN", "# THEN assert len(scheduler._storage_async.scheduled_processes) == len(processes) @pytest.mark.asyncio async def test_schedule_row_to_schedule(self,", "= scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.repeat = 1234 await scheduler.save_schedule(temp_schedule) del temp_schedule", "is_exception is True: with pytest.raises(Exception): await scheduler._get_schedules() assert 1 ==", "used in 'await' expression\")] log_exception.assert_has_calls(calls) @pytest.mark.asyncio async def test_get_scheduled_processes(self, mocker):", "schedule.enabled is True assert schedule.process_name == \"purge\" @pytest.mark.asyncio async def", "core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.object(scheduler, 
'_schedule_first_task') await scheduler._get_schedules() mocker.patch.object(scheduler,", "test_queue_task(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None,", "not None assert tasks[0].exit_code is '0' @pytest.mark.asyncio async def test_get_tasks_exception(self,", "datetime.datetime.now()) # WHEN await scheduler.purge_tasks() # THEN assert scheduler._purge_tasks_task is", "Interval Schedule assert schedule.repeat == schedule_row[5] assert schedule.exclusive == schedule_row[7]", "'_scheduler_loop_sleep_task', asyncio.Task(asyncio.sleep(5))) scheduler._resume_check_schedules() # THEN assert scheduler._check_processes_pending is False #", "disabled\", str(sch_id) log_info.assert_called_with(*log_params) @pytest.mark.asyncio async def test_enable_schedule(self, mocker): # GIVEN", "an enabled Schedule %s. Not deleted.', str(sch_id) log_exception.assert_called_with(*log_params) @pytest.mark.asyncio async", "process_name='TestProcess') schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) # WHEN await scheduler.save_schedule(schedule) #", "scheduler._get_process_scripts() mocker.patch.object(scheduler, '_ready', True) # WHEN processes = await scheduler.get_scheduled_processes()", "THEN assert isinstance(scheduler._schedule_executions[sch_id], scheduler._ScheduleExecution) # log_params = \"Queued schedule '%s'", "assert schedule.name == \"purge\" assert schedule.schedule_type == Schedule.Type.MANUAL assert schedule.repeat", "for Interval Schedule assert schedule.repeat == schedule_row[5] assert schedule.exclusive ==", "not None assert tasks[0].end_time is not None assert tasks[0].exit_code is", "on the start time), for a rows \" \"in the", "temp_schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.day = 0 temp_schedule.time = datetime.time()", "'_process_scripts', return_value=\"North Readings to PI\") mocker.patch.object(scheduler, '_wait_for_task_completion', 
return_value=asyncio.ensure_future(mock_task())) mocker.patch.object(scheduler, '_terminate_child_processes')", "= time.time() mocker.patch.multiple(scheduler, _max_running_tasks=10, _start_time=current_time-3600) await scheduler._get_schedules() sch_id = uuid.UUID(\"2176eb68-7303-11e7-8cf7-a6006ad3dba0\")", "@pytest.mark.asyncio async def test_schedule_row_to_schedule(self, mocker): # GIVEN scheduler = Scheduler()", "not be None') with pytest.raises(ValueError) as ex: temp_schedule = scheduler._schedule_row_to_schedule(schedule_id,", "schedule is deleted assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) assert 1 ==", "scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) log_debug", "excinfo: await scheduler.start() with pytest.raises(NotReadyError) as excinfo: await scheduler.get_scheduled_processes() with", "_core_management_host=\"0.0.0.0\", _start_time=current_time - 3600, _paused=False, _task_processes={}) # WHEN retval =", "== schedule_row[7] assert schedule.enabled == schedule_row[8] assert schedule.process_name == schedule_row[9]", "GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker)", "scheduler._purge_tasks_task is not None @pytest.mark.asyncio async def test__check_schedules(self, mocker): #", "\"4\", \"--debug_level\", \"1\" ] }, ] tasks = [ {", "= scheduler._schedule_executions[sch_id] time_before_call = sch_execution.next_start_time # WHEN next_dt = datetime.datetime.fromtimestamp(sch_execution.next_start_time)", "pytest.raises(TaskNotRunningError) as excinfo: await scheduler.cancel_task(uuid.uuid4()) @pytest.mark.asyncio async def test_not_ready_and_paused(self, mocker):", "# Now delete schedule with pytest.raises(RuntimeWarning): await scheduler.delete_schedule(sch_id) # THEN", "@pytest.mark.asyncio async def test_get_task_exception(self, mocker): # GIVEN scheduler, schedule, log_info,", "await 
scheduler._get_schedules() assert 1 == log_exception.call_count else: await scheduler._get_schedules() assert", "== Task.State.RUNNING assert tasks[0].cancel_requested is None assert tasks[0].start_time is not", "in args2 # As part of scheduler._get_schedules(), scheduler._schedule_first_task() also gets", "@pytest.mark.asyncio async def test_delete_schedule_exception(self, mocker): # GIVEN scheduler, schedule, log_info,", "that can be running at any given time\", \"type\": \"integer\",", "Schedule', type=Schedule.Type.TIMED, day=0, time=0, repeat=datetime.timedelta(seconds=30), repeat_seconds=30, exclusive=False, enabled=True, process_name='TestProcess') #", "is no task queued for mock_schedule with pytest.raises(KeyError) as excinfo:", "random_schedule_id = uuid.uuid4() # WHEN await scheduler.enable_schedule(random_schedule_id) # THEN log_params", "len(scheduler._schedule_executions[schedule.id].task_processes) assert 1 == log_info.call_count # assert call(\"Queued schedule '%s'", "why these exceptions are being raised despite mocking _purge_tasks_task, _scheduler_loop_task", "return_value=asyncio.ensure_future(mock_task())) assert scheduler._ready is False # WHEN await scheduler.start() #", "None assert tasks[0].end_time is None assert tasks[0].exit_code is None @pytest.mark.asyncio", "\"default\": str(Scheduler._DEFAULT_MAX_RUNNING_TASKS), \"value\": str(Scheduler._DEFAULT_MAX_RUNNING_TASKS) }, \"max_completed_task_age_days\": { \"description\": \"The maximum", "\"enabled\": \"t\" }, { \"id\": \"2b614d26-760f-11e7-b5a5-be2e44b06b34\", \"process_name\": \"North Readings to", "process: Schedule '%s' process '%s' task %s pid %s\\n%s\" in", "mock_task_id mock_task_processes[mock_task_process.task_id] = mock_task_process mock_schedule_executions = dict() mock_schedule_execution = scheduler._ScheduleExecution()", "disable_schedule.call_count # WHEN await scheduler.save_schedule(schedule, is_enabled_modified=False) # THEN assert 1", "of schedules assert 
len(scheduler._storage_async.schedules) == len(scheduler._schedules) mocker.patch.object(scheduler, '_ready', True) #", "async def test__wait_for_task_completion(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage", "return_value=asyncio.ensure_future(mock_task())) mocker.patch.object(scheduler, '_purge_tasks_task', return_value=asyncio.ensure_future(asyncio.sleep(.1))) mocker.patch.object(scheduler, '_scheduler_loop_task', return_value=asyncio.ensure_future(asyncio.sleep(.1))) current_time = time.time()", "audit_logger = mocker.patch.object(AuditLogger, 'information', return_value=asyncio.ensure_future(mock_task())) first_task = mocker.patch.object(scheduler, '_schedule_first_task') resume_sch", "return_value=asyncio.ensure_future(mock_task())) get_cat = mocker.patch.object(ConfigurationManager, \"get_category_all_items\", return_value=_rv) # WHEN assert scheduler._max_running_tasks", "'Query failed: %s', 'schedules' log_exception.assert_called_once_with(*log_args) @pytest.mark.asyncio async def test__read_storage(self, mocker):", "assert 1 == cr_cat.call_count assert 1 == get_cat.call_count assert scheduler._max_running_tasks", "\"Schedule successfully disabled\" assert (scheduler._schedules[sch_id]).id == sch_id assert (scheduler._schedules[sch_id]).enabled is", "\"process_name\", \"schedule_name\", \"state\", {\"alias\": \"start_time\", \"column\": \"start_time\", \"format\": \"YYYY-MM-DD HH24:MI:SS.MS\"},", "= MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) # WHEN #", "3600) assert schedule.exclusive is True assert schedule.enabled is True assert", "1234 await scheduler.save_schedule(temp_schedule) del temp_schedule assert str(ex).endswith('repeat must be of", "= 1234 await scheduler.save_schedule(temp_schedule) del temp_schedule assert str(ex).endswith('time must be", "True assert message == \"Schedule successfully enabled\" assert 
(scheduler._schedules[sch_id]).id ==", "1 == len(tasks) assert schedule.process_name == tasks[0].process_name assert tasks[0].reason is", "\"--debug_level\", \"1\" ] }, ] tasks = [ { \"id\":", "from unittest.mock import MagicMock, call import sys import copy import", "3.8: patch() now returns an AsyncMock if the target is", "returns an AsyncMock if the target is an async function.", "to PI north' in args2 # As part of scheduler._get_schedules(),", "log_debug = await self.scheduler_fixture(mocker) schedule_id = uuid.uuid4() # WHEN #", "assert tasks[0].cancel_requested is None assert tasks[0].start_time is not None assert", "if sys.version_info.major == 3 and sys.version_info.minor >= 8: _rv =", "'_terminate_child_processes') mocker.patch.object(asyncio, 'create_subprocess_exec', return_value=_rv) await scheduler._get_schedules() schedule = scheduler._ScheduleRow( id=uuid.UUID(\"2b614d26-760f-11e7-b5a5-be2e44b06b34\"),", "= await scheduler.get_tasks() # THEN assert schedule.process_name == tasks[0].process_name assert", "task %s pid %s\\n%s\" in args assert 'OMF to PI", "{ \"name\": \"North Readings to PI\", \"script\": [ \"tasks/north\", \"--stream_id\",", "is True assert 1 == queue_task.call_count calls = [call(\"Enabled Schedule", "assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) assert 1 == log_exception.call_count log_params =", "ex: temp_schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.time = 1234 await scheduler.save_schedule(temp_schedule)", "not None @pytest.mark.asyncio async def test__check_schedules(self, mocker): # TODO: Mandatory", "self.scheduler_fixture(mocker) sch_id = uuid.UUID(\"ada12840-68d3-11e7-907b-a6006ad3dba0\") #Coap mocker.patch.object(scheduler, 'queue_task', return_value=asyncio.ensure_future(mock_task())) # WHEN", "# purge schedule # WHEN schedule = await scheduler.get_schedule(schedule_id) #", "sch_id assert (scheduler._schedules[sch_id]).enabled is True log_params = 
\"Schedule %s already", "9999, \"address\": \"0.0.0.0\", \"protocol\": \"http\" } class MockStorageAsync(StorageClientAsync): schedules =", "@pytest.mark.asyncio @pytest.mark.parametrize(\"test_interval, is_exception\", [ ('\"Blah\" 0 days', True), ('12:30:11', False),", "return \"\" async def mock_process(): m = MagicMock() m.pid =", "schedule with pytest.raises(RuntimeWarning): await scheduler.delete_schedule(sch_id) # THEN # Now confirm", "0, \"exclusive\": \"t\", \"enabled\": \"f\" }, ] scheduled_processes = [", "\"259b8570-65c1-4b92-8c62-e9642631a600\", \"process_name\": \"North Readings to PI\", \"state\": 1, \"start_time\": \"2018-02-06", "# THEN assert scheduler._purge_tasks_task is None assert scheduler._last_task_purge_time is not", "def test__schedule_next_timed_task(self, mocker): # TODO: Mandatory - Add negative tests", "assert tasks[0].exit_code is '0' @pytest.mark.asyncio async def test_get_tasks_exception(self, mocker): #", "Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) #", "== len(scheduler._schedule_executions[schedule.id].task_processes) # WHEN await scheduler._start_task(schedule) # THEN # Confirm", "}, } # Changed in version 3.8: patch() now returns", "\"purge\" @pytest.mark.asyncio async def test_get_schedule_exception(self, mocker): # GIVEN scheduler, schedule,", "assert task.state is not None assert task.cancel_requested is None assert", "\"info\") log_exception = mocker.patch.object(scheduler._logger, \"exception\") mocker.patch.object(scheduler, '_scheduler_loop', return_value=asyncio.ensure_future(mock_task())) mocker.patch.object(scheduler, '_resume_check_schedules',", "'_resume_check_schedules') mocker.patch.object(scheduler, '_process_scripts', return_value=\"North Readings to PI\") mocker.patch.object(scheduler, '_wait_for_task_completion') #", "WHEN scheduler._check_purge_tasks() # 
THEN assert scheduler._purge_tasks_task is not None @pytest.mark.asyncio", "def scheduler_fixture(self, mocker): # Changed in version 3.8: patch() now", "return { \"count\": len(MockStorageAsync.tasks), \"rows\": MockStorageAsync.tasks } @classmethod async def", "'_ready', True) # Confirm there are 14 schedules assert len(scheduler._storage_async.schedules)", "log_info.call_count args0, kwargs0 = log_info.call_args_list[0] args1, kwargs1 = log_info.call_args_list[1] args2,", "log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) log_debug = mocker.patch.object(scheduler._logger, 'debug',", "* from fledge.services.core.scheduler.exceptions import * from fledge.common.storage_client.storage_client import StorageClientAsync __author__", "{ \"id\": \"259b8570-65c1-4b92-8c62-e9642631a600\", \"process_name\": \"North Readings to PI\", \"state\": 1,", "function. if sys.version_info.major == 3 and sys.version_info.minor >= 8: _rv", "# THEN assert earliest_start_time is not None assert 3 ==", "day 12:30:11', False), ('1 day 12:40:11', False), ('2 days', True),", "in 'await' expression\")] log_exception.assert_has_calls(calls) @pytest.mark.asyncio async def test_get_scheduled_processes(self, mocker): #", "@pytest.mark.asyncio async def test__get_process_scripts(self, mocker): # GIVEN scheduler = Scheduler()", "current_time = time.time() mocker.patch.multiple(scheduler, _core_management_port=9999, _core_management_host=\"0.0.0.0\", _start_time=current_time - 3600, _paused=False,", "# WHEN await scheduler.enable_schedule(random_schedule_id) # THEN log_params = \"No such", "repeat_seconds=30, time=None, day=None, exclusive=True, enabled=True) mocker.patch.object(scheduler, '_ready', True) mocker.patch.object(scheduler, '_resume_check_schedules')", "= mocker.patch.object(scheduler._logger, \"info\") mock_schedules = dict() mock_schedule = scheduler._ScheduleRow( id=uuid.UUID(\"2b614d26-760f-11e7-b5a5-be2e44b06b34\"),", "log_info = 
mocker.patch.object(scheduler._logger, \"info\") current_time = time.time() mocker.patch.multiple(scheduler, _max_running_tasks=10, _start_time=current_time-3600)", "scheduler._max_running_tasks is None assert scheduler._max_completed_task_age is None await scheduler._read_config() #", "mocker): # TODO: Mandatory - Add negative tests for full", "WHEN await scheduler.save_schedule(schedule) # THEN assert len(scheduler._storage_async.schedules) + 1 ==", "current_time = time.time() mocker.patch.object(scheduler, '_schedule_first_task') mocker.patch.object(scheduler, '_scheduler_loop', return_value=asyncio.ensure_future(mock_task())) mocker.patch.multiple(scheduler, _core_management_port=9999,", "# THEN with pytest.raises(ScheduleNotFoundError): schedule = await scheduler.get_schedule(schedule_id) @pytest.mark.asyncio async", "async def test__check_schedules(self, mocker): # TODO: Mandatory - Add negative", "await mock_process() else: _rv = asyncio.ensure_future(mock_process()) mocker.patch.object(asyncio, 'create_subprocess_exec', return_value=_rv) mocker.patch.object(asyncio,", "yet assert 0 == len(scheduler._schedule_executions[schedule.id].task_processes) # WHEN await scheduler._start_task(schedule) #", "0 == disable_schedule.call_count @pytest.mark.asyncio async def test_save_schedule_update_with_enable_modified(self, mocker): @asyncio.coroutine def", "@pytest.mark.asyncio async def test__terminate_child_processes(self, mocker): pass class MockStorage(StorageClientAsync): def __init__(self,", "# WHEN # THEN with pytest.raises(TaskNotFoundError) as excinfo: tasks =", "await scheduler._read_config() # THEN assert 1 == cr_cat.call_count assert 1", "sch_id = uuid.UUID(\"2176eb68-7303-11e7-8cf7-a6006ad3dba0\") # stat collector sch = scheduler._schedules[sch_id] sch_execution", "OMF to PI North # WHEN status, message = await", "test__read_storage(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None,", "= uuid.uuid4() 
# WHEN await scheduler.enable_schedule(random_schedule_id) # THEN log_params =", "is None @pytest.mark.asyncio async def test_get_task(self, mocker): # GIVEN scheduler,", "pytest.raises(TaskNotFoundError) as excinfo: tasks = await scheduler.get_task(uuid.uuid4()) @pytest.mark.asyncio async def", "scheduler._start_task(schedule) # THEN # Confirm that task has started assert", "'_schedule_first_task') await scheduler._get_schedules() mocker.patch.object(scheduler, '_ready', True) mocker.patch.object(scheduler, '_task_processes') log_info =", "test_get_schedules(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug", "'_ready', False) mocker.patch.object(scheduler, '_paused', True) # WHEN # THEN with", "scheduler._check_processes_pending is True @pytest.mark.asyncio async def test__wait_for_task_completion(self, mocker): # GIVEN", "to PI north\", \"schedule_type\": 3, \"schedule_interval\": \"00:00:30\", \"schedule_time\": \"\", \"schedule_day\":", "assert tasks[0].reason is None assert tasks[0].state == Task.State.RUNNING assert tasks[0].cancel_requested", "mocker.patch.object(AuditLogger, 'information', return_value=asyncio.ensure_future(mock_task())) log_info = mocker.patch.object(scheduler._logger, \"info\") sch_id = uuid.UUID(\"2b614d26-760f-11e7-b5a5-be2e44b06b34\")", "mocker.patch.object(scheduler, '_scheduler_loop', return_value=asyncio.ensure_future(mock_task())) mocker.patch.object(scheduler, '_resume_check_schedules', return_value=asyncio.ensure_future(mock_task())) mocker.patch.object(scheduler, '_purge_tasks_task', return_value=asyncio.ensure_future(asyncio.sleep(.1))) mocker.patch.object(scheduler,", "started assert 1 == len(scheduler._schedule_executions[schedule.id].task_processes) task_id = list(scheduler._schedule_executions[schedule.id].task_processes.keys())[0] # WHEN", "args args, kwargs = log_info.call_args_list[0] assert \"Process started: Schedule '%s'", "assert 1 == 
len(scheduler._schedule_executions[schedule.id].task_processes) assert 1 == log_info.call_count # assert", "== schedule_row[9] @pytest.mark.asyncio async def test_get_schedules(self, mocker): # GIVEN scheduler,", "tasks[0].state == Task.State.RUNNING assert tasks[0].cancel_requested is None assert tasks[0].start_time is", "len(scheduler._storage_async.scheduled_processes) == len(scheduler._process_scripts) assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) @pytest.mark.asyncio @pytest.mark.skip(\"_mark_tasks_interrupted() not", "test test__read_config() mocker.patch.object(scheduler, '_read_config', return_value=asyncio.ensure_future(mock_task())) assert scheduler._ready is False #", "= scheduler._ScheduleRow( id=uuid.UUID(\"2b614d26-760f-11e7-b5a5-be2e44b06b34\"), process_name=\"North Readings to PI\", name=\"OMF to PI", "time=0, repeat=datetime.timedelta(seconds=30), repeat_seconds=30, exclusive=False, enabled=True, process_name='TestProcess') # WHEN # THEN", "len(scheduler._schedule_executions[schedule.id].task_processes) task_id = list(scheduler._schedule_executions[schedule.id].task_processes.keys())[0] # WHEN tasks = await scheduler.get_tasks()", "these exceptions are being raised despite mocking _purge_tasks_task, _scheduler_loop_task calls", "async def test_save_schedule_new(self, mocker): @asyncio.coroutine def mock_coro(): return \"\" #", "north\", type=Schedule.Type.INTERVAL, repeat=datetime.timedelta(seconds=30), repeat_seconds=30, time=None, day=None, exclusive=True, enabled=True) mocker.patch.object(scheduler, '_ready',", "# WHEN tasks = await scheduler.get_tasks() # THEN assert schedule.process_name", "'OMF to PI north' in args2 @pytest.mark.asyncio async def test__schedule_next_task(self,", "%s already enabled\", str(sch_id) log_info.assert_called_with(*log_params) @pytest.mark.asyncio async def test_enable_schedule_wrong_schedule_id(self, mocker):", "query_payload): if table_name == 'tasks': return { \"count\": 
len(MockStorageAsync.tasks), \"rows\":", "mocker.patch.object(scheduler, '_resume_check_schedules') log_info = mocker.patch.object(scheduler._logger, \"info\") schedule_id = uuid.UUID(\"2b614d26-760f-11e7-b5a5-be2e44b06b34\") #", "Mandatory - Add negative tests for full code coverage #", "\"state\", {\"alias\": \"start_time\", \"column\": \"start_time\", \"format\": \"YYYY-MM-DD HH24:MI:SS.MS\"}, {\"alias\": \"end_time\",", "log_params = \"Queued schedule '%s' for execution\", 'purge' # log_info.assert_called_with(*log_params)", "{ \"description\": \"The maximum number of tasks that can be", "len(MockStorageAsync.schedules), \"rows\": MockStorageAsync.schedules } if table_name == 'scheduled_processes': return {", "test__schedule_first_task(self, mocker): # TODO: Mandatory - Add negative tests for", "time=None, day=None, exclusive=True, enabled=True) mock_schedules[mock_schedule.id] = mock_schedule mock_task_process = scheduler._TaskProcess()", "= uuid.UUID(\"cea17db8-6ccc-11e7-907b-a6006ad3dba0\") # purge schedule # WHEN schedule = await", "0 == len(scheduler._schedule_executions[schedule.id].task_processes) # WHEN await scheduler._start_task(schedule) # THEN #", "current_time = time.time() mocker.patch.multiple(scheduler, _max_running_tasks=10, _start_time=current_time-3600) await scheduler._get_schedules() sch_id =", "= mocker.patch.object(AuditLogger, 'information', return_value=asyncio.ensure_future(mock_task())) log_info = mocker.patch.object(scheduler._logger, \"info\") sch_id =", "schedule.process_name == tasks[0].process_name assert tasks[0].reason is '' assert tasks[0].state is", "def test__get_process_scripts(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage =", "listener south\", \"schedule_type\": 1, \"schedule_interval\": \"00:00:00\", \"schedule_time\": \"\", \"schedule_day\": 0,", "day 12:40:11', False), ('2 days', True), ('2 days 00:00:59', False),", "True, 'exclusive': False}})] audit_logger.assert_has_calls(calls, any_order=True) 
assert 1 == first_task.call_count assert", "False}})] audit_logger.assert_has_calls(calls, any_order=True) assert 1 == first_task.call_count assert 1 ==", "log_debug = mocker.patch.object(scheduler._logger, \"debug\", side_effect=Exception()) log_exception = mocker.patch.object(scheduler._logger, \"exception\") mocker.patch.object(scheduler,", "WHEN next_dt = datetime.datetime.fromtimestamp(sch_execution.next_start_time) next_dt += datetime.timedelta(seconds=sch.repeat_seconds) scheduler._schedule_next_timed_task(sch, sch_execution, next_dt)", "async def test__read_storage(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage", "disable_schedule = mocker.patch.object(scheduler, \"disable_schedule\", return_value=mock_coro()) # WHEN await scheduler.save_schedule(schedule) #", "args @pytest.mark.asyncio async def test_cancel_task_exception(self, mocker): # GIVEN scheduler, schedule,", "schedule.schedule_id == schedule_id assert schedule.name == \"purge\" assert schedule.schedule_type ==", "(scheduler._schedules[sch_id]).enabled is False log_params = \"Schedule %s already disabled\", str(sch_id)", "test_schedule_row_to_schedule(self, mocker): # GIVEN scheduler = Scheduler() schedule_id = uuid.uuid4()", "have a status of running\", \"type\": \"integer\", \"default\": str(Scheduler._DEFAULT_MAX_COMPLETED_TASK_AGE_DAYS), \"value\":", "await asyncio.create_subprocess_exec(\"sleep\", \".1\") mock_task_process.schedule = mock_schedule mock_task_id = uuid.uuid4() mock_task_process.task_id", "None assert scheduler._last_task_purge_time is not None @pytest.mark.asyncio async def test__check_purge_tasks(self,", "'_ready', True) mocker.patch.object(scheduler, '_paused', False) mocker.patch.object(scheduler, '_process_scripts', return_value=\"North Readings to", "\"address\": \"0.0.0.0\", \"protocol\": \"http\" } class MockStorageAsync(StorageClientAsync): schedules = [", "with pytest.raises(NotReadyError) as excinfo: await scheduler.enable_schedule(uuid.uuid4()) with 
pytest.raises(NotReadyError) as excinfo:", "'schedules': return { \"count\": len(MockStorageAsync.schedules), \"rows\": MockStorageAsync.schedules } if table_name", "scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) await scheduler._get_process_scripts()", "ex: temp_schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.day = 0 temp_schedule.time =", "calls = [call('SCHCH', {'schedule': {'name': 'OMF to PI north', 'repeat':", "'_schedule_first_task') mocker.patch.object(scheduler, '_ready', True) mocker.patch.object(scheduler, '_resume_check_schedules') # WHEN # THEN", "[ ('\"Blah\" 0 days', True), ('12:30:11', False), ('0 day 12:30:11',", "@pytest.mark.asyncio async def test_stop(self, mocker): # TODO: Mandatory - Add", "with pytest.raises(Exception): await scheduler._get_process_scripts() log_args = 'Query failed: %s', 'scheduled_processes'", "with pytest.raises(ValueError) as ex: temp_schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.exclusive =", "def test__start_task(self, mocker): # TODO: Mandatory - Add negative tests", "table that do not have a status of running\", \"type\":", "calls = [call('An exception was raised by Scheduler._purge_tasks %s', \"object", "assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) mocker.patch.object(scheduler, '_ready', True) # WHEN #", "== len(scheduler._schedules) mocker.patch.object(scheduler, '_ready', True) # WHEN # Now delete", "'_scheduler_loop', return_value=asyncio.ensure_future(mock_task())) mocker.patch.object(scheduler, '_resume_check_schedules', return_value=asyncio.ensure_future(mock_task())) mocker.patch.object(scheduler, '_purge_tasks_task', return_value=asyncio.ensure_future(asyncio.sleep(.1))) mocker.patch.object(scheduler, '_scheduler_loop_task',", "self.scheduler_fixture(mocker) 
mocker.patch.object(scheduler, '_ready', False) mocker.patch.object(scheduler, '_paused', True) # WHEN #", "\"object MagicMock can't be used in 'await' expression\"), call('An exception", "with pytest.raises(ScheduleNotFoundError) as excinfo: await scheduler.delete_schedule(uuid.uuid4()) @pytest.mark.asyncio async def test_get_running_tasks(self,", "is 0 # 0 for Interval Schedule assert schedule.repeat ==", "await scheduler.save_schedule(schedule, is_enabled_modified=True) # THEN assert len(scheduler._storage_async.schedules) + 1 ==", "exceptions are being raised despite mocking _purge_tasks_task, _scheduler_loop_task calls =", "'%s'\\n\", 'OMF to PI north', '2b614d26-760f-11e7-b5a5-be2e44b06b34', 'North Readings to PI')]", "scheduler._schedule_executions[schedule.id].task_processes[task_id].cancel_requested is not None assert 2 == log_info.call_count # args,", "= MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) # WHEN await", "None assert scheduler._max_completed_task_age is None await scheduler._read_config() # THEN assert", "= uuid.UUID(\"d1631422-9ec6-11e7-abc4-cec278b6b50a\") # backup await scheduler._get_schedules() # Confirm no. 
of", "del temp_schedule assert str(ex).endswith(\"name can not be empty\") with pytest.raises(ValueError)", "assert 1 == resume_sch.call_count assert 0 == enable_schedule.call_count assert 0", "= await self.scheduler_fixture(mocker) sch_id = uuid.UUID(\"d1631422-9ec6-11e7-abc4-cec278b6b50a\") # backup await scheduler._get_schedules()", "pytest.raises(NotReadyError) as excinfo: await scheduler.get_running_tasks() with pytest.raises(NotReadyError) as excinfo: await", "== schedule_row[1] assert schedule.schedule_type == schedule_row[2] assert schedule_row[3] is 0", "] def __init__(self, core_management_host=None, core_management_port=None): super().__init__(core_management_host, core_management_port) def _get_storage_service(self, host,", "queued for schedule with pytest.raises(KeyError) as excinfo: assert scheduler._schedule_executions[schedule.id] is", "task.cancel_requested is None assert task.start_time is not None assert task.end_time", "Readings to OCS\", \"script\": [ \"tasks/north\", \"--stream_id\", \"4\", \"--debug_level\", \"1\"", "schedule_row[8] assert schedule.process_name == schedule_row[9] @pytest.mark.asyncio async def test_get_schedules(self, mocker):", "Now confirm no schedule is deleted assert len(scheduler._storage_async.schedules) == len(scheduler._schedules)", "MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.object(scheduler, '_schedule_first_task') mocker.patch.object(scheduler, '_ready',", "@classmethod async def delete_from_tbl(cls, table_name, condition=None): pass @classmethod async def", "# WHEN task = await scheduler.get_task(task_id) # THEN assert schedule.process_name", "repeat=10, repeat_seconds=10, exclusive=False, enabled=True, process_name='TestProcess') # WHEN schedule = scheduler._schedule_row_to_schedule(schedule_id,", "import uuid import time import json from unittest.mock import MagicMock,", 
"log_info.call_count # assert call(\"Queued schedule '%s' for execution\", 'OMF to", "assert 1 == len(scheduler._schedule_executions[schedule.id].task_processes) task_id = list(scheduler._schedule_executions[schedule.id].task_processes.keys())[0] # WHEN tasks", "log_debug @pytest.mark.asyncio async def test__resume_check_schedules(self, mocker): # GIVEN scheduler =", "Schedule) assert schedule.schedule_id == schedule_row[0] assert schedule.name == schedule_row[1] assert", "log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) schedule_id = uuid.UUID(\"cea17db8-6ccc-11e7-907b-a6006ad3dba0\")", "to PI\", \"script\": [ \"tasks/north\", \"--stream_id\", \"1\", \"--debug_level\", \"1\" ]", "@pytest.mark.asyncio async def test__check_schedules(self, mocker): # TODO: Mandatory - Add", "tasks = await scheduler.get_task(uuid.uuid4()) @pytest.mark.asyncio async def test_get_task_exception(self, mocker): #", "# THEN assert schedule.process_name == tasks[0].process_name assert tasks[0].reason is ''", "MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.multiple(scheduler, _purge_tasks_task=None, _last_task_purge_time=None) mocker.patch.object(scheduler,", "uuid.UUID(\"ada12840-68d3-11e7-907b-a6006ad3dba0\") #Coap mocker.patch.object(scheduler, 'queue_task', return_value=asyncio.ensure_future(mock_task())) # WHEN status, message =", "\"column\": \"end_time\", \"format\": \"YYYY-MM-DD HH24:MI:SS.MS\"}, \"reason\", \"exit_code\"], \"limit\": 100} args,", "process '%s'\\n\", 'OMF to PI north', '2b614d26-760f-11e7-b5a5-be2e44b06b34', 'North Readings to", "north', 'repeat': 30.0, 'enabled': False, 'type': Schedule.Type.INTERVAL, 'exclusive': True, 'processName':", "\"name\": \"COAP\", \"script\": [ \"services/south\" ] }, { \"name\": \"North", "to PI' in args @pytest.mark.asyncio async def test_purge_tasks(self, mocker): #", "Add 
negative tests for full code coverage # GIVEN scheduler", "log_info = mocker.patch.object(scheduler._logger, \"info\") sch_id = uuid.UUID(\"d1631422-9ec6-11e7-abc4-cec278b6b50a\") # backup #", "# THEN assert isinstance(scheduler._schedule_executions[sch_id], scheduler._ScheduleExecution) # log_params = \"Queued schedule", "\"exclusive\": \"t\", \"enabled\": \"f\" }, { \"id\": \"ada12840-68d3-11e7-907b-a6006ad3dba0\", \"process_name\": \"COAP\",", "is True assert message == \"Schedule successfully enabled\" assert (scheduler._schedules[sch_id]).id", "scheduler._get_schedules() mocker.patch.object(scheduler, '_start_task', return_value=asyncio.ensure_future(mock_task())) # WHEN earliest_start_time = await scheduler._check_schedules()", "= await self.scheduler_fixture(mocker) schedule_id = uuid.uuid4() # WHEN # THEN", "stat collector sch = scheduler._schedules[sch_id] sch_execution = scheduler._schedule_executions[sch_id] time_before_call =", "is_enabled_modified=False) # THEN assert 1 == disable_schedule.call_count @pytest.mark.asyncio async def", "= mocker.patch.object(scheduler._logger, \"exception\") # WHEN # THEN with pytest.raises(Exception): await", "# THEN log_params = \"No such Schedule %s\", str(random_schedule_id) log_exception.assert_called_with(*log_params)", "'queue_task', return_value=asyncio.ensure_future(mock_task())) audit_logger = mocker.patch.object(AuditLogger, 'information', return_value=asyncio.ensure_future(mock_task())) # WHEN status,", "as excinfo: await scheduler.delete_schedule(uuid.uuid4()) @pytest.mark.asyncio async def test_get_running_tasks(self, mocker): #", "None assert task.cancel_requested is None assert task.start_time is not None", "await self.scheduler_fixture(mocker) log_debug = mocker.patch.object(scheduler._logger, 'debug', side_effect=Exception()) # WHEN with", "\"protocol\": \"http\" } class MockStorageAsync(StorageClientAsync): schedules = [ { \"id\":", "import sys import copy import pytest from 
fledge.services.core.scheduler.scheduler import Scheduler,", "def test__resume_check_schedules(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage =", "repeat_seconds=10, exclusive=False, enabled=True, process_name='TestProcess') # WHEN schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row)", "\"description\": \"The maximum number of tasks that can be running", "scheduler._get_schedules() assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) @pytest.mark.asyncio async def test__get_schedules_exception(self, mocker):", "= MockStorageAsync(core_management_host=None, core_management_port=None) await scheduler._get_process_scripts() mocker.patch.object(scheduler, '_ready', True) # WHEN", "fledge.common.storage_client.storage_client import StorageClientAsync __author__ = \"<NAME>\" __copyright__ = \"Copyright (c)", "MockStorageAsync(core_management_host=None, core_management_port=None) # WHEN # Check IF part mocker.patch.object(scheduler, '_scheduler_loop_sleep_task',", "and assert that the North task has been queued await", "assert (scheduler._schedules[sch_id]).id == sch_id assert (scheduler._schedules[sch_id]).enabled is False log_params =", "= Scheduler() schedule_id = uuid.uuid4() schedule_row = scheduler._ScheduleRow( id=schedule_id, name='Test", "'' assert task.state is not None assert task.cancel_requested is None", "log_exception.assert_called_once_with(*log_args) @pytest.mark.asyncio async def test__read_storage(self, mocker): # GIVEN scheduler =", "pytest.raises(NotReadyError) as excinfo: await scheduler.get_schedules() with pytest.raises(NotReadyError) as excinfo: await", "await scheduler.delete_schedule(uuid.uuid4()) @pytest.mark.asyncio async def test_get_running_tasks(self, mocker): # GIVEN scheduler,", "scheduler.save_schedule(schedule) # THEN assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) assert 1 ==", "@pytest.mark.asyncio async def test_queue_task(self, mocker): # 
GIVEN scheduler = Scheduler()", "log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) # WHEN # THEN", "is None calls = [call('Processing stop request'), call('Stopped')] log_info.assert_has_calls(calls, any_order=True)", "time_after_call > time_before_call assert 3 == log_info.call_count args0, kwargs0 =", "def test__terminate_child_processes(self, mocker): pass class MockStorage(StorageClientAsync): def __init__(self, core_management_host=None, core_management_port=None):", "core_management_port=None) log_info = mocker.patch.object(scheduler._logger, \"info\") current_time = time.time() mocker.patch.multiple(scheduler, _max_running_tasks=10,", "= uuid.uuid4() with pytest.raises(Exception) as excinfo: await scheduler.get_task(task_id) # THEN", "== tasks[0].process_name assert tasks[0].reason is None assert tasks[0].state == Task.State.RUNNING", "len(scheduler._schedules) @pytest.mark.asyncio @pytest.mark.skip(\"_mark_tasks_interrupted() not implemented in main Scheduler class.\") async", "Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) cr_cat", "@pytest.mark.asyncio async def test_disable_schedule(self, mocker): # GIVEN scheduler = Scheduler()", "mocker.patch.object(scheduler._logger, \"info\") current_time = time.time() mocker.patch.multiple(scheduler, _max_running_tasks=10, _start_time=current_time) await scheduler._get_schedules()", "log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) # Assert that there", "_get_storage_service(self, host, port): return { \"id\": uuid.uuid4(), \"name\": \"Fledge Storage\",", "tasks[0].reason is '' assert tasks[0].state is not None assert tasks[0].cancel_requested", "= MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) 
mocker.patch.multiple(scheduler, _ready=True, _paused=False)", "assert \"Process started: Schedule '%s' process '%s' task %s pid", "@pytest.mark.asyncio async def test_save_schedule_new(self, mocker): @asyncio.coroutine def mock_coro(): return \"\"", "'exclusive': True, 'processName': 'North Readings to PI'}})] audit_logger.assert_has_calls(calls, any_order=True) @pytest.mark.asyncio", "\"info\") await scheduler._get_schedules() sch_id = uuid.UUID(\"cea17db8-6ccc-11e7-907b-a6006ad3dba0\") # backup mocker.patch.object(scheduler, '_ready',", "THEN assert len(scheduler._storage_async.schedules) == len(schedules) @pytest.mark.asyncio async def test_get_schedule(self, mocker):", "async def test_save_schedule_update(self, mocker): @asyncio.coroutine def mock_coro(): return \"\" #", "('2 days 00:00:59', False), ('00:25:61', True) ]) async def test__get_schedules(self,", "\"schedule_type\": 2, \"schedule_interval\": \"00:00:15\", \"schedule_time\": \"00:00:15\", \"schedule_day\": 3, \"exclusive\": \"f\",", "# WHEN processes = await scheduler.get_scheduled_processes() # THEN assert len(scheduler._storage_async.scheduled_processes)", "assert 1 == len(tasks) assert schedule.process_name == tasks[0].process_name assert tasks[0].reason", "Now delete schedule with pytest.raises(RuntimeWarning): await scheduler.delete_schedule(sch_id) # THEN #", "# log_info.assert_called_with(*log_params) @pytest.mark.asyncio async def test_queue_task_schedule_not_found(self, mocker): # GIVEN scheduler", "'North Readings to PI' in args @pytest.mark.asyncio async def test_purge_tasks(self,", "in args args, kwargs = log_info.call_args_list[1] assert \"Stopping process: Schedule", "sys.version_info.minor >= 8: _rv = await mock_process() else: _rv =", "log_params = \"Schedule %s already disabled\", str(sch_id) log_info.assert_called_with(*log_params) @pytest.mark.asyncio async", "current_time = time.time() mocker.patch.multiple(scheduler, _max_running_tasks=10, _start_time=current_time) await 
scheduler._get_schedules() sch_id =", "table_name, payload): pass @classmethod async def update_tbl(cls, table_name, payload): #", "#Coap await scheduler._get_schedules() mocker.patch.object(scheduler, '_ready', True) # Confirm there are", "async def test__get_process_scripts_exception(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage", "mocker.patch.object(scheduler._logger, \"info\") sch_id = uuid.UUID(\"2b614d26-760f-11e7-b5a5-be2e44b06b34\") # OMF to PI North", "= MockStorageAsync(core_management_host=None, core_management_port=None) # WHEN await scheduler._get_process_scripts() # THEN assert", "assert scheduler._schedule_executions[schedule.id].task_processes[task_id].cancel_requested is not None assert 2 == log_info.call_count #", "\"enabled\": \"f\" }, ] scheduled_processes = [ { \"name\": \"purge\",", "def test_cancel_task_exception(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error,", "= \"No such Schedule %s\", str(random_schedule_id) log_exception.assert_called_with(*log_params) @pytest.mark.asyncio async def", "schedule_row = scheduler._ScheduleRow( id=schedule_id, name='Test Schedule', type=Schedule.Type.INTERVAL, day=0, time=0, repeat=10,", "\"North Readings to PI\", \"script\": [ \"tasks/north\", \"--stream_id\", \"1\", \"--debug_level\",", "{'name': 'backup hourly', 'type': Schedule.Type.INTERVAL, 'processName': 'backup', 'exclusive': True, 'repeat':", "@pytest.mark.skip(\"Need a suitable fixture\") @pytest.mark.asyncio async def test_get_task_not_found(self, mocker): #", "== len(processes) @pytest.mark.asyncio async def test_schedule_row_to_schedule(self, mocker): # GIVEN scheduler", "def test_delete_schedule_not_found(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error,", "test__read_config(self, mocker): async def get_cat(): return { \"max_running_tasks\": { \"description\":", "processes should be left pending assert 0 == len(scheduler._task_processes) assert", 
"_last_task_purge_time=None) mocker.patch.object(scheduler, 'purge_tasks', return_value=asyncio.ensure_future(mock_task())) # WHEN scheduler._check_purge_tasks() # THEN assert", "has started assert 1 == len(scheduler._schedule_executions[schedule.id].task_processes) assert 1 == log_info.call_count", "= await scheduler.enable_schedule(sch_id) # THEN assert status is True assert", "assert that the North task has been queued await scheduler.queue_task(schedule.id)", "'exclusive': True, 'repeat': 3600.0, 'enabled': True}})] audit_logger.assert_has_calls(calls, any_order=True) @pytest.mark.asyncio async", "\"\", \"schedule_day\": 0, \"exclusive\": \"t\", \"enabled\": \"f\" }, { \"id\":", "PI\") mocker.patch.object(scheduler, '_wait_for_task_completion', return_value=asyncio.ensure_future(mock_task())) mocker.patch.object(scheduler, '_terminate_child_processes') mocker.patch.object(asyncio, 'create_subprocess_exec', return_value=_rv) await", "table_name == \"schedules\": return {\"count\": 1} @classmethod async def delete_from_tbl(cls,", "}, { \"id\": \"ada12840-68d3-11e7-907b-a6006ad3dba0\", \"process_name\": \"COAP\", \"schedule_name\": \"COAP listener south\",", "# THEN assert retval is True assert scheduler._schedule_executions is None", "== schedule_row[5] assert schedule.exclusive == schedule_row[7] assert schedule.enabled == schedule_row[8]", "\".1\") mock_task_process.schedule = mock_schedule mock_task_id = uuid.uuid4() mock_task_process.task_id = mock_task_id", "'_schedule_first_task') resume_sch = mocker.patch.object(scheduler, '_resume_check_schedules') log_info = mocker.patch.object(scheduler._logger, \"info\") schedule_id", "command: %s', 'scheduled_processes'), call('Database command: %s', 'schedules')] log_debug.assert_has_calls(calls, any_order=True) @pytest.mark.asyncio", "scheduler._paused is False assert scheduler._start_time is None calls = [call('Processing", "# Check IF part mocker.patch.object(scheduler, '_scheduler_loop_sleep_task', 
asyncio.Task(asyncio.sleep(5))) scheduler._resume_check_schedules() # THEN", "scheduler.get_task(uuid.uuid4()) @pytest.mark.asyncio async def test_get_task_exception(self, mocker): # GIVEN scheduler, schedule,", "exclusive=False, enabled=True, process_name='TestProcess') schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) enable_schedule = mocker.patch.object(scheduler,", "scheduled_processes = [ { \"name\": \"purge\", \"script\": [ \"tasks/purge\" ]", "pytest.raises(Exception): await scheduler._get_schedules() assert 1 == log_exception.call_count else: await scheduler._get_schedules()", "'%s' for execution\", 'OMF to PI north') == args args,", "log_debug = mocker.patch.object(scheduler._logger, 'debug', side_effect=Exception()) # WHEN with pytest.raises(Exception) as", "to PI north', 'repeat': 30.0, 'enabled': False, 'type': Schedule.Type.INTERVAL, 'exclusive':", "{\"count\": 1} @classmethod async def delete_from_tbl(cls, table_name, condition=None): pass @classmethod", "True # WHEN await scheduler.queue_task(sch_id) # THEN assert isinstance(scheduler._schedule_executions[sch_id], scheduler._ScheduleExecution)", "mock_schedule_executions[mock_schedule.id] = mock_schedule_execution mock_schedule_executions[mock_schedule.id].task_processes[mock_task_id] = mock_task_process mocker.patch.object(scheduler, '_resume_check_schedules') mocker.patch.object(scheduler, '_schedule_next_task')", "assert scheduler._start_time is None calls = [call('Processing stop request'), call('Stopped')]", "no task processes should be left pending assert 0 ==", "log_exception = mocker.patch.object(scheduler._logger, \"exception\") mocker.patch.object(scheduler, '_scheduler_loop', return_value=asyncio.ensure_future(mock_task())) mocker.patch.object(scheduler, '_resume_check_schedules', return_value=asyncio.ensure_future(mock_task()))", "async def insert_into_tbl(cls, table_name, payload): pass @classmethod async def update_tbl(cls,", "import * from 
fledge.common.storage_client.storage_client import StorageClientAsync __author__ = \"<NAME>\" __copyright__", "assert message == \"Schedule successfully enabled\" assert (scheduler._schedules[sch_id]).id == sch_id", "> time_before_call assert 4 == log_info.call_count args0, kwargs0 = log_info.call_args_list[0]", "test_disable_schedule_wrong_schedule_id(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None,", "return { \"id\": uuid.uuid4(), \"name\": \"Fledge Storage\", \"type\": \"Storage\", \"service_port\":", "assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) @pytest.mark.asyncio async def test__get_schedules_exception(self, mocker): #", "return_value=mock_coro()) # WHEN await scheduler.save_schedule(schedule) # THEN assert len(scheduler._storage_async.schedules) ==", "_max_running_tasks=10, _start_time=current_time) await scheduler._get_schedules() mocker.patch.object(scheduler, '_start_task', return_value=asyncio.ensure_future(mock_task())) # WHEN earliest_start_time", "def query_tbl(cls, table_name, query=None): if table_name == 'schedules': return {", "payload == p @pytest.mark.asyncio async def test_cancel_task_all_ok(self, mocker): # GIVEN", "test_save_schedule_exception(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug", "len(processes) @pytest.mark.asyncio async def test_schedule_row_to_schedule(self, mocker): # GIVEN scheduler =", "with pytest.raises(NotReadyError) as excinfo: await scheduler.get_scheduled_processes() with pytest.raises(NotReadyError) as excinfo:", "is not None assert tasks[0].end_time is not None assert tasks[0].exit_code", "log_exception = mocker.patch.object(scheduler._logger, \"exception\") # WHEN # THEN with pytest.raises(Exception):", "by Scheduler._scheduler_loop %s', \"object MagicMock can't be used in 'await'", "# WHEN await scheduler._read_storage() # THEN assert len(scheduler._storage_async.scheduled_processes) 
== len(scheduler._process_scripts)", "await scheduler.queue_task(schedule.id) assert isinstance(scheduler._schedule_executions[schedule.id], scheduler._ScheduleExecution) # Changed in version 3.8:", "be done\") async def test_remove_service_from_task_processes(self): pass @pytest.mark.asyncio async def test_disable_schedule(self,", "schedule.exclusive is True assert schedule.enabled is True assert schedule.process_name ==", "assert scheduler._schedule_executions[sch_id] is True # WHEN await scheduler.queue_task(sch_id) # THEN", "\"t\", \"enabled\": \"t\" }, { \"id\": \"5d7fed92-fb9a-11e7-8c3f-9a214cf093ae\", \"process_name\": \"North Readings", "PI')] log_info.assert_has_calls(calls) assert 1 == audit_logger.call_count calls = [call('SCHCH', {'schedule':", "= mocker.patch.object(scheduler._logger, \"info\") return scheduler, schedule, log_info, log_exception, log_error, log_debug", "mocker.patch.multiple(scheduler, _max_running_tasks=10, _start_time=current_time) await scheduler._get_schedules() sch_id = uuid.UUID(\"2176eb68-7303-11e7-8cf7-a6006ad3dba0\") # stat", "schedule_row) temp_schedule.repeat = 1234 await scheduler.save_schedule(temp_schedule) del temp_schedule assert str(ex).endswith('repeat", "<filename>tests/unit/python/fledge/services/core/scheduler/test_scheduler.py<gh_stars>10-100 # -*- coding: utf-8 -*- # FLEDGE_BEGIN # See:", "def test_get_running_tasks(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error,", "is None assert tasks[0].start_time is not None assert tasks[0].end_time is", "async function. 
if sys.version_info.major == 3 and sys.version_info.minor >= 8:", "await scheduler._get_schedules() mocker.patch.object(scheduler, '_start_task', return_value=asyncio.ensure_future(mock_task())) # WHEN earliest_start_time = await", "condition=None): pass @classmethod async def query_tbl_with_payload(cls, table_name, query_payload): if table_name", "coding: utf-8 -*- # FLEDGE_BEGIN # See: http://fledge-iot.readthedocs.io/ # FLEDGE_END", "# OMF to PI North # WHEN status, message =", "scheduler._schedules[sch_id] sch_execution = scheduler._schedule_executions[sch_id] time_before_call = sch_execution.next_start_time # WHEN next_dt", "sch_id assert (scheduler._schedules[sch_id]).enabled is True assert 1 == queue_task.call_count calls", "Scheduler._purge_tasks %s', \"object MagicMock can't be used in 'await' expression\"),", "log_debug = await self.scheduler_fixture(mocker) # WHEN # THEN with pytest.raises(ScheduleNotFoundError)", "= log_info.call_args_list[0] # assert (\"Queued schedule '%s' for execution\", 'OMF", "= mocker.patch.object(scheduler._logger, \"info\") current_time = time.time() mocker.patch.object(scheduler, '_schedule_first_task') mocker.patch.object(scheduler, '_scheduler_loop',", "\"column\": \"end_time\"}, \"reason\", \"exit_code\"], \"where\": {\"column\": \"id\", \"condition\": \"=\", \"value\":", "= datetime.datetime.fromtimestamp(current_time) mocker.patch.multiple(scheduler, _max_running_tasks=10, _start_time=current_time) await scheduler._get_schedules() sch_id = uuid.UUID(\"2176eb68-7303-11e7-8cf7-a6006ad3dba0\")", "\"2176eb68-7303-11e7-8cf7-a6006ad3dba0\", \"process_name\": \"stats collector\", \"schedule_name\": \"stats collection\", \"schedule_type\": 2, \"schedule_interval\":", "True assert 1 == queue_task.call_count calls = [call(\"Enabled Schedule '%s/%s'", "Check IF part mocker.patch.object(scheduler, '_scheduler_loop_sleep_task', asyncio.Task(asyncio.sleep(5))) scheduler._resume_check_schedules() # THEN assert", "\"protocol\": \"http\" 
} @classmethod async def insert_into_tbl(cls, table_name, payload): pass", "True) mocker.patch.object(scheduler, '_task_processes') log_info = mocker.patch.object(scheduler._logger, \"info\") sch_id = uuid.UUID(\"d1631422-9ec6-11e7-abc4-cec278b6b50a\")", "cr_cat = mocker.patch.object(ConfigurationManager, \"create_category\", return_value=asyncio.ensure_future(mock_task())) get_cat = mocker.patch.object(ConfigurationManager, \"get_category_all_items\", return_value=_rv)", "mocker.patch.multiple(scheduler, _max_running_tasks=10, _start_time=current_time-3600) await scheduler._get_schedules() sch_id = uuid.UUID(\"2176eb68-7303-11e7-8cf7-a6006ad3dba0\") # stat", "== schedule_row[2] assert schedule_row[3] is 0 # 0 for Interval", "= await scheduler._check_schedules() # THEN assert earliest_start_time is not None", "schedule less assert len(scheduler._storage_async.schedules) - 1 == len(scheduler._schedules) @pytest.mark.asyncio async", "== len(scheduler._schedule_executions[mock_schedule.id].task_processes) args, kwargs = log_info.call_args_list[0] assert 'OMF to PI", "THEN assert time_after_call > time_before_call assert 3 == log_info.call_count args0,", "already enabled\" assert (scheduler._schedules[sch_id]).id == sch_id assert (scheduler._schedules[sch_id]).enabled is True", "# GIVEN scheduler = Scheduler() schedule_id = uuid.uuid4() schedule_row =", "'%s' for execution\", 'OMF to PI north') == log_info.call_args_list[0] args,", "schedule_row[0] assert schedule.name == schedule_row[1] assert schedule.schedule_type == schedule_row[2] assert", "tests.\") async def test__scheduler_loop(self, mocker): pass @pytest.mark.asyncio async def test__schedule_next_timed_task(self,", "await scheduler.get_schedule(schedule_id) # THEN assert isinstance(schedule, Schedule) assert schedule.schedule_id ==", "resume_sch = mocker.patch.object(scheduler, '_resume_check_schedules') log_info = mocker.patch.object(scheduler._logger, \"info\") schedule_id =", "== sch_id assert 
(scheduler._schedules[sch_id]).enabled is True log_params = \"Schedule %s", "= await self.scheduler_fixture(mocker) # WHEN # THEN with pytest.raises(ScheduleNotFoundError) as", "list(scheduler._schedule_executions[schedule.id].task_processes.keys())[0] # WHEN task = await scheduler.get_task(task_id) # THEN assert", "log_info.assert_has_calls(calls, any_order=True) calls = [call('Database command: %s', 'scheduled_processes'), call('Database command:", "assert (\"Queued schedule '%s' for execution\", 'OMF to PI north')", "type=Schedule.Type.INTERVAL, repeat=datetime.timedelta(seconds=30), repeat_seconds=30, time=None, day=None, exclusive=True, enabled=True) mock_schedules[mock_schedule.id] = mock_schedule", "(based on the start time), for a rows \" \"in", "Schedule', type=Schedule.Type.INTERVAL, day=0, time=0, repeat=datetime.timedelta(seconds=30), repeat_seconds=30, exclusive=False, enabled=True, process_name='TestProcess') schedule", "tasks\\n%s\" in args assert 'OMF to PI north' in args", "is None assert scheduler._schedules is None assert scheduler._process_scripts is None", "empty\") with pytest.raises(ValueError) as ex: temp_schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.name", "== len(tasks) assert schedule.process_name == tasks[0].process_name assert tasks[0].reason is None", "successfully enabled\" assert (scheduler._schedules[sch_id]).id == sch_id assert (scheduler._schedules[sch_id]).enabled is True", "test__check_purge_tasks(self, mocker): # TODO: Mandatory - Add negative tests for", "be between 1 and 7') @pytest.mark.asyncio @pytest.mark.skip(reason=\"To be done\") async", "core_management_port=None) mocker.patch.multiple(scheduler, _purge_tasks_task=None, _last_task_purge_time=None) mocker.patch.object(scheduler, 'purge_tasks', return_value=asyncio.ensure_future(mock_task())) # WHEN scheduler._check_purge_tasks()", "'OMF to PI north' in args2 @pytest.mark.asyncio @pytest.mark.skip(\"_scheduler_loop() not 
suitable", "str(Scheduler._DEFAULT_MAX_RUNNING_TASKS) }, \"max_completed_task_age_days\": { \"description\": \"The maximum age, in days", "== audit_logger.call_count calls = [call('SCHCH', {'schedule': {'name': 'backup hourly', 'type':", "None # WHEN await scheduler.cancel_task(task_id) # THEN assert scheduler._schedule_executions[schedule.id].task_processes[task_id].cancel_requested is", "been queued await scheduler.queue_task(schedule.id) assert isinstance(scheduler._schedule_executions[schedule.id], scheduler._ScheduleExecution) # Changed in", "mocker.patch.object(scheduler, \"disable_schedule\", return_value=mock_coro()) schedule_id = uuid.uuid4() schedule_row = scheduler._ScheduleRow( id=schedule_id,", "assert len(scheduler._storage_async.scheduled_processes) == len(scheduler._process_scripts) @pytest.mark.asyncio async def test__get_process_scripts_exception(self, mocker): #", "3600, _paused=False, _task_processes={}) # WHEN retval = await scheduler.stop() #", "= scheduler._ScheduleExecution() mock_schedule_executions[mock_schedule.id] = mock_schedule_execution mock_schedule_executions[mock_schedule.id].task_processes[mock_task_id] = mock_task_process mocker.patch.object(scheduler, '_resume_check_schedules')", "else: await scheduler._get_schedules() assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) @pytest.mark.asyncio async def", "\"disable_schedule\", return_value=mock_coro()) # WHEN await scheduler.save_schedule(schedule) # THEN assert len(scheduler._storage_async.schedules)", "MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.object(scheduler, '_schedule_first_task') mocker.patch.object(scheduler, '_ready', True) mocker.patch.object(scheduler, '_resume_check_schedules') #", "uuid.UUID(\"cea17db8-6ccc-11e7-907b-a6006ad3dba0\") # backup mocker.patch.object(scheduler, '_ready', True) mocker.patch.object(scheduler, '_resume_check_schedules') # Assert", "with pytest.raises(TaskNotFoundError) as 
excinfo: tasks = await scheduler.get_task(uuid.uuid4()) @pytest.mark.asyncio async", "scheduler._get_schedules() log_args = 'Query failed: %s', 'schedules' log_exception.assert_called_once_with(*log_args) @pytest.mark.asyncio async", "can be running at any given time\", \"type\": \"integer\", \"default\":", "# WHEN await scheduler.start() # THEN assert scheduler._ready is True", "Scheduler() schedule_id = uuid.uuid4() schedule_row = scheduler._ScheduleRow( id=schedule_id, name='Test Schedule',", "mocker.patch.object(MockStorageAsync, 'schedules', new_schedules) # WHEN # THEN if is_exception is", "= await scheduler.get_task(task_id) # THEN assert schedule.process_name == task.process_name assert", "def test_purge_tasks(self, mocker): # TODO: Mandatory - Add negative tests", "can not be None') with pytest.raises(ValueError) as ex: temp_schedule =", "collection' in args3 @pytest.mark.asyncio async def test__get_process_scripts(self, mocker): # GIVEN", "= await self.scheduler_fixture(mocker) # Assert that there is no North", "payload == p @pytest.mark.asyncio async def test_get_tasks(self, mocker): # GIVEN", "\"00:00:15\", \"schedule_day\": 3, \"exclusive\": \"f\", \"enabled\": \"t\" }, { \"id\":", "log_info = mocker.patch.object(scheduler._logger, \"info\") current_time = time.time() mocker.patch.multiple(scheduler, _max_running_tasks=10, _start_time=current_time)", "\"01:00:00\", \"schedule_time\": \"\", \"schedule_day\": 0, \"exclusive\": \"t\", \"enabled\": \"f\" },", "sch_execution.next_start_time # THEN assert time_after_call > time_before_call assert 4 ==", "\"process_name\": \"North Readings to PI\", \"schedule_name\": \"OMF to PI north\",", "assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) calls = [call('Starting'), call('Starting Scheduler: Management", "no schedule is deleted assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) assert 1", "assert task.reason is '' assert task.state is not None assert", "0 == 
len(scheduler._schedule_executions[mock_schedule.id].task_processes) args, kwargs = log_info.call_args_list[0] assert 'OMF to", "mock_task_process.process = await asyncio.create_subprocess_exec(\"sleep\", \".1\") mock_task_process.schedule = mock_schedule mock_task_id =", "= MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) await scheduler._get_process_scripts() mocker.patch.object(scheduler,", "# WHEN await scheduler.save_schedule(schedule, is_enabled_modified=True) # THEN assert len(scheduler._storage_async.schedules) ==", "THEN assert isinstance(schedule, Schedule) assert schedule.schedule_id == schedule_row[0] assert schedule.name", "mocker.patch.object(scheduler, '_schedule_first_task') mocker.patch.object(scheduler, '_ready', True) mocker.patch.object(scheduler, '_paused', False) mocker.patch.object(scheduler, '_process_scripts',", "import asyncio import datetime import uuid import time import json", "if table_name == \"schedules\": return {\"count\": 1} @classmethod async def", "schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) enable_schedule = mocker.patch.object(scheduler, \"enable_schedule\", return_value=mock_coro()) disable_schedule", "started assert 1 == len(scheduler._schedule_executions[schedule.id].task_processes) # WHEN tasks = await", "there is no task queued for mock_schedule with pytest.raises(KeyError) as", "test_purge_tasks(self, mocker): # TODO: Mandatory - Add negative tests for", "] scheduled_processes = [ { \"name\": \"purge\", \"script\": [ \"tasks/purge\"", "side_effect=Exception()) log_exception = mocker.patch.object(scheduler._logger, \"exception\") # WHEN # THEN with", "to PI north') == args args, kwargs = log_info.call_args_list[0] assert", "import datetime import uuid import time import json from unittest.mock", "Confirm that cancel request has not been made assert 
scheduler._schedule_executions[schedule.id].task_processes[task_id].cancel_requested", "isinstance(scheduler._schedule_executions[schedule.id], scheduler._ScheduleExecution) # Changed in version 3.8: patch() now returns", "with pytest.raises(NotReadyError) as excinfo: await scheduler.start() with pytest.raises(NotReadyError) as excinfo:", "= mocker.patch.object(scheduler, \"disable_schedule\", return_value=mock_coro()) # WHEN await scheduler.save_schedule(schedule) # THEN", "message == \"Schedule successfully disabled\" assert (scheduler._schedules[sch_id]).id == sch_id assert", "\"info\") current_time = time.time() mocker.patch.multiple(scheduler, _max_running_tasks=10, _start_time=current_time) await scheduler._get_schedules() sch_id", "schedule_row) # THEN assert isinstance(schedule, Schedule) assert schedule.schedule_id == schedule_row[0]", "\"schedule_name\": \"OMF to OCS north\", \"schedule_type\": 3, \"schedule_interval\": \"1 day", "not have a status of running\", \"type\": \"integer\", \"default\": str(Scheduler._DEFAULT_MAX_COMPLETED_TASK_AGE_DAYS),", "= dict() mock_schedule = scheduler._ScheduleRow( id=uuid.UUID(\"2b614d26-760f-11e7-b5a5-be2e44b06b34\"), process_name=\"North Readings to PI\",", "OCS\", \"schedule_name\": \"OMF to OCS north\", \"schedule_type\": 3, \"schedule_interval\": \"1", "'_schedule_first_task') log_exception = mocker.patch.object(scheduler._logger, \"exception\") new_schedules = copy.deepcopy(MockStorageAsync.schedules) new_schedules[5]['schedule_interval'] =", "to PI north', '2b614d26-760f-11e7-b5a5-be2e44b06b34', 'North Readings to PI')] log_info.assert_has_calls(calls) assert", "uuid.UUID(\"d1631422-9ec6-11e7-abc4-cec278b6b50a\") # backup queue_task = mocker.patch.object(scheduler, 'queue_task', return_value=asyncio.ensure_future(mock_task())) audit_logger =", "for this schedule at first with pytest.raises(KeyError) as excinfo: assert", "schedule, log_info, log_exception, log_error, log_debug = await 
self.scheduler_fixture(mocker) log_debug =", "uuid.uuid4() with pytest.raises(Exception) as excinfo: await scheduler.get_task(task_id) # THEN payload", "\"default\": str(Scheduler._DEFAULT_MAX_COMPLETED_TASK_AGE_DAYS), \"value\": str(Scheduler._DEFAULT_MAX_COMPLETED_TASK_AGE_DAYS) }, } # Changed in version", "audit_logger.call_count calls = [call('SCHCH', {'schedule': {'name': 'Test Schedule', 'enabled': True,", "self.scheduler_fixture(mocker) sch_id = uuid.UUID(\"d1631422-9ec6-11e7-abc4-cec278b6b50a\") # backup queue_task = mocker.patch.object(scheduler, 'queue_task',", "excinfo: await scheduler.cancel_task(uuid.uuid4()) @pytest.mark.skip(\"_terminate_child_processes() not fit for unit test.\") @pytest.mark.asyncio", "_rv = await get_cat() else: _rv = asyncio.ensure_future(get_cat()) # GIVEN", "and sys.version_info.minor >= 8: _rv = await get_cat() else: _rv", "# OMF to PI North schedule_row = scheduler._ScheduleRow( id=schedule_id, name='Test", "any_order=True) @pytest.mark.asyncio async def test_enable_schedule_already_enabled(self, mocker): # GIVEN scheduler, schedule,", "\"stats collection\", \"schedule_type\": 2, \"schedule_interval\": \"00:00:15\", \"schedule_time\": \"00:00:15\", \"schedule_day\": 3,", "assert 1 == audit_logger.call_count calls =[call('SCHAD', {'schedule': {'name': 'Test Schedule',", "uuid.UUID(\"d1631422-9ec6-11e7-abc4-cec278b6b50a\") # backup # WHEN status, message = await scheduler.disable_schedule(sch_id)", "assert (scheduler._schedules[sch_id]).id == sch_id assert (scheduler._schedules[sch_id]).enabled is False assert 2", "# WHEN next_dt = datetime.datetime.fromtimestamp(sch_execution.next_start_time) next_dt += datetime.timedelta(seconds=sch.repeat_seconds) scheduler._schedule_next_timed_task(sch, sch_execution,", "\"script\": [ \"tasks/north\", \"--stream_id\", \"1\", \"--debug_level\", \"1\" ] }, {", "args1 assert 'OMF to PI north' in args2 # As", "\"schedule_time\": \"\", \"schedule_day\": 0, \"exclusive\": \"t\", \"enabled\": 
\"t\" }, {", "True, 'processName': 'North Readings to PI'}})] audit_logger.assert_has_calls(calls, any_order=True) @pytest.mark.asyncio async", "'_schedule_first_task') resume_sch = mocker.patch.object(scheduler, '_resume_check_schedules') log_info = mocker.patch.object(scheduler._logger, \"info\") enable_schedule", "name='Test Schedule', type=Schedule.Type.TIMED, day=0, time=0, repeat=datetime.timedelta(seconds=30), repeat_seconds=30, exclusive=False, enabled=True, process_name='TestProcess')", "sch_id assert (scheduler._schedules[sch_id]).enabled is False log_params = \"Schedule %s already", "WHEN earliest_start_time = await scheduler._check_schedules() # THEN assert earliest_start_time is", "Find why these exceptions are being raised despite mocking _purge_tasks_task,", "log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) # WHEN schedules =", "uuid.UUID(\"2176eb68-7303-11e7-8cf7-a6006ad3dba0\") # stat collector sch = scheduler._schedules[sch_id] sch_execution = scheduler._schedule_executions[sch_id]", "be used in 'await' expression\")] log_exception.assert_has_calls(calls) @pytest.mark.asyncio async def test_get_scheduled_processes(self,", "north' in args assert 'North Readings to PI' in args", "WHEN # THEN with pytest.raises(NotReadyError) as excinfo: await scheduler.start() with", "scheduler._TaskProcess() mock_task_processes = dict() mock_task_process.process = await asyncio.create_subprocess_exec(\"sleep\", \".1\") mock_task_process.schedule", "== disable_schedule.call_count @pytest.mark.asyncio async def test_save_schedule_update(self, mocker): @asyncio.coroutine def mock_coro():", "confirm there is one schedule less assert len(scheduler._storage_async.schedules) - 1", "mocker.patch.object(scheduler, '_start_task', return_value=asyncio.ensure_future(mock_task())) # WHEN earliest_start_time = await scheduler._check_schedules() #", "log_debug = await self.scheduler_fixture(mocker) # WHEN schedules = await scheduler.get_schedules()", 
"\"process_name\", \"schedule_name\", \"state\", {\"alias\": \"start_time\", \"format\": \"YYYY-MM-DD HH24:MI:SS.MS\", \"column\": \"start_time\"},", "pytest.raises(NotReadyError) as excinfo: await scheduler.disable_schedule(uuid.uuid4()) with pytest.raises(NotReadyError) as excinfo: await", "message == \"Schedule is already enabled\" assert (scheduler._schedules[sch_id]).id == sch_id", "await scheduler.save_schedule(Schedule(Schedule.Type.INTERVAL)) with pytest.raises(NotReadyError) as excinfo: await scheduler.disable_schedule(uuid.uuid4()) with pytest.raises(NotReadyError)", "4, \"schedule_interval\": \"01:00:00\", \"schedule_time\": \"\", \"schedule_day\": 0, \"exclusive\": \"t\", \"enabled\":", "scheduler._schedules[sch_id] sch_execution = scheduler._schedule_executions[sch_id] # WHEN scheduler._schedule_first_task(sch, current_time) time_after_call =", "= 'Query failed: %s', 'scheduled_processes' log_exception.assert_called_once_with(*log_args) @pytest.mark.asyncio @pytest.mark.parametrize(\"test_interval, is_exception\", [", "first_task.call_count assert 1 == resume_sch.call_count assert 0 == enable_schedule.call_count assert", "\"\" async def mock_process(): m = MagicMock() m.pid = 9999", "sch_id = uuid.UUID(\"2b614d26-760f-11e7-b5a5-be2e44b06b34\") # OMF to PI North # WHEN", "# THEN payload = {\"return\": [\"id\", \"process_name\", \"schedule_name\", \"state\", {\"alias\":", "task.exit_code is '0' @pytest.mark.skip(\"Need a suitable fixture\") @pytest.mark.asyncio async def", "process '%s'\\n\", 'backup hourly', 'd1631422-9ec6-11e7-abc4-cec278b6b50a', 'backup')] log_info.assert_has_calls(calls, any_order=True) assert 1", "scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) cr_cat =", "len(scheduler._storage_async.schedules) == len(scheduler._schedules) # WHEN # Now delete schedule with", "assert 'Query failed: %s' == args[0] p 
= json.loads(args[1]) assert", "assert 1 == log_exception.call_count else: await scheduler._get_schedules() assert len(scheduler._storage_async.schedules) ==", "== get_cat.call_count assert scheduler._max_running_tasks is not None assert scheduler._max_completed_task_age is", "# FLEDGE_END import asyncio import datetime import uuid import time", "None assert tasks[0].state == Task.State.RUNNING assert tasks[0].cancel_requested is None assert", "_schedules=mock_schedules, _task_processes=mock_task_processes, _schedule_executions=mock_schedule_executions) mocker.patch.object(scheduler, '_process_scripts', return_value=\"North Readings to PI\") #", "be of type datetime.timedelta') with pytest.raises(ValueError) as ex: temp_schedule =", "scheduler.save_schedule(Schedule(Schedule.Type.INTERVAL)) with pytest.raises(NotReadyError) as excinfo: await scheduler.disable_schedule(uuid.uuid4()) with pytest.raises(NotReadyError) as", "is None assert scheduler._task_processes is None assert scheduler._schedules is None", "@pytest.mark.skip(reason=\"To be done\") async def test_remove_service_from_task_processes(self): pass @pytest.mark.asyncio async def", "temp_schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.name = \"\" await scheduler.save_schedule(temp_schedule) del", "schedule.enabled == schedule_row[8] assert schedule.process_name == schedule_row[9] @pytest.mark.asyncio async def", "there is no task queued for schedule with pytest.raises(KeyError) as", "that task has started assert 1 == len(scheduler._schedule_executions[schedule.id].task_processes) assert 1", "\"enabled\": \"t\" }, { \"id\": \"5d7fed92-fb9a-11e7-8c3f-9a214cf093ae\", \"process_name\": \"North Readings to", "side_effect=Exception()) # WHEN with pytest.raises(Exception) as excinfo: tasks = await", "\"schedule_interval\": \"00:00:15\", \"schedule_time\": \"00:00:15\", \"schedule_day\": 3, \"exclusive\": \"f\", \"enabled\": \"t\"", "'processName': 'TestProcess', 'type': 
Schedule.Type.INTERVAL, 'repeat': 30.0, 'enabled': True, 'exclusive': False}})]", "'OMF to PI north' in args assert 'North Readings to", "test__resume_check_schedules(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None,", "def test_schedule_row_to_schedule(self, mocker): # GIVEN scheduler = Scheduler() schedule_id =", "coverage # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None)", "THEN log_params = \"No such Schedule %s\", str(random_schedule_id) log_exception.assert_called_with(*log_params) @pytest.mark.asyncio", "Schedule %s\", str(random_schedule_id) log_exception.assert_called_with(*log_params) @pytest.mark.asyncio async def test_queue_task(self, mocker): #", "is True assert schedule.enabled is True assert schedule.process_name == \"purge\"", "# THEN with pytest.raises(TaskNotFoundError) as excinfo: tasks = await scheduler.get_task(uuid.uuid4())", "\"info\") current_time = time.time() mocker.patch.multiple(scheduler, _max_running_tasks=10, _start_time=current_time-3600) await scheduler._get_schedules() sch_id", "args2 # As part of scheduler._get_schedules(), scheduler._schedule_first_task() also gets executed,", "\"count\": len(MockStorageAsync.schedules), \"rows\": MockStorageAsync.schedules } if table_name == 'scheduled_processes': return", "schedule_row[9] @pytest.mark.asyncio async def test_get_schedules(self, mocker): # GIVEN scheduler, schedule,", "sch_execution.next_start_time # WHEN next_dt = datetime.datetime.fromtimestamp(sch_execution.next_start_time) next_dt += datetime.timedelta(seconds=sch.repeat_seconds) scheduler._schedule_next_timed_task(sch,", "log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) log_debug = mocker.patch.object(scheduler._logger,", "cancel request has not been made assert scheduler._schedule_executions[schedule.id].task_processes[task_id].cancel_requested is None", "3600) # 
TODO: Remove after implementation of above test test__read_config()", "return { \"max_running_tasks\": { \"description\": \"The maximum number of tasks", "any_order=True) @pytest.mark.asyncio async def test_stop(self, mocker): # TODO: Mandatory -", "await scheduler.get_schedule(schedule_id) @pytest.mark.asyncio async def test_save_schedule_new(self, mocker): @asyncio.coroutine def mock_coro():", "assert len(scheduler._storage_async.schedules) + 1 == len(scheduler._schedules) assert 1 == audit_logger.call_count", "\"schedule_name\": \"stats collection\", \"schedule_type\": 2, \"schedule_interval\": \"00:00:15\", \"schedule_time\": \"00:00:15\", \"schedule_day\":", "== len(scheduler._process_scripts) @pytest.mark.asyncio async def test__get_process_scripts_exception(self, mocker): # GIVEN scheduler", "assert schedule.process_name == \"purge\" @pytest.mark.asyncio async def test_get_schedule_exception(self, mocker): #", "log_exception = mocker.patch.object(scheduler._logger, \"exception\") mocker.patch.object(scheduler, '_schedule_first_task', side_effect=Exception()) # WHEN #", "WHEN schedule = await scheduler.get_schedule(schedule_id) # THEN assert isinstance(schedule, Schedule)", "await scheduler.save_schedule(temp_schedule) del temp_schedule assert str(ex).endswith('repeat must be of type", "\"01:00:00\", \"schedule_time\": \"\", \"schedule_day\": 0, \"exclusive\": \"t\", \"enabled\": \"t\" },", "3 and sys.version_info.minor >= 8: _rv = await get_cat() else:", "= time.time() mocker.patch.multiple(scheduler, _max_running_tasks=10, _start_time=current_time) await scheduler._get_schedules() sch_id = uuid.UUID(\"2176eb68-7303-11e7-8cf7-a6006ad3dba0\")", "log_info.assert_called_with(*log_params) @pytest.mark.asyncio async def test_queue_task_schedule_not_found(self, mocker): # GIVEN scheduler =", "# THEN # Confirm that task has started assert 1", "log_info.assert_called_with(*log_params) @pytest.mark.asyncio async def test_enable_schedule(self, mocker): # GIVEN 
scheduler, schedule,", "above test test__read_config() mocker.patch.object(scheduler, '_read_config', return_value=asyncio.ensure_future(mock_task())) assert scheduler._ready is False", "True), ('2 days 00:00:59', False), ('00:25:61', True) ]) async def", "audit_logger.call_count calls = [call('SCHCH', {'schedule': {'name': 'backup hourly', 'type': Schedule.Type.INTERVAL,", "[ \"services/south\" ] }, { \"name\": \"North Readings to PI\",", "call('Starting Scheduler: Management port received is %d', 9999)] log_info.assert_has_calls(calls, any_order=True)", "'type': Schedule.Type.TIMED}})] audit_logger.assert_has_calls(calls, any_order=True) assert 1 == first_task.call_count assert 1", "{ \"count\": len(MockStorageAsync.schedules), \"rows\": MockStorageAsync.schedules } if table_name == 'scheduled_processes':", "} if table_name == 'scheduled_processes': return { \"count\": len(MockStorageAsync.scheduled_processes), \"rows\":", "# backup mocker.patch.object(scheduler, '_ready', True) mocker.patch.object(scheduler, '_resume_check_schedules') # Assert that", "log_info = mocker.patch.object(scheduler._logger, \"info\") await scheduler._get_schedules() sch_id = uuid.UUID(\"cea17db8-6ccc-11e7-907b-a6006ad3dba0\") #", "enable_schedule.call_count assert 0 == disable_schedule.call_count @pytest.mark.asyncio async def test_save_schedule_update_with_enable_modified(self, mocker):", "None assert scheduler._max_completed_task_age is not None @pytest.mark.asyncio async def test_start(self,", "= mocker.patch.object(scheduler, '_resume_check_schedules') log_info = mocker.patch.object(scheduler._logger, \"info\") enable_schedule = mocker.patch.object(scheduler,", "_purge_tasks_task=None, _last_task_purge_time=None) mocker.patch.object(scheduler, 'purge_tasks', return_value=asyncio.ensure_future(mock_task())) # WHEN scheduler._check_purge_tasks() # THEN", "# Confirm there are 14 schedules assert len(scheduler._storage_async.schedules) == len(scheduler._schedules)", "\"rows\": 
MockStorageAsync.tasks } @classmethod async def query_tbl(cls, table_name, query=None): if", "def test_get_schedule_exception(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error,", "mocker.patch.object(scheduler._logger, \"info\") enable_schedule = mocker.patch.object(scheduler, \"enable_schedule\", return_value=mock_coro()) disable_schedule = mocker.patch.object(scheduler,", "assert 0 == disable_schedule.call_count @pytest.mark.asyncio async def test_save_schedule_new_with_enable_modified(self, mocker): @asyncio.coroutine", "sch_id = uuid.UUID(\"d1631422-9ec6-11e7-abc4-cec278b6b50a\") # backup # WHEN # THEN with", "side_effect=Exception()) # WHEN # THEN with pytest.raises(Exception): await scheduler._get_schedules() log_args", "WHEN processes = await scheduler.get_scheduled_processes() # THEN assert len(scheduler._storage_async.scheduled_processes) ==", "async def test_get_schedule(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception,", "THEN with pytest.raises(ScheduleNotFoundError) as excinfo: await scheduler.queue_task(uuid.uuid4()) @pytest.mark.asyncio async def", "'_start_task', return_value=asyncio.ensure_future(mock_task())) # WHEN earliest_start_time = await scheduler._check_schedules() # THEN", "= MockStorageAsync(core_management_host=None, core_management_port=None) log_info = mocker.patch.object(scheduler._logger, \"info\") current_time = time.time()", "log_error, log_debug = await self.scheduler_fixture(mocker) schedule_id = uuid.uuid4() schedule_row =", "core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) log_info = mocker.patch.object(scheduler._logger, \"info\") mock_schedules", "'_schedule_first_task') # log_info = mocker.patch.object(scheduler._logger, \"info\") await scheduler._get_schedules() sch_id =", "\"Copyright (c) 2017 OSIsoft, LLC\" __license__ = \"Apache 2.0\" __version__", "{ \"id\": 
\"cea17db8-6ccc-11e7-907b-a6006ad3dba0\", \"process_name\": \"purge\", \"schedule_name\": \"purge\", \"schedule_type\": 4, \"schedule_interval\":", "\"schedule_day\": 3, \"exclusive\": \"f\", \"enabled\": \"t\" }, { \"id\": \"d1631422-9ec6-11e7-abc4-cec278b6b50a\",", "in version 3.8: patch() now returns an AsyncMock if the", "True) # WHEN # THEN with pytest.raises(NotReadyError) as excinfo: await", "_rv = asyncio.ensure_future(mock_process()) mocker.patch.object(asyncio, 'create_subprocess_exec', return_value=_rv) mocker.patch.object(asyncio, 'ensure_future', return_value=asyncio.ensure_future(mock_task())) mocker.patch.object(scheduler,", "= scheduler._ScheduleRow( id=schedule_id, name='Test Schedule', type=Schedule.Type.INTERVAL, day=0, time=0, repeat=datetime.timedelta(seconds=30), repeat_seconds=30,", "await scheduler.get_tasks() # THEN assert schedule.process_name == tasks[0].process_name assert tasks[0].reason", "# THEN assert 1 == cr_cat.call_count assert 1 == get_cat.call_count", "in args2 @pytest.mark.asyncio @pytest.mark.skip(\"_scheduler_loop() not suitable for unit testing. 
Will", "log_exception.assert_has_calls(calls) @pytest.mark.asyncio async def test_get_scheduled_processes(self, mocker): # GIVEN scheduler =", "{ \"id\": \"5d7fed92-fb9a-11e7-8c3f-9a214cf093ae\", \"process_name\": \"North Readings to OCS\", \"schedule_name\": \"OMF", "(scheduler._schedules[sch_id]).enabled is False assert 2 == log_info.call_count calls = [call('No", "with pytest.raises(Exception): await scheduler._get_schedules() assert 1 == log_exception.call_count else: await", "WHEN await scheduler.save_schedule(schedule, is_enabled_modified=True) # THEN assert len(scheduler._storage_async.schedules) == len(scheduler._schedules)", "PI' in args @pytest.mark.asyncio async def test_cancel_task_exception(self, mocker): # GIVEN", "- 3600) # TODO: Remove after implementation of above test", "== len(scheduler._process_scripts) assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) calls = [call('Starting'), call('Starting", "# THEN with pytest.raises(NotReadyError) as excinfo: await scheduler.start() with pytest.raises(NotReadyError)", "MockStorageAsync(core_management_host=None, core_management_port=None) log_debug = mocker.patch.object(scheduler._logger, \"debug\") log_info = mocker.patch.object(scheduler._logger, \"info\")", "= \"Queued schedule '%s' for execution\", 'purge' # log_info.assert_called_with(*log_params) @pytest.mark.asyncio", "mocker.patch.object(scheduler._logger, \"debug\", side_effect=Exception()) log_exception = mocker.patch.object(scheduler._logger, \"exception\") mocker.patch.object(scheduler, '_schedule_first_task', side_effect=Exception())", "mocker.patch.object(scheduler._logger, \"debug\", side_effect=Exception()) log_exception = mocker.patch.object(scheduler._logger, \"exception\") # WHEN #", "= list(scheduler._schedule_executions[schedule.id].task_processes.keys())[0] # WHEN tasks = await scheduler.get_tasks() # THEN", "mocker.patch.object(scheduler, '_paused', False) mocker.patch.object(scheduler, '_process_scripts', 
return_value=\"North Readings to PI\") mocker.patch.object(scheduler,", "len(scheduler._storage_async.schedules) == len(scheduler._schedules) assert 1 == audit_logger.call_count calls = [call('SCHCH',", "mocker.patch.object(scheduler._logger, \"exception\") mocker.patch.object(scheduler, '_schedule_first_task', side_effect=Exception()) # WHEN # THEN with", "async def test_delete_schedule(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception,", "= time.time() curr_time = datetime.datetime.fromtimestamp(current_time) mocker.patch.multiple(scheduler, _max_running_tasks=10, _start_time=current_time) await scheduler._get_schedules()", "Only valid for test_save_schedule_update if table_name == \"schedules\": return {\"count\":", "query_tbl_with_payload(cls, table_name, query_payload): if table_name == 'tasks': return { \"count\":", "is None assert scheduler._max_completed_task_age is None await scheduler._read_config() # THEN", "THEN with pytest.raises(NotReadyError) as excinfo: await scheduler.start() with pytest.raises(NotReadyError) as", "= mock_schedule_execution mock_schedule_executions[mock_schedule.id].task_processes[mock_task_id] = mock_task_process mocker.patch.object(scheduler, '_resume_check_schedules') mocker.patch.object(scheduler, '_schedule_next_task') mocker.patch.multiple(scheduler,", "True) mocker.patch.object(scheduler, '_task_processes') audit_logger = mocker.patch.object(AuditLogger, 'information', return_value=asyncio.ensure_future(mock_task())) log_info =", "backup mocker.patch.object(scheduler, '_ready', True) mocker.patch.object(scheduler, '_resume_check_schedules') # Assert that there", "def test_disable_schedule(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage =", "backup queue_task = mocker.patch.object(scheduler, 'queue_task', return_value=asyncio.ensure_future(mock_task())) audit_logger = mocker.patch.object(AuditLogger, 'information',", "def test_remove_service_from_task_processes(self): pass 
@pytest.mark.asyncio async def test_disable_schedule(self, mocker): # GIVEN", "30.0, 'enabled': False, 'type': Schedule.Type.INTERVAL, 'exclusive': True, 'processName': 'North Readings", "tasks[0].end_time is not None assert tasks[0].exit_code is '0' @pytest.mark.asyncio async", "\"disable_schedule\", return_value=mock_coro()) schedule_id = uuid.uuid4() schedule_row = scheduler._ScheduleRow( id=schedule_id, name='Test", "== len(scheduler._schedules) assert 1 == log_exception.call_count log_params = 'Attempt to", "schedule at first with pytest.raises(KeyError) as excinfo: assert scheduler._schedule_executions[sch_id] is", "== len(scheduler._schedules) @pytest.mark.asyncio async def test_delete_schedule_enabled_schedule(self, mocker): # GIVEN scheduler,", "'2b614d26-760f-11e7-b5a5-be2e44b06b34'), call(\"Disabled Schedule '%s/%s' process '%s'\\n\", 'OMF to PI north',", "assert tasks[0].exit_code is None @pytest.mark.asyncio async def test_get_task(self, mocker): #", "# WHEN with pytest.raises(Exception) as excinfo: tasks = await scheduler.get_tasks()", "log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) # Assert that", "datetime import uuid import time import json from unittest.mock import", "empty\") with pytest.raises(ValueError) as ex: temp_schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.repeat", "mocker): pass @pytest.mark.asyncio async def test__schedule_next_timed_task(self, mocker): # TODO: Mandatory", "await self.scheduler_fixture(mocker) # Assert that there is no task queued", "no. 
of schedules assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) mocker.patch.object(scheduler, '_ready', True)", "no North task queued for schedule with pytest.raises(KeyError) as excinfo:", "== \"purge\" @pytest.mark.asyncio async def test_get_schedule_exception(self, mocker): # GIVEN scheduler,", "= datetime.time() await scheduler.save_schedule(temp_schedule) del temp_schedule assert str(ex).endswith('day must be", "assert scheduler._schedules is None assert scheduler._process_scripts is None assert scheduler._ready", "= [ { \"id\": \"cea17db8-6ccc-11e7-907b-a6006ad3dba0\", \"process_name\": \"purge\", \"schedule_name\": \"purge\", \"schedule_type\":", "# args, kwargs = log_info.call_args_list[0] # assert (\"Queued schedule '%s'", "sys.version_info.major == 3 and sys.version_info.minor >= 8: _rv = await", "= time.time() mocker.patch.multiple(scheduler, _max_running_tasks=10, _start_time=current_time) await scheduler._get_schedules() mocker.patch.object(scheduler, '_start_task', return_value=asyncio.ensure_future(mock_task()))", "enabled=True, process_name='TestProcess') schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) # WHEN await scheduler.save_schedule(schedule,", "and assert that the task has been queued await scheduler.queue_task(schedule.id)", "Schedule', 'processName': 'TestProcess', 'type': Schedule.Type.INTERVAL, 'repeat': 30.0, 'enabled': True, 'exclusive':", "query_tbl(cls, table_name, query=None): if table_name == 'schedules': return { \"count\":", "any_order=True) @pytest.mark.asyncio async def test_disable_schedule_wrong_schedule_id(self, mocker): # GIVEN scheduler =", "@pytest.mark.asyncio async def test__read_config(self, mocker): async def get_cat(): return {", "== log_info.call_count calls = [call('No Task running for Schedule %s',", "\"id\", \"condition\": \"=\", \"value\": str(task_id)}} args, kwargs = log_exception.call_args assert", "for execution\", 'OMF to PI north') == 
log_info.call_args_list[0] args, kwargs", "self.scheduler_fixture(mocker) log_debug = mocker.patch.object(scheduler._logger, 'debug', side_effect=Exception()) # WHEN with pytest.raises(Exception)", "'day': 1, 'time': '0:0:0', 'processName': 'TestProcess', 'type': Schedule.Type.TIMED}})] audit_logger.assert_has_calls(calls, any_order=True)", "@pytest.mark.asyncio async def test_get_running_tasks(self, mocker): # GIVEN scheduler, schedule, log_info,", "tasks[0].reason is None assert tasks[0].state == Task.State.RUNNING assert tasks[0].cancel_requested is", "deleted.', str(sch_id) log_exception.assert_called_with(*log_params) @pytest.mark.asyncio async def test_delete_schedule_exception(self, mocker): # GIVEN", "\"info\") current_time = time.time() curr_time = datetime.datetime.fromtimestamp(current_time) mocker.patch.multiple(scheduler, _max_running_tasks=10, _start_time=current_time)", "@pytest.allure.story(\"scheduler\") class TestScheduler: async def scheduler_fixture(self, mocker): # Changed in", "test_save_schedule_update_with_enable_modified(self, mocker): @asyncio.coroutine def mock_coro(): return \"\" # GIVEN scheduler,", "scheduler._ScheduleRow( id=schedule_id, name='Test Schedule', type=Schedule.Type.TIMED, day=0, time=0, repeat=datetime.timedelta(seconds=30), repeat_seconds=30, exclusive=False,", "0, \"exclusive\": \"t\", \"enabled\": \"t\" }, { \"id\": \"5d7fed92-fb9a-11e7-8c3f-9a214cf093ae\", \"process_name\":", "day=0, time=0, repeat=datetime.timedelta(seconds=30), repeat_seconds=30, exclusive=False, enabled=True, process_name='TestProcess') # WHEN #", "process_name='TestProcess') # WHEN schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) # THEN assert", "part mocker.patch.object(scheduler, '_scheduler_loop_sleep_task', None) scheduler._resume_check_schedules() # THEN assert scheduler._check_processes_pending is", "be None') with pytest.raises(ValueError) as ex: temp_schedule = scheduler._schedule_row_to_schedule(schedule_id, 
schedule_row)", "\"YYYY-MM-DD HH24:MI:SS.MS\", \"column\": \"start_time\"}, {\"alias\": \"end_time\", \"format\": \"YYYY-MM-DD HH24:MI:SS.MS\", \"column\":", "log_info.assert_has_calls(calls, any_order=True) assert 1 == audit_logger.call_count calls = [call('SCHCH', {'schedule':", "else: _rv = asyncio.ensure_future(get_cat()) # GIVEN scheduler = Scheduler() scheduler._storage", "sch_execution.next_start_time # THEN assert time_after_call > time.mktime(curr_time.timetuple()) assert 4 ==", "in args @pytest.mark.asyncio async def test__start_task(self, mocker): # TODO: Mandatory", "= await self.scheduler_fixture(mocker) log_debug = mocker.patch.object(scheduler._logger, 'debug', side_effect=Exception()) sch_id =", "= None await scheduler.save_schedule(temp_schedule) del temp_schedule assert str(ex).endswith('exclusive can not", "= MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.object(scheduler, '_schedule_first_task') mocker.patch.object(scheduler,", "await scheduler.save_schedule(temp_schedule) del temp_schedule assert str(ex).endswith('exclusive can not be None')", "test_save_schedule_update if table_name == \"schedules\": return {\"count\": 1} @classmethod async", "%s', 'scheduled_processes' log_exception.assert_called_once_with(*log_args) @pytest.mark.asyncio @pytest.mark.parametrize(\"test_interval, is_exception\", [ ('\"Blah\" 0 days',", "'d1631422-9ec6-11e7-abc4-cec278b6b50a', 'backup')] log_info.assert_has_calls(calls, any_order=True) assert 1 == audit_logger.call_count calls =", "core_management_port=None) mocker.patch.object(scheduler, '_schedule_first_task') log_exception = mocker.patch.object(scheduler._logger, \"exception\") new_schedules = copy.deepcopy(MockStorageAsync.schedules)", "async def test_remove_service_from_task_processes(self): pass @pytest.mark.asyncio async def test_disable_schedule(self, mocker): #", 
"mocker.patch.object(scheduler._logger, 'debug', side_effect=Exception()) sch_id = uuid.UUID(\"d1631422-9ec6-11e7-abc4-cec278b6b50a\") # backup # WHEN", "pytest.raises(NotReadyError) as excinfo: await scheduler.delete_schedule(uuid.uuid4()) with pytest.raises(NotReadyError) as excinfo: await", "\"schedule_time\": \"00:00:15\", \"schedule_day\": 3, \"exclusive\": \"f\", \"enabled\": \"t\" }, {", "= {\"return\": [\"id\", \"process_name\", \"schedule_name\", \"state\", {\"alias\": \"start_time\", \"column\": \"start_time\",", "with pytest.raises(NotReadyError) as excinfo: await scheduler.get_schedule(uuid.uuid4()) with pytest.raises(NotReadyError) as excinfo:", "'_ready', True) mocker.patch.object(scheduler, '_resume_check_schedules') # WHEN # THEN with pytest.raises(ScheduleNotFoundError)", "queued for this schedule at first with pytest.raises(KeyError) as excinfo:", "datetime.time') with pytest.raises(ValueError) as ex: temp_schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.day", "sch_execution = scheduler._schedule_executions[sch_id] time_before_call = sch_execution.next_start_time # WHEN scheduler._schedule_next_task(sch) time_after_call", "test__get_schedules_exception(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None,", "scheduler._get_process_scripts() log_args = 'Query failed: %s', 'scheduled_processes' log_exception.assert_called_once_with(*log_args) @pytest.mark.asyncio @pytest.mark.parametrize(\"test_interval,", "scheduler._schedule_next_timed_task(sch, sch_execution, next_dt) time_after_call = sch_execution.next_start_time # THEN assert time_after_call", "'create_subprocess_exec', return_value=_rv) await scheduler._get_schedules() schedule = scheduler._ScheduleRow( id=uuid.UUID(\"2b614d26-760f-11e7-b5a5-be2e44b06b34\"), process_name=\"North Readings", "assert schedule.schedule_id == schedule_id assert schedule.name == \"purge\" assert schedule.schedule_type", 
"THEN assert len(scheduler._storage_async.schedules) + 1 == len(scheduler._schedules) assert 1 ==", "log_debug = await self.scheduler_fixture(mocker) mocker.patch.object(scheduler, '_ready', False) mocker.patch.object(scheduler, '_paused', True)", "is '0' @pytest.mark.skip(\"Need a suitable fixture\") @pytest.mark.asyncio async def test_get_task_not_found(self,", "\"reason\", \"exit_code\"], \"where\": {\"column\": \"id\", \"condition\": \"=\", \"value\": str(task_id)}} args,", "mocker.patch.object(asyncio, 'create_subprocess_exec', return_value=_rv) await scheduler._get_schedules() schedule = scheduler._ScheduleRow( id=uuid.UUID(\"2b614d26-760f-11e7-b5a5-be2e44b06b34\"), process_name=\"North", "core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) log_debug = mocker.patch.object(scheduler._logger, \"debug\", side_effect=Exception())", "assert 1 == first_task.call_count assert 1 == resume_sch.call_count assert 0", "WHEN task = await scheduler.get_task(task_id) # THEN assert schedule.process_name ==", "schedule = await scheduler.get_schedule(schedule_id) # THEN assert isinstance(schedule, Schedule) assert", "'_resume_check_schedules') mocker.patch.object(scheduler, '_schedule_next_task') mocker.patch.multiple(scheduler, _schedules=mock_schedules, _task_processes=mock_task_processes, _schedule_executions=mock_schedule_executions) mocker.patch.object(scheduler, '_process_scripts', return_value=\"North", "scheduler._start_time is None calls = [call('Processing stop request'), call('Stopped')] log_info.assert_has_calls(calls,", "THEN assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) assert 1 == audit_logger.call_count calls", "'backup hourly', 'd1631422-9ec6-11e7-abc4-cec278b6b50a', 'backup')] log_info.assert_has_calls(calls, any_order=True) assert 1 == audit_logger.call_count", "'repeat': 30.0, 'enabled': False, 'type': Schedule.Type.INTERVAL, 'exclusive': True, 
'processName': 'North", "== len(scheduler._task_processes) assert 0 == len(scheduler._schedule_executions[mock_schedule.id].task_processes) args, kwargs = log_info.call_args_list[0]", "tasks = await scheduler.get_tasks() # THEN assert schedule.process_name == tasks[0].process_name", "log_error, log_debug = await self.scheduler_fixture(mocker) audit_logger = mocker.patch.object(AuditLogger, 'information', return_value=asyncio.ensure_future(mock_task()))", "assert schedule.schedule_id == schedule_row[0] assert schedule.name == schedule_row[1] assert schedule.schedule_type", "mocker.patch.object(scheduler, '_schedule_first_task', side_effect=Exception()) # WHEN # THEN with pytest.raises(Exception): await", "Readings to PI\") mocker.patch.object(scheduler, '_wait_for_task_completion') # Confirm that task has", "task.reason is '' assert task.state is not None assert task.cancel_requested", "stop request'), call('Stopped')] log_info.assert_has_calls(calls, any_order=True) # TODO: Find why these", "log_exception = mocker.patch.object(scheduler._logger, \"exception\") random_schedule_id = uuid.uuid4() # WHEN await", "WHEN await scheduler.save_schedule(schedule) # THEN assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) assert", "has not started yet assert 0 == len(scheduler._schedule_executions[schedule.id].task_processes) # WHEN", "args1, kwargs1 = log_info.call_args_list[1] args2, kwargs2 = log_info.call_args_list[2] args3, kwargs3", "payload): # Only valid for test_save_schedule_update if table_name == \"schedules\":", "scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.day = 0 temp_schedule.time = datetime.time() await scheduler.save_schedule(temp_schedule)", "args1 assert 'OMF to PI north' in args2 @pytest.mark.asyncio @pytest.mark.skip(\"_scheduler_loop()", "is not None assert scheduler._max_completed_task_age is not None @pytest.mark.asyncio async", "THEN assert time_after_call > time_before_call assert 4 == 
log_info.call_count args0,", "return_value=\"North Readings to PI\") # WHEN await scheduler._wait_for_task_completion(mock_task_process) # THEN", "[call('SCHCH', {'schedule': {'name': 'OMF to PI north', 'repeat': 30.0, 'enabled':", "Schedule '%s' process '%s' task %s pid %s, %s running", "as ex: temp_schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.day = 0 temp_schedule.time", "MockStorageAsync(core_management_host=None, core_management_port=None) log_info = mocker.patch.object(scheduler._logger, \"info\") current_time = time.time() curr_time", "scheduler._schedule_executions[schedule.id].task_processes[task_id].cancel_requested is None # WHEN await scheduler.cancel_task(task_id) # THEN assert", "assert 'OMF to PI north' in args assert 'North Readings", "\"process_name\": \"purge\", \"schedule_name\": \"purge\", \"schedule_type\": 4, \"schedule_interval\": \"01:00:00\", \"schedule_time\": \"\",", "len(scheduler._schedules) mocker.patch.object(scheduler, '_ready', True) # WHEN # Now delete schedule", "'_schedule_first_task') # WHEN await scheduler._read_storage() # THEN assert len(scheduler._storage_async.scheduled_processes) ==", "schedule_row = scheduler._ScheduleRow( id=schedule_id, name='Test Schedule', type=Schedule.Type.TIMED, day=1, time=datetime.time(), repeat=datetime.timedelta(seconds=30),", "not None assert task.cancel_requested is None assert task.start_time is not", "00:00:40\", \"schedule_time\": \"\", \"schedule_day\": 0, \"exclusive\": \"t\", \"enabled\": \"f\" },", "@pytest.mark.asyncio async def test_save_schedule_update_with_enable_modified(self, mocker): @asyncio.coroutine def mock_coro(): return \"\"", "# \"stat collector\" appears twice in this list. 
assert 'stats", "] tasks = [ { \"id\": \"259b8570-65c1-4b92-8c62-e9642631a600\", \"process_name\": \"North Readings", "request has not been made assert scheduler._schedule_executions[schedule.id].task_processes[task_id].cancel_requested is None #", "\"management_port\": 9999, \"address\": \"0.0.0.0\", \"protocol\": \"http\" } class MockStorageAsync(StorageClientAsync): schedules", "task processes should be left pending assert 0 == len(scheduler._task_processes)", "process_name='TestProcess') schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) enable_schedule = mocker.patch.object(scheduler, \"enable_schedule\", return_value=mock_coro())", "raised by Scheduler._scheduler_loop %s', \"object MagicMock can't be used in", "command: %s', 'schedules')] log_debug.assert_has_calls(calls, any_order=True) @pytest.mark.asyncio async def test_stop(self, mocker):", "assert schedule.schedule_type == schedule_row[2] assert schedule_row[3] is 0 # 0", "test__check_schedules(self, mocker): # TODO: Mandatory - Add negative tests for", "async def test_get_task_exception(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception,", "task %s pid %s, %s running tasks\\n%s\" in args assert", "started yet assert 0 == len(scheduler._schedule_executions[schedule.id].task_processes) await scheduler._start_task(schedule) # Confirm", "# WHEN status, message = await scheduler.disable_schedule(sch_id) # THEN assert", "asyncio import datetime import uuid import time import json from", "status is True assert message == \"Schedule successfully disabled\" assert", "scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) log_info = mocker.patch.object(scheduler._logger, \"info\") mocker.patch.object(scheduler, '_schedule_first_task')", "as excinfo: assert scheduler._schedule_executions[sch_id] is True # WHEN await scheduler.queue_task(sch_id)", "schedule = scheduler._ScheduleRow( 
id=uuid.UUID(\"2b614d26-760f-11e7-b5a5-be2e44b06b34\"), process_name=\"North Readings to PI\", name=\"OMF to", "test_get_task_exception(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug", "stat collector sch = scheduler._schedules[sch_id] sch_execution = scheduler._schedule_executions[sch_id] # WHEN", "sch_id = uuid.UUID(\"d1631422-9ec6-11e7-abc4-cec278b6b50a\") # backup # WHEN status, message =", "mock_schedule_executions = dict() mock_schedule_execution = scheduler._ScheduleExecution() mock_schedule_executions[mock_schedule.id] = mock_schedule_execution mock_schedule_executions[mock_schedule.id].task_processes[mock_task_id]", "assert str(ex).endswith('time must be of type datetime.time') with pytest.raises(ValueError) as", "\"schedule_name\": \"purge\", \"schedule_type\": 4, \"schedule_interval\": \"01:00:00\", \"schedule_time\": \"\", \"schedule_day\": 0,", "mocker.patch.object(scheduler._logger, \"debug\") log_info = mocker.patch.object(scheduler._logger, \"info\") return scheduler, schedule, log_info,", "async def get_cat(): return { \"max_running_tasks\": { \"description\": \"The maximum", "the start time), for a rows \" \"in the tasks", "args, kwargs = log_info.call_args_list[1] assert \"Stopping process: Schedule '%s' process", "for a rows \" \"in the tasks table that do", "schedule_row[4] is 0 # 0 for Interval Schedule assert schedule.repeat", "of type datetime.timedelta') with pytest.raises(ValueError) as ex: temp_schedule = scheduler._schedule_row_to_schedule(schedule_id,", "scheduler.get_schedule(uuid.uuid4()) with pytest.raises(NotReadyError) as excinfo: await scheduler.save_schedule(Schedule(Schedule.Type.INTERVAL)) with pytest.raises(NotReadyError) as", "MockStorageAsync(core_management_host=None, core_management_port=None) cr_cat = mocker.patch.object(ConfigurationManager, \"create_category\", return_value=asyncio.ensure_future(mock_task())) get_cat = mocker.patch.object(ConfigurationManager,", "= uuid.uuid4() 
schedule_row = scheduler._ScheduleRow( id=schedule_id, name='Test Schedule', type=Schedule.Type.TIMED, day=0,", "= mocker.patch.object(scheduler._logger, \"info\") sch_id = uuid.UUID(\"d1631422-9ec6-11e7-abc4-cec278b6b50a\") # backup # WHEN", "\"COAP\", \"schedule_name\": \"COAP listener south\", \"schedule_type\": 1, \"schedule_interval\": \"00:00:00\", \"schedule_time\":", "\"schedule_type\": 3, \"schedule_interval\": \"00:00:30\", \"schedule_time\": \"\", \"schedule_day\": 0, \"exclusive\": \"t\",", "= await scheduler.get_task(uuid.uuid4()) @pytest.mark.asyncio async def test_get_task_exception(self, mocker): # GIVEN", "0, \"exclusive\": \"t\", \"enabled\": \"t\" }, { \"id\": \"2176eb68-7303-11e7-8cf7-a6006ad3dba0\", \"process_name\":", "await self.scheduler_fixture(mocker) log_debug = mocker.patch.object(scheduler._logger, 'debug', side_effect=Exception()) sch_id = uuid.UUID(\"d1631422-9ec6-11e7-abc4-cec278b6b50a\")", "test__terminate_child_processes(self, mocker): pass class MockStorage(StorageClientAsync): def __init__(self, core_management_host=None, core_management_port=None): super().__init__(core_management_host,", "# THEN assert scheduler._check_processes_pending is True @pytest.mark.asyncio async def test__wait_for_task_completion(self,", "# TODO: Remove after implementation of above test test__read_config() mocker.patch.object(scheduler,", "repeat=datetime.timedelta(seconds=30), repeat_seconds=30, exclusive=False, enabled=True, process_name='TestProcess') # WHEN # THEN with", "[ \"tasks/north\", \"--stream_id\", \"4\", \"--debug_level\", \"1\" ] }, ] tasks", "async def test_purge_tasks(self, mocker): # TODO: Mandatory - Add negative", "the North task has been queued await scheduler.queue_task(schedule.id) assert isinstance(scheduler._schedule_executions[schedule.id],", "== 3 and sys.version_info.minor >= 8: _rv = await get_cat()", "str(ex).endswith('day must be between 1 and 7') @pytest.mark.asyncio @pytest.mark.skip(reason=\"To be", "has been queued 
await scheduler.queue_task(schedule.id) assert isinstance(scheduler._schedule_executions[schedule.id], scheduler._ScheduleExecution) # Confirm", "a status of running\", \"type\": \"integer\", \"default\": str(Scheduler._DEFAULT_MAX_COMPLETED_TASK_AGE_DAYS), \"value\": str(Scheduler._DEFAULT_MAX_COMPLETED_TASK_AGE_DAYS)", "log_debug = await self.scheduler_fixture(mocker) random_schedule_id = uuid.uuid4() # WHEN await", "\"YYYY-MM-DD HH24:MI:SS.MS\", \"column\": \"end_time\"}, \"reason\", \"exit_code\"], \"where\": {\"column\": \"id\", \"condition\":", "def mock_process(): m = MagicMock() m.pid = 9999 m.terminate =", "schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) sch_id =", "\"services/south\" ] }, { \"name\": \"North Readings to PI\", \"script\":", "to PI north' in args2 @pytest.mark.asyncio @pytest.mark.skip(\"_scheduler_loop() not suitable for", "core_management_port=None): super().__init__(core_management_host, core_management_port) def _get_storage_service(self, host, port): return { \"id\":", "return_value=_rv) # WHEN assert scheduler._max_running_tasks is None assert scheduler._max_completed_task_age is", "can't be used in 'await' expression\"), call('An exception was raised", "\"reason\", \"exit_code\"], \"limit\": 100} args, kwargs = log_exception.call_args assert 'Query", "log_error, log_debug = await self.scheduler_fixture(mocker) random_schedule_id = uuid.uuid4() # WHEN", "= mocker.patch.object(scheduler._logger, \"exception\") random_schedule_id = uuid.uuid4() # WHEN await scheduler.disable_schedule(random_schedule_id)", "with pytest.raises(ScheduleNotFoundError) as excinfo: await scheduler.delete_schedule(uuid.uuid4()) @pytest.mark.asyncio async def test_delete_schedule_not_found(self,", "type datetime.timedelta') with pytest.raises(ValueError) as ex: temp_schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row)", "await self.scheduler_fixture(mocker) # WHEN schedules = await 
scheduler.get_schedules() # THEN", "MockStorage(StorageClientAsync): def __init__(self, core_management_host=None, core_management_port=None): super().__init__(core_management_host, core_management_port) def _get_storage_service(self, host,", "assert len(scheduler._storage_async.scheduled_processes) == len(scheduler._process_scripts) assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) calls =", "= [call('An exception was raised by Scheduler._purge_tasks %s', \"object MagicMock", "test__schedule_next_timed_task(self, mocker): # TODO: Mandatory - Add negative tests for", "day=None, exclusive=True, enabled=True) mocker.patch.object(scheduler, '_ready', True) mocker.patch.object(scheduler, '_resume_check_schedules') # Assert", "mocker.patch.object(scheduler, '_wait_for_task_completion') # Confirm that task has not started yet", "\"tasks/statistics\" ] }, { \"name\": \"backup\", \"script\": [ \"tasks/backup_postgres\" ]", "return_value=asyncio.ensure_future(mock_task())) mocker.patch.object(scheduler, '_resume_check_schedules') mocker.patch.object(scheduler, '_process_scripts', return_value=\"North Readings to PI\") mocker.patch.object(scheduler,", "collection' in args3 @pytest.mark.asyncio async def test__schedule_first_task(self, mocker): # TODO:", "mocker.patch.object(scheduler, '_schedule_first_task') await scheduler._get_schedules() mocker.patch.object(scheduler, '_ready', True) mocker.patch.object(scheduler, '_task_processes') log_info", "@pytest.mark.asyncio async def test_delete_schedule(self, mocker): # GIVEN scheduler, schedule, log_info,", "with pytest.raises(ValueError) as ex: temp_schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.time =", "= await self.scheduler_fixture(mocker) mocker.patch.object(scheduler, '_ready', False) mocker.patch.object(scheduler, '_paused', True) #", "'processName': 'TestProcess', 'type': Schedule.Type.TIMED}})] audit_logger.assert_has_calls(calls, any_order=True) assert 
1 == first_task.call_count", "THEN assert len(scheduler._storage_async.scheduled_processes) == len(scheduler._process_scripts) assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) @pytest.mark.asyncio", "PI' in args @pytest.mark.asyncio async def test__start_task(self, mocker): # TODO:", "test_get_schedule_exception(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug", "assert tasks[0].start_time is not None assert tasks[0].end_time is None assert", "uuid.uuid4() # WHEN await scheduler.disable_schedule(random_schedule_id) # THEN log_params = \"No", "is deleted assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) assert 1 == log_exception.call_count", "= await mock_process() else: _rv = asyncio.ensure_future(mock_process()) mocker.patch.object(asyncio, 'create_subprocess_exec', return_value=_rv)", "= datetime.datetime.fromtimestamp(sch_execution.next_start_time) next_dt += datetime.timedelta(seconds=sch.repeat_seconds) scheduler._schedule_next_timed_task(sch, sch_execution, next_dt) time_after_call =", "\"disable_schedule\", return_value=mock_coro()) # WHEN await scheduler.save_schedule(schedule, is_enabled_modified=True) # THEN assert", "\"enabled\": \"t\" }, { \"id\": \"2176eb68-7303-11e7-8cf7-a6006ad3dba0\", \"process_name\": \"stats collector\", \"schedule_name\":", "copy import pytest from fledge.services.core.scheduler.scheduler import Scheduler, AuditLogger, ConfigurationManager from", "core_management_port=None) mocker.patch.object(scheduler, '_schedule_first_task') mocker.patch.object(scheduler, '_ready', True) mocker.patch.object(scheduler, '_paused', False) mocker.patch.object(scheduler,", "await scheduler.save_schedule(temp_schedule) del temp_schedule assert str(ex).endswith('day must be between 1", "time_after_call > time.mktime(curr_time.timetuple()) assert 4 == log_info.call_count args0, kwargs0 =", "mocker): # GIVEN scheduler, schedule, log_info, log_exception, 
log_error, log_debug =", "args assert 'North Readings to PI' in args @pytest.mark.asyncio async", "log_error, log_debug = await self.scheduler_fixture(mocker) sch_id = uuid.UUID(\"d1631422-9ec6-11e7-abc4-cec278b6b50a\") # backup", "at first with pytest.raises(KeyError) as excinfo: assert scheduler._schedule_executions[sch_id] is True", "PI'}})] audit_logger.assert_has_calls(calls, any_order=True) @pytest.mark.asyncio async def test_disable_schedule_wrong_schedule_id(self, mocker): # GIVEN", "\"format\": \"YYYY-MM-DD HH24:MI:SS.MS\"}, \"reason\", \"exit_code\"], \"limit\": 100} args, kwargs =", "test_delete_schedule_enabled_schedule(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug", "task and assert that the North task has been queued", "exclusive=True, enabled=True) log_exception = mocker.patch.object(scheduler._logger, \"exception\") log_error = mocker.patch.object(scheduler._logger, \"error\")", "Schedule', 'enabled': True, 'repeat': 30.0, 'exclusive': False, 'day': 1, 'time':", "\"tasks/backup_postgres\" ] }, { \"name\": \"COAP\", \"script\": [ \"services/south\" ]", "async def test__schedule_first_task(self, mocker): # TODO: Mandatory - Add negative", "gets executed, hence # \"stat collector\" appears twice in this", "scheduler.cancel_task(task_id) # THEN assert scheduler._schedule_executions[schedule.id].task_processes[task_id].cancel_requested is not None assert 2", "log_debug = await self.scheduler_fixture(mocker) audit_logger = mocker.patch.object(AuditLogger, 'information', return_value=asyncio.ensure_future(mock_task())) first_task", "@pytest.mark.asyncio async def test_delete_schedule_not_found(self, mocker): # GIVEN scheduler, schedule, log_info,", "('\"Blah\" 0 days', True), ('12:30:11', False), ('0 day 12:30:11', False),", "%s', 'schedules')] log_debug.assert_has_calls(calls, any_order=True) @pytest.mark.asyncio async def test_stop(self, mocker): #", "await self.scheduler_fixture(mocker) sch_id = 
uuid.UUID(\"ada12840-68d3-11e7-907b-a6006ad3dba0\") #Coap mocker.patch.object(scheduler, 'queue_task', return_value=asyncio.ensure_future(mock_task())) #", "\"start_time\", \"format\": \"YYYY-MM-DD HH24:MI:SS.MS\"}, {\"alias\": \"end_time\", \"column\": \"end_time\", \"format\": \"YYYY-MM-DD", "scheduler._ScheduleRow( id=schedule_id, name='Test Schedule', type=Schedule.Type.TIMED, day=1, time=datetime.time(), repeat=datetime.timedelta(seconds=30), repeat_seconds=30, exclusive=False,", "@pytest.mark.asyncio async def test_get_scheduled_processes(self, mocker): # GIVEN scheduler = Scheduler()", "log_info.call_args_list[1] args2, kwargs2 = log_info.call_args_list[2] assert 'stats collection' in args0", "test_enable_schedule(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug", "Check ELSE part mocker.patch.object(scheduler, '_scheduler_loop_sleep_task', None) scheduler._resume_check_schedules() # THEN assert", "# Confirm no. of schedules assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) mocker.patch.object(scheduler,", "'OMF to PI north') == log_info.call_args_list[0] args, kwargs = log_info.call_args_list[0]", "== first_task.call_count assert 1 == resume_sch.call_count assert 1 == enable_schedule.call_count", "True, 'repeat': 3600.0, 'enabled': True}})] audit_logger.assert_has_calls(calls, any_order=True) @pytest.mark.asyncio async def", "'stats collection' in args3 @pytest.mark.asyncio async def test__get_process_scripts(self, mocker): #", "to OCS north\", \"schedule_type\": 3, \"schedule_interval\": \"1 day 00:00:40\", \"schedule_time\":", "'_resume_check_schedules', return_value=asyncio.ensure_future(mock_task())) mocker.patch.object(scheduler, '_purge_tasks_task', return_value=asyncio.ensure_future(asyncio.sleep(.1))) mocker.patch.object(scheduler, '_scheduler_loop_task', return_value=asyncio.ensure_future(asyncio.sleep(.1))) current_time =", "Schedule assert schedule.repeat == schedule_row[5] assert 
schedule.exclusive == schedule_row[7] assert", "= lambda: True return m @pytest.allure.feature(\"unit\") @pytest.allure.story(\"scheduler\") class TestScheduler: async", "_max_running_tasks=10, _start_time=current_time) await scheduler._get_schedules() sch_id = uuid.UUID(\"2176eb68-7303-11e7-8cf7-a6006ad3dba0\") # stat collector", "'2b614d26-760f-11e7-b5a5-be2e44b06b34', 'North Readings to PI')] log_info.assert_has_calls(calls) assert 1 == audit_logger.call_count", "list. assert 'stats collection' in args3 @pytest.mark.asyncio async def test__get_process_scripts(self,", "def test__get_schedules_exception(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage =", "# backup # WHEN status, message = await scheduler.disable_schedule(sch_id) #", "THEN with pytest.raises(ScheduleNotFoundError) as excinfo: await scheduler.delete_schedule(uuid.uuid4()) @pytest.mark.asyncio async def", "task has not started yet assert 0 == len(scheduler._schedule_executions[schedule.id].task_processes) #", "'OMF to PI north', '2b614d26-760f-11e7-b5a5-be2e44b06b34', 'North Readings to PI')] log_info.assert_has_calls(calls)", "async def test_get_tasks(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception,", "\"info\") schedule_id = uuid.UUID(\"2b614d26-760f-11e7-b5a5-be2e44b06b34\") # OMF to PI North schedule_row", "== 'tasks': return { \"count\": len(MockStorageAsync.tasks), \"rows\": MockStorageAsync.tasks } @classmethod", "mocker.patch.object(scheduler, '_schedule_first_task') await scheduler._get_schedules() schedule = scheduler._ScheduleRow( id=uuid.UUID(\"2b614d26-760f-11e7-b5a5-be2e44b06b34\"), process_name=\"North Readings", "task has been queued await scheduler.queue_task(schedule.id) assert isinstance(scheduler._schedule_executions[schedule.id], scheduler._ScheduleExecution) #", "is '' assert tasks[0].state is not None assert tasks[0].cancel_requested is", "with pytest.raises(NotReadyError) as excinfo: await scheduler.disable_schedule(uuid.uuid4()) with 
pytest.raises(NotReadyError) as excinfo:", "await scheduler.start() # THEN assert scheduler._ready is True assert len(scheduler._storage_async.scheduled_processes)", "== tasks[0].process_name assert tasks[0].reason is '' assert tasks[0].state is not", "\"tasks/north\", \"--stream_id\", \"1\", \"--debug_level\", \"1\" ] }, { \"name\": \"North", "{ \"name\": \"purge\", \"script\": [ \"tasks/purge\" ] }, { \"name\":", "no task has started yet assert 0 == len(scheduler._schedule_executions[schedule.id].task_processes) await", "1 == len(scheduler._schedule_executions[schedule.id].task_processes) # WHEN tasks = await scheduler.get_running_tasks() #", "assert 0 == len(scheduler._schedule_executions[mock_schedule.id].task_processes) args, kwargs = log_info.call_args_list[0] assert 'OMF", "\"value\": str(task_id)}} args, kwargs = log_exception.call_args assert 'Query failed: %s'", "\"00:00:30\", \"schedule_time\": \"\", \"schedule_day\": 0, \"exclusive\": \"t\", \"enabled\": \"t\" },", "\"start_time\"}, {\"alias\": \"end_time\", \"format\": \"YYYY-MM-DD HH24:MI:SS.MS\", \"column\": \"end_time\"}, \"reason\", \"exit_code\"],", "\"stats collector\", \"schedule_name\": \"stats collection\", \"schedule_type\": 2, \"schedule_interval\": \"00:00:15\", \"schedule_time\":", "asyncio.ensure_future(get_cat()) # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None)", "= log_exception.call_args assert 'Query failed: %s' == args[0] p =", "assert 'OMF to PI north' in args2 # As part", "THEN assert scheduler._ready is True assert len(scheduler._storage_async.scheduled_processes) == len(scheduler._process_scripts) assert", "[ { \"id\": \"259b8570-65c1-4b92-8c62-e9642631a600\", \"process_name\": \"North Readings to PI\", \"state\":", "__init__(self, core_management_host=None, core_management_port=None): super().__init__(core_management_host, core_management_port) def _get_storage_service(self, host, port): return", "}, { 
\"name\": \"North Readings to PI\", \"script\": [ \"tasks/north\",", "scheduler.get_tasks() # THEN assert schedule.process_name == tasks[0].process_name assert tasks[0].reason is", "scheduler._schedule_first_task() also gets executed, hence # \"stat collector\" appears twice", "None assert tasks[0].cancel_requested is None assert tasks[0].start_time is not None", "get_cat() else: _rv = asyncio.ensure_future(get_cat()) # GIVEN scheduler = Scheduler()", "schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) # THEN assert isinstance(schedule, Schedule) assert", "def __init__(self, core_management_host=None, core_management_port=None): super().__init__(core_management_host, core_management_port) def _get_storage_service(self, host, port):", "== log_exception.call_count else: await scheduler._get_schedules() assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) @pytest.mark.asyncio", "# WHEN # THEN with pytest.raises(ScheduleNotFoundError) as excinfo: await scheduler.delete_schedule(uuid.uuid4())", "0 for Interval Schedule assert schedule.repeat == schedule_row[5] assert schedule.exclusive", "_core_management_port=9999, _core_management_host=\"0.0.0.0\", _start_time=current_time - 3600, _paused=False, _task_processes={}) # WHEN retval", "'North Readings to PI' in args @pytest.mark.asyncio async def test__start_task(self,", "assert str(ex).endswith('exclusive can not be None') with pytest.raises(ValueError) as ex:", "[call('SCHCH', {'schedule': {'name': 'Test Schedule', 'enabled': True, 'repeat': 30.0, 'exclusive':", "= uuid.UUID(\"d1631422-9ec6-11e7-abc4-cec278b6b50a\") # backup # WHEN status, message = await", "len(scheduler._schedule_executions[schedule.id].task_processes) await scheduler._start_task(schedule) # Confirm that task has started assert", "above, no task processes should be left pending assert 0", "suitable fixture\") @pytest.mark.asyncio async def test_get_task_not_found(self, mocker): # GIVEN scheduler,", "with 
pytest.raises(KeyError) as excinfo: assert scheduler._schedule_executions[sch_id] is True # WHEN", "any given time\", \"type\": \"integer\", \"default\": str(Scheduler._DEFAULT_MAX_RUNNING_TASKS), \"value\": str(Scheduler._DEFAULT_MAX_RUNNING_TASKS) },", "day=0, time=0, repeat=datetime.timedelta(seconds=30), repeat_seconds=30, exclusive=False, enabled=True, process_name='TestProcess') schedule = scheduler._schedule_row_to_schedule(schedule_id,", "await scheduler.cancel_task(task_id) # THEN assert scheduler._schedule_executions[schedule.id].task_processes[task_id].cancel_requested is not None assert", "core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) await scheduler._get_process_scripts() mocker.patch.object(scheduler, '_ready', True)", "THEN assert len(scheduler._storage_async.scheduled_processes) == len(scheduler._process_scripts) @pytest.mark.asyncio async def test__get_process_scripts_exception(self, mocker):", "must be of type datetime.time') with pytest.raises(ValueError) as ex: temp_schedule", "core_management_port=None) cr_cat = mocker.patch.object(ConfigurationManager, \"create_category\", return_value=asyncio.ensure_future(mock_task())) get_cat = mocker.patch.object(ConfigurationManager, \"get_category_all_items\",", "being raised despite mocking _purge_tasks_task, _scheduler_loop_task calls = [call('An exception", "test_delete_schedule(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug", "that the North task has been queued await scheduler.queue_task(schedule.id) assert", "= scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.time = 1234 await scheduler.save_schedule(temp_schedule) del temp_schedule", "THEN with pytest.raises(ScheduleNotFoundError): schedule = await scheduler.get_schedule(schedule_id) @pytest.mark.asyncio async def", "== Schedule.Type.MANUAL assert schedule.repeat == datetime.timedelta(0, 3600) assert 
schedule.exclusive is", "async def test_save_schedule_new_with_enable_modified(self, mocker): @asyncio.coroutine def mock_coro(): return \"\" #", "Confirm that task has not started yet assert 0 ==", "Readings to PI' in args @pytest.mark.asyncio async def test_purge_tasks(self, mocker):", "scheduler._max_completed_task_age is not None @pytest.mark.asyncio async def test_start(self, mocker): #", "'debug', side_effect=Exception()) # WHEN # THEN task_id = uuid.uuid4() with", "is already enabled\" assert (scheduler._schedules[sch_id]).id == sch_id assert (scheduler._schedules[sch_id]).enabled is", "Readings to OCS\", \"schedule_name\": \"OMF to OCS north\", \"schedule_type\": 3,", "True assert schedule.enabled is True assert schedule.process_name == \"purge\" @pytest.mark.asyncio", "return_value=asyncio.ensure_future(mock_task())) # WHEN earliest_start_time = await scheduler._check_schedules() # THEN assert", "in this list. assert 'stats collection' in args3 @pytest.mark.asyncio async", "@pytest.mark.asyncio @pytest.mark.skip(reason=\"To be done\") async def test_remove_service_from_task_processes(self): pass @pytest.mark.asyncio async", "assert 1 == len(scheduler._schedule_executions[schedule.id].task_processes) task_id = list(scheduler._schedule_executions[schedule.id].task_processes.keys())[0] # Confirm that", "[ { \"id\": \"cea17db8-6ccc-11e7-907b-a6006ad3dba0\", \"process_name\": \"purge\", \"schedule_name\": \"purge\", \"schedule_type\": 4,", "\"North Readings to PI\", \"schedule_name\": \"OMF to PI north\", \"schedule_type\":", "== sch_id assert (scheduler._schedules[sch_id]).enabled is True assert 1 == queue_task.call_count", "assert (scheduler._schedules[sch_id]).id == sch_id assert (scheduler._schedules[sch_id]).enabled is True log_params =", "days 00:00:59', False), ('00:25:61', True) ]) async def test__get_schedules(self, test_interval,", "payload = {\"return\": [\"id\", \"process_name\", \"schedule_name\", \"state\", {\"alias\": \"start_time\", 
\"format\":", "failed: %s', 'scheduled_processes' log_exception.assert_called_once_with(*log_args) @pytest.mark.asyncio @pytest.mark.parametrize(\"test_interval, is_exception\", [ ('\"Blah\" 0", "'Attempt to delete an enabled Schedule %s. Not deleted.', str(sch_id)", "THEN assert scheduler._check_processes_pending is True @pytest.mark.asyncio async def test__wait_for_task_completion(self, mocker):", "is_exception\", [ ('\"Blah\" 0 days', True), ('12:30:11', False), ('0 day", "@pytest.mark.asyncio async def test_start(self, mocker): # TODO: Mandatory - Add", "mocker.patch.object(scheduler._logger, \"exception\") # WHEN # THEN with pytest.raises(Exception): await scheduler._get_process_scripts()", "# THEN assert isinstance(schedule, Schedule) assert schedule.schedule_id == schedule_row[0] assert", "repeat=datetime.timedelta(seconds=30), repeat_seconds=30, time=None, day=None, exclusive=True, enabled=True) mock_schedules[mock_schedule.id] = mock_schedule mock_task_process", "payload): pass @classmethod async def update_tbl(cls, table_name, payload): # Only", "\"t\" }, { \"id\": \"2176eb68-7303-11e7-8cf7-a6006ad3dba0\", \"process_name\": \"stats collector\", \"schedule_name\": \"stats", "WHEN await scheduler.disable_schedule(random_schedule_id) # THEN log_params = \"No such Schedule", "to PI\", \"state\": 1, \"start_time\": \"2018-02-06 13:28:14.477868\", \"end_time\": \"2018-02-06 13:28:14.856375\",", "excinfo: await scheduler.delete_schedule(uuid.uuid4()) @pytest.mark.asyncio async def test_get_running_tasks(self, mocker): # GIVEN", "True) mocker.patch.object(scheduler, '_resume_check_schedules') # Assert that there is no task", "task completion, sleep above, no task processes should be left", "Scheduler: Management port received is %d', 9999)] log_info.assert_has_calls(calls, any_order=True) calls", "type=Schedule.Type.INTERVAL, day=0, time=0, repeat=datetime.timedelta(seconds=30), repeat_seconds=30, exclusive=False, enabled=True, process_name='TestProcess') 
schedule =", "== disable_schedule.call_count @pytest.mark.asyncio async def test_save_schedule_update_with_enable_modified(self, mocker): @asyncio.coroutine def mock_coro():", "async def test_queue_task(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage", "THEN assert earliest_start_time is not None assert 3 == log_info.call_count", "# stat collector sch = scheduler._schedules[sch_id] sch_execution = scheduler._schedule_executions[sch_id] time_before_call", "# WHEN # THEN with pytest.raises(ScheduleNotFoundError): schedule = await scheduler.get_schedule(schedule_id)", "Schedule.Type.TIMED}})] audit_logger.assert_has_calls(calls, any_order=True) assert 1 == first_task.call_count assert 1 ==", "list(scheduler._schedule_executions[schedule.id].task_processes.keys())[0] # WHEN tasks = await scheduler.get_tasks() # THEN assert", "assert 1 == get_cat.call_count assert scheduler._max_running_tasks is not None assert", "'await' expression\")] log_exception.assert_has_calls(calls) @pytest.mark.asyncio async def test_get_scheduled_processes(self, mocker): # GIVEN", "negative tests for full code coverage # GIVEN scheduler =", "1 == first_task.call_count assert 1 == resume_sch.call_count assert 1 ==", "@pytest.mark.asyncio async def test_delete_schedule_enabled_schedule(self, mocker): # GIVEN scheduler, schedule, log_info,", "p @pytest.mark.asyncio async def test_get_tasks(self, mocker): # GIVEN scheduler, schedule,", "== first_task.call_count assert 1 == resume_sch.call_count assert 0 == enable_schedule.call_count", "test__get_process_scripts(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None,", "await scheduler.start() with pytest.raises(NotReadyError) as excinfo: await scheduler.get_scheduled_processes() with pytest.raises(NotReadyError)", "=[call('SCHAD', {'schedule': {'name': 'Test Schedule', 'processName': 'TestProcess', 'type': Schedule.Type.INTERVAL, 'repeat':", "'_resume_check_schedules') # Assert 
that there is no task queued for", "await scheduler.delete_schedule(uuid.uuid4()) @pytest.mark.asyncio async def test_delete_schedule_not_found(self, mocker): # GIVEN scheduler,", "scheduler.save_schedule(temp_schedule) del temp_schedule assert str(ex).endswith('time must be of type datetime.time')", "}, \"max_completed_task_age_days\": { \"description\": \"The maximum age, in days (based", "('2 days', True), ('2 days 00:00:59', False), ('00:25:61', True) ])", "'_paused', False) mocker.patch.object(scheduler, '_process_scripts', return_value=\"North Readings to PI\") mocker.patch.object(scheduler, '_wait_for_task_completion',", "as excinfo: await scheduler.delete_schedule(uuid.uuid4()) with pytest.raises(NotReadyError) as excinfo: await scheduler.get_running_tasks()", "await scheduler.delete_schedule(uuid.uuid4()) with pytest.raises(NotReadyError) as excinfo: await scheduler.get_running_tasks() with pytest.raises(NotReadyError)", "def test_save_schedule_new_with_enable_modified(self, mocker): @asyncio.coroutine def mock_coro(): return \"\" # GIVEN", "'information', return_value=asyncio.ensure_future(mock_task())) first_task = mocker.patch.object(scheduler, '_schedule_first_task') resume_sch = mocker.patch.object(scheduler, '_resume_check_schedules')", "MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.object(scheduler, '_schedule_first_task') mocker.patch.object(scheduler, '_ready', True) mocker.patch.object(scheduler, '_paused', False)", "# After task completion, sleep above, no task processes should", "log_info = mocker.patch.object(scheduler._logger, \"info\") current_time = time.time() curr_time = datetime.datetime.fromtimestamp(current_time)", "True), ('12:30:11', False), ('0 day 12:30:11', False), ('1 day 12:40:11',", "be empty\") with pytest.raises(ValueError) as ex: temp_schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row)", "async def test_enable_schedule_wrong_schedule_id(self, mocker): # GIVEN 
scheduler, schedule, log_info, log_exception,", "as excinfo: tasks = await scheduler.get_tasks() # THEN payload =", "is no task queued for schedule with pytest.raises(KeyError) as excinfo:", "south\", \"schedule_type\": 1, \"schedule_interval\": \"00:00:00\", \"schedule_time\": \"\", \"schedule_day\": 0, \"exclusive\":", "= mock_task_process mock_schedule_executions = dict() mock_schedule_execution = scheduler._ScheduleExecution() mock_schedule_executions[mock_schedule.id] =", "await mock_process() else: _rv = asyncio.ensure_future(mock_process()) scheduler = Scheduler() scheduler._storage", "for unit testing. Will be tested during System tests.\") async", "THEN task_id = uuid.uuid4() with pytest.raises(Exception) as excinfo: await scheduler.get_task(task_id)", "mocker.patch.object(scheduler, '_resume_check_schedules', return_value=asyncio.ensure_future(mock_task())) mocker.patch.object(scheduler, '_purge_tasks_task', return_value=asyncio.ensure_future(asyncio.sleep(.1))) mocker.patch.object(scheduler, '_scheduler_loop_task', return_value=asyncio.ensure_future(asyncio.sleep(.1))) current_time", "assert schedule.process_name == schedule_row[9] @pytest.mark.asyncio async def test_get_schedules(self, mocker): #", "execution\", 'purge' # log_info.assert_called_with(*log_params) @pytest.mark.asyncio async def test_queue_task_schedule_not_found(self, mocker): #", "scheduler, schedule, log_info, log_exception, log_error, log_debug @pytest.mark.asyncio async def test__resume_check_schedules(self,", "kwargs2 = log_info.call_args_list[2] assert 'stats collection' in args0 assert 'COAP", "= await self.scheduler_fixture(mocker) sch_id = uuid.UUID(\"ada12840-68d3-11e7-907b-a6006ad3dba0\") #Coap await scheduler._get_schedules() mocker.patch.object(scheduler,", "\"t\", \"enabled\": \"f\" }, ] scheduled_processes = [ { \"name\":", "\"f\" }, ] scheduled_processes = [ { \"name\": \"purge\", \"script\":", "} ] def __init__(self, core_management_host=None, core_management_port=None): 
super().__init__(core_management_host, core_management_port) def _get_storage_service(self,", "{} already disabled\".format(str(sch_id)) assert (scheduler._schedules[sch_id]).id == sch_id assert (scheduler._schedules[sch_id]).enabled is", "running\", \"type\": \"integer\", \"default\": str(Scheduler._DEFAULT_MAX_COMPLETED_TASK_AGE_DAYS), \"value\": str(Scheduler._DEFAULT_MAX_COMPLETED_TASK_AGE_DAYS) }, } #", "@pytest.mark.asyncio async def test__schedule_first_task(self, mocker): # TODO: Mandatory - Add", "mocker.patch.object(scheduler, '_task_processes') audit_logger = mocker.patch.object(AuditLogger, 'information', return_value=asyncio.ensure_future(mock_task())) log_info = mocker.patch.object(scheduler._logger,", "\"1\" ] }, { \"name\": \"North Readings to OCS\", \"script\":", "\"end_time\": \"2018-02-06 13:28:14.856375\", \"exit_code\": \"0\", \"reason\": \"\" } ] def", "as excinfo: assert scheduler._schedule_executions[schedule.id] is True # Now queue task", "that do not have a status of running\", \"type\": \"integer\",", "# Assert that there is no task queued for mock_schedule", "is not None assert 2 == log_info.call_count # args, kwargs", "pytest.raises(Exception): await scheduler._get_schedules() log_args = 'Query failed: %s', 'schedules' log_exception.assert_called_once_with(*log_args)", "await self.scheduler_fixture(mocker) # WHEN # THEN with pytest.raises(TaskNotRunningError) as excinfo:", "log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) sch_id = uuid.UUID(\"d1631422-9ec6-11e7-abc4-cec278b6b50a\")", "mocker.patch.object(scheduler._logger, \"debug\") log_info = mocker.patch.object(scheduler._logger, \"info\") current_time = time.time() mocker.patch.object(scheduler,", "assert schedule.repeat == datetime.timedelta(0, 3600) assert schedule.exclusive is True assert", "as excinfo: await scheduler.delete_schedule(uuid.uuid4()) @pytest.mark.asyncio async def test_delete_schedule_not_found(self, mocker): #", 
"scheduler._max_completed_task_age is None await scheduler._read_config() # THEN assert 1 ==", "tasks that can be running at any given time\", \"type\":", "= mocker.patch.object(scheduler._logger, \"info\") sch_id = uuid.UUID(\"2b614d26-760f-11e7-b5a5-be2e44b06b34\") # OMF to PI", "mocker.patch.object(scheduler, '_schedule_first_task') log_exception = mocker.patch.object(scheduler._logger, \"exception\") new_schedules = copy.deepcopy(MockStorageAsync.schedules) new_schedules[5]['schedule_interval']", "scheduler_fixture(self, mocker): # Changed in version 3.8: patch() now returns", "# THEN # Now confirm no schedule is deleted assert", "async def test_enable_schedule_already_enabled(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception,", "Schedule %s. Not deleted.', str(sch_id) log_exception.assert_called_with(*log_params) @pytest.mark.asyncio async def test_delete_schedule_exception(self,", "%s, %s running tasks\\n%s\" in args assert 'OMF to PI", "@pytest.mark.asyncio async def test__read_storage(self, mocker): # GIVEN scheduler = Scheduler()", "started yet assert 0 == len(scheduler._schedule_executions[schedule.id].task_processes) # WHEN await scheduler._start_task(schedule)", "%s', 'schedules' log_exception.assert_called_once_with(*log_args) @pytest.mark.asyncio async def test__read_storage(self, mocker): # GIVEN", "async def test__start_task(self, mocker): # TODO: Mandatory - Add negative", "def test_stop(self, mocker): # TODO: Mandatory - Add negative tests", "PI north' in args2 @pytest.mark.asyncio async def test__schedule_next_task(self, mocker): #", "schedule_row[5] assert schedule.exclusive == schedule_row[7] assert schedule.enabled == schedule_row[8] assert", "id=schedule_id, name='Test Schedule', type=Schedule.Type.TIMED, day=0, time=0, repeat=datetime.timedelta(seconds=30), repeat_seconds=30, exclusive=False, enabled=True,", "return_value=\"North Readings to PI\") mocker.patch.object(scheduler, '_wait_for_task_completion', 
return_value=asyncio.ensure_future(mock_task())) mocker.patch.object(scheduler, '_terminate_child_processes') mocker.patch.object(asyncio,", "await scheduler._start_task(schedule) # THEN # Confirm that task has started", "= await scheduler.get_running_tasks() # THEN assert 1 == len(tasks) assert", "log_info = mocker.patch.object(scheduler._logger, \"info\") return scheduler, schedule, log_info, log_exception, log_error,", "# WHEN # THEN with pytest.raises(ValueError) as ex: temp_schedule =", "that there is no task queued for this schedule at", "mock_task_processes = dict() mock_task_process.process = await asyncio.create_subprocess_exec(\"sleep\", \".1\") mock_task_process.schedule =", "@pytest.allure.feature(\"unit\") @pytest.allure.story(\"scheduler\") class TestScheduler: async def scheduler_fixture(self, mocker): # Changed", "assert 'COAP listener south' in args1 assert 'OMF to PI", "time\", \"type\": \"integer\", \"default\": str(Scheduler._DEFAULT_MAX_RUNNING_TASKS), \"value\": str(Scheduler._DEFAULT_MAX_RUNNING_TASKS) }, \"max_completed_task_age_days\": {", "def delete_from_tbl(cls, table_name, condition=None): pass @classmethod async def query_tbl_with_payload(cls, table_name,", "return_value=mock_coro()) disable_schedule = mocker.patch.object(scheduler, \"disable_schedule\", return_value=mock_coro()) # WHEN await scheduler.save_schedule(schedule,", "assert status is True assert message == \"Schedule successfully enabled\"", "\"id\": \"259b8570-65c1-4b92-8c62-e9642631a600\", \"process_name\": \"North Readings to PI\", \"state\": 1, \"start_time\":", "scheduler.disable_schedule(random_schedule_id) # THEN log_params = \"No such Schedule %s\", str(random_schedule_id)", "\"North Readings to OCS\", \"script\": [ \"tasks/north\", \"--stream_id\", \"4\", \"--debug_level\",", "type=Schedule.Type.INTERVAL, day=0, time=0, repeat=10, repeat_seconds=10, exclusive=False, enabled=True, process_name='TestProcess') # WHEN", "= mocker.patch.object(scheduler, 
\"disable_schedule\", return_value=mock_coro()) # WHEN await scheduler.save_schedule(schedule, is_enabled_modified=True) #", "test__get_schedules(self, test_interval, is_exception, mocker): # GIVEN scheduler = Scheduler() scheduler._storage", "await scheduler.get_running_tasks() with pytest.raises(NotReadyError) as excinfo: await scheduler.cancel_task(uuid.uuid4()) @pytest.mark.skip(\"_terminate_child_processes() not", "north\", type=Schedule.Type.INTERVAL, repeat=datetime.timedelta(seconds=30), repeat_seconds=30, time=None, day=None, exclusive=True, enabled=True) log_exception =", "schedule.process_name == tasks[0].process_name assert tasks[0].reason is None assert tasks[0].state ==", "== \"Schedule is already enabled\" assert (scheduler._schedules[sch_id]).id == sch_id assert", "kwargs = log_info.call_args_list[0] assert \"Process started: Schedule '%s' process '%s'", "1 == first_task.call_count assert 1 == resume_sch.call_count assert 0 ==", "= [call('SCHCH', {'schedule': {'name': 'OMF to PI north', 'repeat': 30.0,", "collection' in args0 assert 'COAP listener south' in args1 assert", "disable_schedule.call_count @pytest.mark.asyncio async def test_save_schedule_new_with_enable_modified(self, mocker): @asyncio.coroutine def mock_coro(): return", "table_name, query=None): if table_name == 'schedules': return { \"count\": len(MockStorageAsync.schedules),", "'type': Schedule.Type.INTERVAL, 'repeat': 30.0, 'enabled': True, 'exclusive': False}})] audit_logger.assert_has_calls(calls, any_order=True)", "scheduler._ScheduleRow( id=schedule_id, name='Test Schedule', type=Schedule.Type.INTERVAL, day=0, time=0, repeat=datetime.timedelta(seconds=30), repeat_seconds=30, exclusive=False,", "%s\\n%s\" in args assert 'OMF to PI north' in args", "# THEN with pytest.raises(Exception): await scheduler._get_schedules() log_args = 'Query failed:", "THEN payload = {\"return\": [\"id\", \"process_name\", \"schedule_name\", \"state\", {\"alias\": \"start_time\",", "= {\"return\": 
[\"id\", \"process_name\", \"schedule_name\", \"state\", {\"alias\": \"start_time\", \"format\": \"YYYY-MM-DD", "def test_get_scheduled_processes(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage =", "self.scheduler_fixture(mocker) # WHEN # THEN with pytest.raises(TaskNotFoundError) as excinfo: tasks", "disable_schedule.call_count @pytest.mark.asyncio async def test_save_schedule_update(self, mocker): @asyncio.coroutine def mock_coro(): return", "await self.scheduler_fixture(mocker) log_debug = mocker.patch.object(scheduler._logger, 'debug', side_effect=Exception()) # WHEN #", "%s pid %s, %s running tasks\\n%s\" in args assert 'OMF", "temp_schedule.day = 0 temp_schedule.time = datetime.time() await scheduler.save_schedule(temp_schedule) del temp_schedule", "sch = scheduler._schedules[sch_id] sch_execution = scheduler._schedule_executions[sch_id] time_before_call = sch_execution.next_start_time #", "time), for a rows \" \"in the tasks table that", "MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.object(scheduler, '_schedule_first_task') await scheduler._get_schedules() mocker.patch.object(scheduler, '_ready', True) mocker.patch.object(scheduler,", "there is no North task queued for schedule with pytest.raises(KeyError)", "Confirm that task has started assert 1 == len(scheduler._schedule_executions[schedule.id].task_processes) task_id", "class MockStorage(StorageClientAsync): def __init__(self, core_management_host=None, core_management_port=None): super().__init__(core_management_host, core_management_port) def _get_storage_service(self,", "async def mock_process(): m = MagicMock() m.pid = 9999 m.terminate", "\"schedule_interval\": \"01:00:00\", \"schedule_time\": \"\", \"schedule_day\": 0, \"exclusive\": \"t\", \"enabled\": \"t\"", "mocker.patch.multiple(scheduler, _max_running_tasks=10, _start_time=current_time) await scheduler._get_schedules() mocker.patch.object(scheduler, '_start_task', 
return_value=asyncio.ensure_future(mock_task())) # WHEN", "= MockStorageAsync(core_management_host=None, core_management_port=None) cr_cat = mocker.patch.object(ConfigurationManager, \"create_category\", return_value=asyncio.ensure_future(mock_task())) get_cat =", "part of scheduler._get_schedules(), scheduler._schedule_first_task() also gets executed, hence # \"stat", "calls = [call('SCHCH', {'schedule': {'name': 'backup hourly', 'type': Schedule.Type.INTERVAL, 'processName':", "await scheduler.disable_schedule(sch_id) # THEN assert status is True assert message", "\"enable_schedule\", return_value=mock_coro()) disable_schedule = mocker.patch.object(scheduler, \"disable_schedule\", return_value=mock_coro()) schedule_id = uuid.uuid4()", "scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) log_info = mocker.patch.object(scheduler._logger, \"info\") mock_schedules =", "assert scheduler._schedule_executions[schedule.id] is True # Now queue task and assert", "'purge' # log_info.assert_called_with(*log_params) @pytest.mark.asyncio async def test_queue_task_schedule_not_found(self, mocker): # GIVEN", "tasks = await scheduler.get_tasks() # THEN payload = {\"return\": [\"id\",", "}, { \"id\": \"2b614d26-760f-11e7-b5a5-be2e44b06b34\", \"process_name\": \"North Readings to PI\", \"schedule_name\":", "def test_save_schedule_update(self, mocker): @asyncio.coroutine def mock_coro(): return \"\" # GIVEN", "one schedule less assert len(scheduler._storage_async.schedules) - 1 == len(scheduler._schedules) @pytest.mark.asyncio", "scheduler._ScheduleExecution) # Changed in version 3.8: patch() now returns an", "tasks[0].end_time is None assert tasks[0].exit_code is None @pytest.mark.asyncio async def", "time.mktime(curr_time.timetuple()) assert 4 == log_info.call_count args0, kwargs0 = log_info.call_args_list[0] args1,", "log_error, log_debug = await self.scheduler_fixture(mocker) schedule_id = uuid.uuid4() # WHEN", "is no North task 
queued for schedule with pytest.raises(KeyError) as", "def test__read_storage(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage =", "\"name\": \"North Readings to PI\", \"script\": [ \"tasks/north\", \"--stream_id\", \"1\",", "tasks table that do not have a status of running\",", "for execution\", 'purge' # log_info.assert_called_with(*log_params) @pytest.mark.asyncio async def test_queue_task_schedule_not_found(self, mocker):", "\"schedule_time\": \"\", \"schedule_day\": 0, \"exclusive\": \"t\", \"enabled\": \"f\" }, ]", "assert \"Stopping process: Schedule '%s' process '%s' task %s pid", "mocker.patch.object(scheduler._logger, \"info\") mock_schedules = dict() mock_schedule = scheduler._ScheduleRow( id=uuid.UUID(\"2b614d26-760f-11e7-b5a5-be2e44b06b34\"), process_name=\"North", "assert len(scheduler._storage_async.scheduled_processes) == len(processes) @pytest.mark.asyncio async def test_schedule_row_to_schedule(self, mocker): #", "test_cancel_task_exception(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug", "WHEN # Now delete schedule await scheduler.delete_schedule(sch_id) # THEN #", "as ex: temp_schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.exclusive = None await", "be tested during System tests.\") async def test__scheduler_loop(self, mocker): pass", "= MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.object(scheduler, '_schedule_first_task') # log_info = mocker.patch.object(scheduler._logger, \"info\")", "4 == log_info.call_count args0, kwargs0 = log_info.call_args_list[0] args1, kwargs1 =", "'_schedule_first_task') await scheduler._get_schedules() mocker.patch.object(scheduler, '_ready', True) mocker.patch.object(scheduler, '_task_processes') audit_logger =", "= \"${VERSION}\" async def mock_task(): return \"\" async def mock_process():", "\"stats collector\", \"script\": [ \"tasks/statistics\" ] }, { \"name\": \"backup\",", 
"'Query failed: %s' == args[0] p = json.loads(args[1]) assert payload", "= MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.multiple(scheduler, _purge_tasks_task=None, _last_task_purge_time=None) mocker.patch.object(scheduler, 'purge_tasks', return_value=asyncio.ensure_future(mock_task())) #", "schedule.process_name == \"purge\" @pytest.mark.asyncio async def test_get_schedule_exception(self, mocker): # GIVEN", "{'schedule': {'name': 'Test Schedule', 'processName': 'TestProcess', 'type': Schedule.Type.INTERVAL, 'repeat': 30.0,", "earliest_start_time is not None assert 3 == log_info.call_count args0, kwargs0", "task has started assert 1 == len(scheduler._schedule_executions[schedule.id].task_processes) task_id = list(scheduler._schedule_executions[schedule.id].task_processes.keys())[0]", "== task.process_name assert task.reason is '' assert task.state is not", "self.scheduler_fixture(mocker) schedule_id = uuid.uuid4() # WHEN # THEN with pytest.raises(ScheduleNotFoundError):", "return_value=mock_coro()) disable_schedule = mocker.patch.object(scheduler, \"disable_schedule\", return_value=mock_coro()) # WHEN await scheduler.save_schedule(schedule)", "pytest.raises(Exception) as excinfo: await scheduler.get_task(task_id) # THEN payload = {\"return\":", "task and assert that the task has been queued await", "await scheduler._get_process_scripts() # THEN assert len(scheduler._storage_async.scheduled_processes) == len(scheduler._process_scripts) @pytest.mark.asyncio async", "# WHEN schedules = await scheduler.get_schedules() # THEN assert len(scheduler._storage_async.schedules)", "test_interval mocker.patch.object(MockStorageAsync, 'schedules', new_schedules) # WHEN # THEN if is_exception", "log_info.assert_called_with(*log_params) @pytest.mark.asyncio async def test_enable_schedule_wrong_schedule_id(self, mocker): # GIVEN scheduler, schedule,", "log_error, log_debug = await self.scheduler_fixture(mocker) sch_id = 
uuid.UUID(\"ada12840-68d3-11e7-907b-a6006ad3dba0\") #Coap await", "\"\", \"schedule_day\": 0, \"exclusive\": \"t\", \"enabled\": \"f\" }, ] scheduled_processes", "\"script\": [ \"tasks/north\", \"--stream_id\", \"4\", \"--debug_level\", \"1\" ] }, ]", "self.scheduler_fixture(mocker) # WHEN # THEN with pytest.raises(TaskNotRunningError) as excinfo: await", "Remove after implementation of above test test__read_config() mocker.patch.object(scheduler, '_read_config', return_value=asyncio.ensure_future(mock_task()))", "must be of type datetime.timedelta') with pytest.raises(ValueError) as ex: temp_schedule", "= await mock_process() else: _rv = asyncio.ensure_future(mock_process()) scheduler = Scheduler()", "await scheduler.disable_schedule(random_schedule_id) # THEN log_params = \"No such Schedule %s\",", "\"exception\") # WHEN # THEN with pytest.raises(Exception): await scheduler._get_process_scripts() log_args", "len(scheduler._storage_async.schedules) + 1 == len(scheduler._schedules) assert 1 == audit_logger.call_count calls", "1234 await scheduler.save_schedule(temp_schedule) del temp_schedule assert str(ex).endswith('time must be of", "queue task and assert that the task has been queued", "assert scheduler._max_completed_task_age is not None @pytest.mark.asyncio async def test_start(self, mocker):", "log_debug = mocker.patch.object(scheduler._logger, \"debug\") log_info = mocker.patch.object(scheduler._logger, \"info\") return scheduler,", "= asyncio.ensure_future(mock_process()) mocker.patch.object(asyncio, 'create_subprocess_exec', return_value=_rv) mocker.patch.object(asyncio, 'ensure_future', return_value=asyncio.ensure_future(mock_task())) mocker.patch.object(scheduler, '_resume_check_schedules')", "_rv = await mock_process() else: _rv = asyncio.ensure_future(mock_process()) mocker.patch.object(asyncio, 'create_subprocess_exec',", "enabled=True, process_name='TestProcess') schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) # WHEN await 
scheduler.save_schedule(schedule)", "= MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.object(scheduler, '_schedule_first_task') mocker.patch.object(scheduler, '_ready', True) mocker.patch.object(scheduler, '_paused',", "await scheduler.save_schedule(schedule) # THEN assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) assert 1", "= time.time() mocker.patch.multiple(scheduler, _core_management_port=9999, _core_management_host=\"0.0.0.0\", _start_time=current_time - 3600, _paused=False, _task_processes={})", "schedule_row[1] assert schedule.schedule_type == schedule_row[2] assert schedule_row[3] is 0 #", "task queued for schedule with pytest.raises(KeyError) as excinfo: assert scheduler._schedule_executions[schedule.id]", "= await self.scheduler_fixture(mocker) # WHEN # THEN with pytest.raises(TaskNotFoundError) as", "pass class MockStorage(StorageClientAsync): def __init__(self, core_management_host=None, core_management_port=None): super().__init__(core_management_host, core_management_port) def", "{\"return\": [\"id\", \"process_name\", \"schedule_name\", \"state\", {\"alias\": \"start_time\", \"column\": \"start_time\", \"format\":", "{ \"id\": \"d1631422-9ec6-11e7-abc4-cec278b6b50a\", \"process_name\": \"backup\", \"schedule_name\": \"backup hourly\", \"schedule_type\": 3,", "no task queued for mock_schedule with pytest.raises(KeyError) as excinfo: assert", "@pytest.mark.asyncio async def test_purge_tasks(self, mocker): # TODO: Mandatory - Add", "schedule.process_name == schedule_row[9] @pytest.mark.asyncio async def test_get_schedules(self, mocker): # GIVEN", "None assert task.exit_code is '0' @pytest.mark.skip(\"Need a suitable fixture\") @pytest.mark.asyncio", "sys import copy import pytest from fledge.services.core.scheduler.scheduler import Scheduler, AuditLogger,", "} class MockStorageAsync(StorageClientAsync): schedules = [ { \"id\": \"cea17db8-6ccc-11e7-907b-a6006ad3dba0\", \"process_name\":", 
"= MockStorageAsync(core_management_host=None, core_management_port=None) log_info = mocker.patch.object(scheduler._logger, \"info\") log_exception = mocker.patch.object(scheduler._logger,", "mocker): pass @pytest.mark.asyncio async def test__read_config(self, mocker): async def get_cat():", "True, 'repeat': 30.0, 'exclusive': False, 'day': 1, 'time': '0:0:0', 'processName':", "{ \"count\": len(MockStorageAsync.tasks), \"rows\": MockStorageAsync.tasks } @classmethod async def query_tbl(cls,", "await scheduler.cancel_task(uuid.uuid4()) @pytest.mark.asyncio async def test_not_ready_and_paused(self, mocker): # GIVEN scheduler,", "despite mocking _purge_tasks_task, _scheduler_loop_task calls = [call('An exception was raised", "\"start_time\", \"column\": \"start_time\", \"format\": \"YYYY-MM-DD HH24:MI:SS.MS\"}, {\"alias\": \"end_time\", \"column\": \"end_time\",", "message == \"Schedule successfully enabled\" assert (scheduler._schedules[sch_id]).id == sch_id assert", "tasks[0].start_time is not None assert tasks[0].end_time is not None assert", "current_time = time.time() curr_time = datetime.datetime.fromtimestamp(current_time) mocker.patch.multiple(scheduler, _max_running_tasks=10, _start_time=current_time) await", "Schedule', type=Schedule.Type.TIMED, day=1, time=datetime.time(), repeat=datetime.timedelta(seconds=30), repeat_seconds=30, exclusive=False, enabled=True, process_name='TestProcess') schedule", "# THEN assert isinstance(schedule, Schedule) assert schedule.schedule_id == schedule_id assert", "mock_schedule with pytest.raises(KeyError) as excinfo: assert scheduler._schedule_executions[schedule.id] is True #", "test_get_scheduled_processes(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None,", "Confirm that no task has started yet assert 0 ==", "# Now confirm there is one schedule less assert len(scheduler._storage_async.schedules)", "'_schedule_first_task', side_effect=Exception()) # WHEN # THEN with 
pytest.raises(Exception): await scheduler._get_schedules()", "log_error, log_debug @pytest.mark.asyncio async def test__resume_check_schedules(self, mocker): # GIVEN scheduler", "# WHEN schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) # THEN assert isinstance(schedule,", "excinfo: await scheduler.save_schedule(Schedule(Schedule.Type.INTERVAL)) with pytest.raises(NotReadyError) as excinfo: await scheduler.disable_schedule(uuid.uuid4()) with", "'' assert tasks[0].state is not None assert tasks[0].cancel_requested is None", "See: http://fledge-iot.readthedocs.io/ # FLEDGE_END import asyncio import datetime import uuid", "_start_time=current_time) await scheduler._get_schedules() sch_id = uuid.UUID(\"2176eb68-7303-11e7-8cf7-a6006ad3dba0\") # stat collector sch", "mocker.patch.object(scheduler._logger, \"info\") current_time = time.time() mocker.patch.multiple(scheduler, _max_running_tasks=10, _start_time=current_time-3600) await scheduler._get_schedules()", "test_stop(self, mocker): # TODO: Mandatory - Add negative tests for", "True) # Confirm there are 14 schedules assert len(scheduler._storage_async.schedules) ==", "Interval Schedule assert schedule_row[4] is 0 # 0 for Interval", "task_id = list(scheduler._schedule_executions[schedule.id].task_processes.keys())[0] # Confirm that cancel request has not", "is None assert tasks[0].exit_code is None @pytest.mark.asyncio async def test_get_task(self,", "WHEN # THEN with pytest.raises(TaskNotRunningError) as excinfo: await scheduler.cancel_task(uuid.uuid4()) @pytest.mark.asyncio", "self.scheduler_fixture(mocker) # Assert that there is no task queued for", "schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) # WHEN await scheduler.save_schedule(schedule) # THEN", "1 == log_exception.call_count else: await scheduler._get_schedules() assert len(scheduler._storage_async.schedules) == len(scheduler._schedules)", "'Test Schedule', 'processName': 'TestProcess', 'type': 
Schedule.Type.INTERVAL, 'repeat': 30.0, 'enabled': True,", "scheduler.queue_task(sch_id) # THEN assert isinstance(scheduler._schedule_executions[sch_id], scheduler._ScheduleExecution) # log_params = \"Queued", "async def test_enable_schedule(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception,", "= dict() mock_task_process.process = await asyncio.create_subprocess_exec(\"sleep\", \".1\") mock_task_process.schedule = mock_schedule", "scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None,", "async def test_get_tasks_exception(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception,", "not None @pytest.mark.asyncio async def test_start(self, mocker): # TODO: Mandatory", "'scheduled_processes'), call('Database command: %s', 'schedules')] log_debug.assert_has_calls(calls, any_order=True) @pytest.mark.asyncio async def", "\"format\": \"YYYY-MM-DD HH24:MI:SS.MS\", \"column\": \"end_time\"}, \"reason\", \"exit_code\"], \"where\": {\"column\": \"id\",", "temp_schedule assert str(ex).endswith(\"name can not be empty\") with pytest.raises(ValueError) as", "str(Scheduler._DEFAULT_MAX_COMPLETED_TASK_AGE_DAYS), \"value\": str(Scheduler._DEFAULT_MAX_COMPLETED_TASK_AGE_DAYS) }, } # Changed in version 3.8:", "this schedule at first with pytest.raises(KeyError) as excinfo: assert scheduler._schedule_executions[sch_id]", "'stats collection' in args0 assert 'COAP listener south' in args1", "scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.exclusive = None await scheduler.save_schedule(temp_schedule) del temp_schedule assert", "assert 2 == log_info.call_count calls = [call('No Task running for", "mock_schedules[mock_schedule.id] = mock_schedule mock_task_process = scheduler._TaskProcess() mock_task_processes = dict() mock_task_process.process", "disable_schedule = mocker.patch.object(scheduler, 
\"disable_schedule\", return_value=mock_coro()) schedule_id = uuid.uuid4() schedule_row =", "is_exception, mocker): # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None,", "# stat collector sch = scheduler._schedules[sch_id] sch_execution = scheduler._schedule_executions[sch_id] #", "True}})] audit_logger.assert_has_calls(calls, any_order=True) @pytest.mark.asyncio async def test_enable_schedule_already_enabled(self, mocker): # GIVEN", "log_info = mocker.patch.object(scheduler._logger, \"info\") current_time = time.time() mocker.patch.object(scheduler, '_schedule_first_task') mocker.patch.object(scheduler,", "purge schedule # WHEN schedule = await scheduler.get_schedule(schedule_id) # THEN", "= uuid.UUID(\"2b614d26-760f-11e7-b5a5-be2e44b06b34\") # OMF to PI North schedule_row = scheduler._ScheduleRow(", "to PI north' in args2 @pytest.mark.asyncio async def test__schedule_next_task(self, mocker):", "'North Readings to PI')] log_info.assert_has_calls(calls) assert 1 == audit_logger.call_count calls", "'_task_processes') audit_logger = mocker.patch.object(AuditLogger, 'information', return_value=asyncio.ensure_future(mock_task())) log_info = mocker.patch.object(scheduler._logger, \"info\")", "for Schedule %s', '2b614d26-760f-11e7-b5a5-be2e44b06b34'), call(\"Disabled Schedule '%s/%s' process '%s'\\n\", 'OMF", "0 == disable_schedule.call_count # WHEN await scheduler.save_schedule(schedule, is_enabled_modified=False) # THEN", "excinfo: await scheduler.delete_schedule(uuid.uuid4()) @pytest.mark.asyncio async def test_delete_schedule_not_found(self, mocker): # GIVEN", "is not None assert tasks[0].exit_code is '0' @pytest.mark.asyncio async def", "None @pytest.mark.asyncio async def test__check_schedules(self, mocker): # TODO: Mandatory -", "= await scheduler.get_scheduled_processes() # THEN assert len(scheduler._storage_async.scheduled_processes) == len(processes) @pytest.mark.asyncio", "# GIVEN scheduler, schedule, log_info, 
log_exception, log_error, log_debug = await", "is %d', 9999)] log_info.assert_has_calls(calls, any_order=True) calls = [call('Database command: %s',", "expression\"), call('An exception was raised by Scheduler._scheduler_loop %s', \"object MagicMock", "mocker.patch.object(scheduler, '_ready', True) mocker.patch.object(scheduler, '_resume_check_schedules') # WHEN # THEN with", "str(task_id)}} args, kwargs = log_exception.call_args assert 'Query failed: %s' ==", "* from fledge.common.storage_client.storage_client import StorageClientAsync __author__ = \"<NAME>\" __copyright__ =", "\"purge\", \"schedule_type\": 4, \"schedule_interval\": \"01:00:00\", \"schedule_time\": \"\", \"schedule_day\": 0, \"exclusive\":", "('1 day 12:40:11', False), ('2 days', True), ('2 days 00:00:59',", "scheduler._schedule_executions[schedule.id] is True # Now queue task and assert that", "def test_save_schedule_exception(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error,", "pytest.raises(NotReadyError) as excinfo: await scheduler.get_schedule(uuid.uuid4()) with pytest.raises(NotReadyError) as excinfo: await", "been made assert scheduler._schedule_executions[schedule.id].task_processes[task_id].cancel_requested is None # WHEN await scheduler.cancel_task(task_id)", "mock_process() else: _rv = asyncio.ensure_future(mock_process()) mocker.patch.object(asyncio, 'create_subprocess_exec', return_value=_rv) mocker.patch.object(asyncio, 'ensure_future',", "= MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.object(scheduler, '_schedule_first_task') await scheduler._get_schedules() mocker.patch.object(scheduler, '_ready', True)", "await scheduler._start_task(schedule) # Confirm that task has started assert 1", "1 == resume_sch.call_count assert 0 == enable_schedule.call_count assert 0 ==", "# WHEN await scheduler.disable_schedule(random_schedule_id) # THEN log_params = \"No such", "mock_task_process.task_id = mock_task_id 
mock_task_processes[mock_task_process.task_id] = mock_task_process mock_schedule_executions = dict() mock_schedule_execution", "= mock_task_id mock_task_processes[mock_task_process.task_id] = mock_task_process mock_schedule_executions = dict() mock_schedule_execution =", "sch = scheduler._schedules[sch_id] sch_execution = scheduler._schedule_executions[sch_id] # WHEN scheduler._schedule_first_task(sch, current_time)", "repeat=datetime.timedelta(seconds=30), repeat_seconds=30, exclusive=False, enabled=True, process_name='TestProcess') schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) #", "= mocker.patch.object(scheduler, '_resume_check_schedules') log_info = mocker.patch.object(scheduler._logger, \"info\") schedule_id = uuid.UUID(\"2b614d26-760f-11e7-b5a5-be2e44b06b34\")", "time.time() mocker.patch.multiple(scheduler, _core_management_port=9999, _core_management_host=\"0.0.0.0\", _start_time=current_time - 3600, _paused=False, _task_processes={}) #", "in args assert 'North Readings to PI' in args args,", "to PI' in args @pytest.mark.asyncio async def test_cancel_task_exception(self, mocker): #", "is not None @pytest.mark.asyncio async def test__check_schedules(self, mocker): # TODO:", "for test_save_schedule_update if table_name == \"schedules\": return {\"count\": 1} @classmethod", "async def test_get_scheduled_processes(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage", "THEN assert scheduler._schedule_executions[schedule.id].task_processes[task_id].cancel_requested is not None assert 2 == log_info.call_count", "log_info.call_args_list[0] # assert (\"Queued schedule '%s' for execution\", 'OMF to", "async def test_get_task(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception,", "mocker.patch.object(scheduler, \"enable_schedule\", return_value=mock_coro()) disable_schedule = mocker.patch.object(scheduler, \"disable_schedule\", return_value=mock_coro()) # WHEN", "== schedule_id assert schedule.name == \"purge\" 
assert schedule.schedule_type == Schedule.Type.MANUAL", "PI\", name=\"OMF to PI north\", type=Schedule.Type.INTERVAL, repeat=datetime.timedelta(seconds=30), repeat_seconds=30, time=None, day=None,", "= json.loads(args[1]) assert payload == p @pytest.mark.asyncio async def test_get_tasks(self,", "== audit_logger.call_count calls = [call('SCHCH', {'schedule': {'name': 'Test Schedule', 'enabled':", "scheduler.queue_task(schedule.id) assert isinstance(scheduler._schedule_executions[schedule.id], scheduler._ScheduleExecution) # Confirm that no task has", "to PI'}})] audit_logger.assert_has_calls(calls, any_order=True) @pytest.mark.asyncio async def test_disable_schedule_wrong_schedule_id(self, mocker): #", "- 1 == len(scheduler._schedules) @pytest.mark.asyncio async def test_delete_schedule_enabled_schedule(self, mocker): #", "time import json from unittest.mock import MagicMock, call import sys", "schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) audit_logger =", "core_management_port) def _get_storage_service(self, host, port): return { \"id\": uuid.uuid4(), \"name\":", "\"end_time\", \"format\": \"YYYY-MM-DD HH24:MI:SS.MS\"}, \"reason\", \"exit_code\"], \"limit\": 100} args, kwargs", "WHEN # Check IF part mocker.patch.object(scheduler, '_scheduler_loop_sleep_task', asyncio.Task(asyncio.sleep(5))) scheduler._resume_check_schedules() #", "async def test__schedule_next_task(self, mocker): # TODO: Mandatory - Add negative", "core_management_port=None) await scheduler._get_process_scripts() mocker.patch.object(scheduler, '_ready', True) # WHEN processes =", "= log_info.call_args_list[0] args1, kwargs1 = log_info.call_args_list[1] args2, kwargs2 = log_info.call_args_list[2]", "= mocker.patch.object(scheduler._logger, \"info\") await scheduler._get_schedules() sch_id = uuid.UUID(\"cea17db8-6ccc-11e7-907b-a6006ad3dba0\") # backup", "async def test_delete_schedule_not_found(self, mocker): # GIVEN scheduler, schedule, log_info, 
log_exception,", "await scheduler.queue_task(schedule.id) assert isinstance(scheduler._schedule_executions[schedule.id], scheduler._ScheduleExecution) # Confirm that no task", "from fledge.services.core.scheduler.entities import * from fledge.services.core.scheduler.exceptions import * from fledge.common.storage_client.storage_client", "PI\") # WHEN await scheduler._wait_for_task_completion(mock_task_process) # THEN # After task", "assert 1 == audit_logger.call_count calls = [call('SCHCH', {'schedule': {'name': 'backup", "== len(scheduler._schedules) calls = [call('Starting'), call('Starting Scheduler: Management port received", "# backup await scheduler._get_schedules() # Confirm no. of schedules assert", "= uuid.UUID(\"2b614d26-760f-11e7-b5a5-be2e44b06b34\") # OMF to PI North # WHEN status,", "mocker.patch.object(scheduler._logger, \"error\") log_debug = mocker.patch.object(scheduler._logger, \"debug\") log_info = mocker.patch.object(scheduler._logger, \"info\")", "scheduler.delete_schedule(uuid.uuid4()) @pytest.mark.asyncio async def test_get_running_tasks(self, mocker): # GIVEN scheduler, schedule,", "mocker.patch.object(scheduler, '_resume_check_schedules') log_info = mocker.patch.object(scheduler._logger, \"info\") enable_schedule = mocker.patch.object(scheduler, \"enable_schedule\",", "scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.multiple(scheduler, _purge_tasks_task=None, _last_task_purge_time=None) mocker.patch.object(scheduler, 'purge_tasks', return_value=asyncio.ensure_future(mock_task()))", "assert scheduler._last_task_purge_time is not None @pytest.mark.asyncio async def test__check_purge_tasks(self, mocker):", "[call('An exception was raised by Scheduler._purge_tasks %s', \"object MagicMock can't", "= log_info.call_args_list[3] assert 'stats collection' in args0 assert 'COAP listener", "str(random_schedule_id) log_exception.assert_called_with(*log_params) @pytest.mark.asyncio async def 
test_disable_schedule_already_disabled(self, mocker): # GIVEN scheduler", "= uuid.UUID(\"ada12840-68d3-11e7-907b-a6006ad3dba0\") #Coap await scheduler._get_schedules() mocker.patch.object(scheduler, '_ready', True) # Confirm", "== len(scheduler._schedule_executions[schedule.id].task_processes) await scheduler._start_task(schedule) # Confirm that task has started", "None assert tasks[0].exit_code is None @pytest.mark.asyncio async def test_get_task(self, mocker):", "MockStorageAsync.tasks } @classmethod async def query_tbl(cls, table_name, query=None): if table_name", "# THEN with pytest.raises(TaskNotRunningError) as excinfo: await scheduler.cancel_task(uuid.uuid4()) @pytest.mark.asyncio async", "super().__init__(core_management_host, core_management_port) def _get_storage_service(self, host, port): return { \"id\": uuid.uuid4(),", "\"in the tasks table that do not have a status", "be of type datetime.time') with pytest.raises(ValueError) as ex: temp_schedule =", "== sch_id assert (scheduler._schedules[sch_id]).enabled is False assert 2 == log_info.call_count", "schedule, log_info, log_exception, log_error, log_debug @pytest.mark.asyncio async def test__resume_check_schedules(self, mocker):", "THEN # Now confirm no schedule is deleted assert len(scheduler._storage_async.schedules)", "== \"Schedule {} already disabled\".format(str(sch_id)) assert (scheduler._schedules[sch_id]).id == sch_id assert", "\"Process started: Schedule '%s' process '%s' task %s pid %s,", "= uuid.uuid4() # WHEN # THEN with pytest.raises(ScheduleNotFoundError): schedule =", "a suitable fixture\") @pytest.mark.asyncio async def test_get_task_not_found(self, mocker): # GIVEN", "async def test_delete_schedule_enabled_schedule(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception,", "_task_processes=mock_task_processes, _schedule_executions=mock_schedule_executions) mocker.patch.object(scheduler, '_process_scripts', return_value=\"North Readings to PI\") # WHEN", "Not deleted.', 
str(sch_id) log_exception.assert_called_with(*log_params) @pytest.mark.asyncio async def test_delete_schedule_exception(self, mocker): #", "== resume_sch.call_count assert 0 == enable_schedule.call_count assert 0 == disable_schedule.call_count", "assert 'North Readings to PI' in args args, kwargs =", "'%s/%s' process '%s'\\n\", 'backup hourly', 'd1631422-9ec6-11e7-abc4-cec278b6b50a', 'backup')] log_info.assert_has_calls(calls, any_order=True) assert", "args @pytest.mark.asyncio async def test__start_task(self, mocker): # TODO: Mandatory -", "log_info.assert_has_calls(calls, any_order=True) # TODO: Find why these exceptions are being", "'debug', side_effect=Exception()) # WHEN with pytest.raises(Exception) as excinfo: tasks =", "'_schedule_first_task') await scheduler._get_schedules() schedule = scheduler._ScheduleRow( id=uuid.UUID(\"2b614d26-760f-11e7-b5a5-be2e44b06b34\"), process_name=\"North Readings to", "PI' in args @pytest.mark.asyncio async def test_purge_tasks(self, mocker): # TODO:", "\"schedule_name\": \"backup hourly\", \"schedule_type\": 3, \"schedule_interval\": \"01:00:00\", \"schedule_time\": \"\", \"schedule_day\":", "core_management_port=None) mocker.patch.object(scheduler, '_schedule_first_task') await scheduler._get_schedules() mocker.patch.object(scheduler, '_ready', True) mocker.patch.object(scheduler, '_task_processes')", "True assert message == \"Schedule {} already disabled\".format(str(sch_id)) assert (scheduler._schedules[sch_id]).id", "= [call('Starting'), call('Starting Scheduler: Management port received is %d', 9999)]", "= scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.name = None await scheduler.save_schedule(temp_schedule) del temp_schedule", "table_name, payload): # Only valid for test_save_schedule_update if table_name ==", "part mocker.patch.object(scheduler, '_scheduler_loop_sleep_task', asyncio.Task(asyncio.sleep(5))) scheduler._resume_check_schedules() # THEN assert 
scheduler._check_processes_pending is", "def test__scheduler_loop(self, mocker): pass @pytest.mark.asyncio async def test__schedule_next_timed_task(self, mocker): #", "task queued for this schedule at first with pytest.raises(KeyError) as", "'_scheduler_loop_task', return_value=asyncio.ensure_future(asyncio.sleep(.1))) current_time = time.time() mocker.patch.multiple(scheduler, _core_management_port=9999, _core_management_host=\"0.0.0.0\", _start_time=current_time -", "as excinfo: await scheduler.get_running_tasks() with pytest.raises(NotReadyError) as excinfo: await scheduler.cancel_task(uuid.uuid4())", "MagicMock() m.pid = 9999 m.terminate = lambda: True return m", "# Confirm that task has started assert 1 == len(scheduler._schedule_executions[schedule.id].task_processes)", "days', True), ('2 days 00:00:59', False), ('00:25:61', True) ]) async", "1 == disable_schedule.call_count @pytest.mark.asyncio async def test_save_schedule_update(self, mocker): @asyncio.coroutine def", "await scheduler._get_schedules() mocker.patch.object(scheduler, '_ready', True) mocker.patch.object(scheduler, '_task_processes') log_info = mocker.patch.object(scheduler._logger,", "schedule_row[3] is 0 # 0 for Interval Schedule assert schedule_row[4]", "assert schedule.repeat == schedule_row[5] assert schedule.exclusive == schedule_row[7] assert schedule.enabled", "get_cat = mocker.patch.object(ConfigurationManager, \"get_category_all_items\", return_value=_rv) # WHEN assert scheduler._max_running_tasks is", "len(MockStorageAsync.tasks), \"rows\": MockStorageAsync.tasks } @classmethod async def query_tbl(cls, table_name, query=None):", "\"--stream_id\", \"4\", \"--debug_level\", \"1\" ] }, ] tasks = [", "\"schedule_type\": 3, \"schedule_interval\": \"01:00:00\", \"schedule_time\": \"\", \"schedule_day\": 0, \"exclusive\": \"t\",", "= Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = 
MockStorageAsync(core_management_host=None, core_management_port=None)", "1, 'time': '0:0:0', 'processName': 'TestProcess', 'type': Schedule.Type.TIMED}})] audit_logger.assert_has_calls(calls, any_order=True) assert", "# WHEN # THEN with pytest.raises(NotReadyError) as excinfo: await scheduler.start()", "= log_info.call_args_list[1] args2, kwargs2 = log_info.call_args_list[2] assert 'stats collection' in", "[\"id\", \"process_name\", \"schedule_name\", \"state\", {\"alias\": \"start_time\", \"format\": \"YYYY-MM-DD HH24:MI:SS.MS\", \"column\":", "\"2018-02-06 13:28:14.477868\", \"end_time\": \"2018-02-06 13:28:14.856375\", \"exit_code\": \"0\", \"reason\": \"\" }", "scheduler._schedule_next_task(sch) time_after_call = sch_execution.next_start_time # THEN assert time_after_call > time_before_call", "'Query failed: %s', 'scheduled_processes' log_exception.assert_called_once_with(*log_args) @pytest.mark.asyncio @pytest.mark.parametrize(\"test_interval, is_exception\", [ ('\"Blah\"", "WHEN # THEN with pytest.raises(ValueError) as ex: temp_schedule = scheduler._schedule_row_to_schedule(schedule_id,", "await scheduler._get_process_scripts() mocker.patch.object(scheduler, '_ready', True) # WHEN processes = await", "scheduler._get_schedules() mocker.patch.object(scheduler, '_ready', True) # Confirm there are 14 schedules", "queued await scheduler.queue_task(schedule.id) assert isinstance(scheduler._schedule_executions[schedule.id], scheduler._ScheduleExecution) # Changed in version", "assert isinstance(scheduler._schedule_executions[schedule.id], scheduler._ScheduleExecution) # Changed in version 3.8: patch() now", "schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) # WHEN", "== p @pytest.mark.asyncio async def test_get_tasks(self, mocker): # GIVEN scheduler,", "Schedule assert schedule_row[4] is 0 # 0 for Interval Schedule", "message = await scheduler.enable_schedule(sch_id) # THEN assert status is True", "[ \"tasks/statistics\" 
] }, { \"name\": \"backup\", \"script\": [ \"tasks/backup_postgres\"", "# TODO: Mandatory - Add negative tests for full code", "test_save_schedule_update(self, mocker): @asyncio.coroutine def mock_coro(): return \"\" # GIVEN scheduler,", "assert 'North Readings to PI' in args @pytest.mark.asyncio async def", "Schedule '%s' process '%s' task %s pid %s\\n%s\" in args", "testing. Will be tested during System tests.\") async def test__scheduler_loop(self,", "calls = [call('No Task running for Schedule %s', '2b614d26-760f-11e7-b5a5-be2e44b06b34'), call(\"Disabled", "uuid.uuid4() schedule_row = scheduler._ScheduleRow( id=schedule_id, name='Test Schedule', type=Schedule.Type.TIMED, day=0, time=0,", "AsyncMock if the target is an async function. if sys.version_info.major", "table_name, condition=None): pass @classmethod async def query_tbl_with_payload(cls, table_name, query_payload): if", "of type datetime.time') with pytest.raises(ValueError) as ex: temp_schedule = scheduler._schedule_row_to_schedule(schedule_id,", "00:00:59', False), ('00:25:61', True) ]) async def test__get_schedules(self, test_interval, is_exception,", "\"service_port\": 9999, \"management_port\": 9999, \"address\": \"0.0.0.0\", \"protocol\": \"http\" } class", "{'name': 'Test Schedule', 'enabled': True, 'repeat': 30.0, 'exclusive': False, 'day':", "assert time_after_call > time.mktime(curr_time.timetuple()) assert 4 == log_info.call_count args0, kwargs0", "None assert scheduler._ready is False assert scheduler._paused is False assert", "= mocker.patch.object(scheduler._logger, \"debug\") log_info = mocker.patch.object(scheduler._logger, \"info\") return scheduler, schedule,", "schedule_row) temp_schedule.name = \"\" await scheduler.save_schedule(temp_schedule) del temp_schedule assert str(ex).endswith(\"name", "next_dt += datetime.timedelta(seconds=sch.repeat_seconds) scheduler._schedule_next_timed_task(sch, sch_execution, next_dt) time_after_call = sch_execution.next_start_time #", 
"self.scheduler_fixture(mocker) schedule_id = uuid.UUID(\"cea17db8-6ccc-11e7-907b-a6006ad3dba0\") # purge schedule # WHEN schedule", "with pytest.raises(NotReadyError) as excinfo: await scheduler.queue_task(uuid.uuid4()) with pytest.raises(NotReadyError) as excinfo:", "used in 'await' expression\"), call('An exception was raised by Scheduler._scheduler_loop", "north' in args2 @pytest.mark.asyncio async def test__schedule_next_task(self, mocker): # TODO:", "\"format\": \"YYYY-MM-DD HH24:MI:SS.MS\"}, {\"alias\": \"end_time\", \"column\": \"end_time\", \"format\": \"YYYY-MM-DD HH24:MI:SS.MS\"},", "\"schedule_day\": 0, \"exclusive\": \"t\", \"enabled\": \"t\" }, { \"id\": \"5d7fed92-fb9a-11e7-8c3f-9a214cf093ae\",", "= uuid.UUID(\"d1631422-9ec6-11e7-abc4-cec278b6b50a\") # backup queue_task = mocker.patch.object(scheduler, 'queue_task', return_value=asyncio.ensure_future(mock_task())) audit_logger", "core_management_port=None) mocker.patch.object(scheduler, '_schedule_first_task') # log_info = mocker.patch.object(scheduler._logger, \"info\") await scheduler._get_schedules()", "\"start_time\", \"format\": \"YYYY-MM-DD HH24:MI:SS.MS\", \"column\": \"start_time\"}, {\"alias\": \"end_time\", \"format\": \"YYYY-MM-DD", "unit test.\") @pytest.mark.asyncio async def test__terminate_child_processes(self, mocker): pass class MockStorage(StorageClientAsync):", "pid %s, %s running tasks\\n%s\" in args assert 'OMF to", "13:28:14.477868\", \"end_time\": \"2018-02-06 13:28:14.856375\", \"exit_code\": \"0\", \"reason\": \"\" } ]", "ELSE part mocker.patch.object(scheduler, '_scheduler_loop_sleep_task', None) scheduler._resume_check_schedules() # THEN assert scheduler._check_processes_pending", "been queued await scheduler.queue_task(schedule.id) assert isinstance(scheduler._schedule_executions[schedule.id], scheduler._ScheduleExecution) # Confirm that", "enabled\" assert (scheduler._schedules[sch_id]).id == sch_id assert (scheduler._schedules[sch_id]).enabled is True log_params", "Task 
running for Schedule %s', '2b614d26-760f-11e7-b5a5-be2e44b06b34'), call(\"Disabled Schedule '%s/%s' process", "async def test_not_ready_and_paused(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception,", "scheduler._purge_tasks_task is None assert scheduler._last_task_purge_time is not None @pytest.mark.asyncio async", "\"t\", \"enabled\": \"t\" }, { \"id\": \"2b614d26-760f-11e7-b5a5-be2e44b06b34\", \"process_name\": \"North Readings", "scheduler._ScheduleExecution) # log_params = \"Queued schedule '%s' for execution\", 'purge'", "len(scheduler._storage_async.schedules) - 1 == len(scheduler._schedules) @pytest.mark.asyncio async def test_delete_schedule_enabled_schedule(self, mocker):", "mock_schedule mock_task_process = scheduler._TaskProcess() mock_task_processes = dict() mock_task_process.process = await", "Readings to PI' in args @pytest.mark.asyncio async def test__start_task(self, mocker):", "'North Readings to PI'}})] audit_logger.assert_has_calls(calls, any_order=True) @pytest.mark.asyncio async def test_disable_schedule_wrong_schedule_id(self,", "as ex: temp_schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.name = None await", "time.time() mocker.patch.multiple(scheduler, _max_running_tasks=10, _start_time=current_time) await scheduler._get_schedules() sch_id = uuid.UUID(\"2176eb68-7303-11e7-8cf7-a6006ad3dba0\") #", "scheduler.save_schedule(schedule, is_enabled_modified=False) # THEN assert 1 == disable_schedule.call_count @pytest.mark.asyncio async", "scheduler.delete_schedule(sch_id) # THEN # Now confirm no schedule is deleted", "assert payload == p @pytest.mark.asyncio async def test_cancel_task_all_ok(self, mocker): #", "that the task has been queued await scheduler.queue_task(schedule.id) assert isinstance(scheduler._schedule_executions[schedule.id],", "import pytest from fledge.services.core.scheduler.scheduler import Scheduler, AuditLogger, ConfigurationManager from 
fledge.services.core.scheduler.entities", "args, kwargs = log_info.call_args_list[0] # assert (\"Queued schedule '%s' for", "dict() mock_schedule_execution = scheduler._ScheduleExecution() mock_schedule_executions[mock_schedule.id] = mock_schedule_execution mock_schedule_executions[mock_schedule.id].task_processes[mock_task_id] = mock_task_process", "type=Schedule.Type.TIMED, day=0, time=0, repeat=datetime.timedelta(seconds=30), repeat_seconds=30, exclusive=False, enabled=True, process_name='TestProcess') # WHEN", "[call('No Task running for Schedule %s', '2b614d26-760f-11e7-b5a5-be2e44b06b34'), call(\"Disabled Schedule '%s/%s'", "THEN with pytest.raises(TaskNotFoundError) as excinfo: tasks = await scheduler.get_task(uuid.uuid4()) @pytest.mark.asyncio", "tasks = [ { \"id\": \"259b8570-65c1-4b92-8c62-e9642631a600\", \"process_name\": \"North Readings to", "await self.scheduler_fixture(mocker) sch_id = uuid.UUID(\"ada12840-68d3-11e7-907b-a6006ad3dba0\") #Coap await scheduler._get_schedules() mocker.patch.object(scheduler, '_ready',", "WHEN # THEN with pytest.raises(ScheduleNotFoundError) as excinfo: await scheduler.queue_task(uuid.uuid4()) @pytest.mark.asyncio", "{ \"id\": uuid.uuid4(), \"name\": \"Fledge Storage\", \"type\": \"Storage\", \"service_port\": 9999,", "as excinfo: await scheduler.start() with pytest.raises(NotReadyError) as excinfo: await scheduler.get_scheduled_processes()", "is True @pytest.mark.asyncio async def test__wait_for_task_completion(self, mocker): # GIVEN scheduler", "mocker.patch.multiple(scheduler, _core_management_port=9999, _core_management_host=\"0.0.0.0\", current_time=current_time - 3600) # TODO: Remove after", "completion, sleep above, no task processes should be left pending", "0, \"exclusive\": \"t\", \"enabled\": \"t\" }, { \"id\": \"2b614d26-760f-11e7-b5a5-be2e44b06b34\", \"process_name\":", "} @classmethod async def query_tbl(cls, table_name, query=None): if table_name ==", "scheduler._schedule_row_to_schedule(schedule_id, 
schedule_row) # THEN assert isinstance(schedule, Schedule) assert schedule.schedule_id ==", "len(scheduler._storage_async.schedules) == len(scheduler._schedules) @pytest.mark.asyncio @pytest.mark.skip(\"_mark_tasks_interrupted() not implemented in main Scheduler", "THEN assert 1 == len(tasks) assert schedule.process_name == tasks[0].process_name assert", "scheduler.get_tasks() # THEN payload = {\"return\": [\"id\", \"process_name\", \"schedule_name\", \"state\",", "MockStorageAsync(core_management_host=None, core_management_port=None) log_info = mocker.patch.object(scheduler._logger, \"info\") mock_schedules = dict() mock_schedule", "given time\", \"type\": \"integer\", \"default\": str(Scheduler._DEFAULT_MAX_RUNNING_TASKS), \"value\": str(Scheduler._DEFAULT_MAX_RUNNING_TASKS) }, \"max_completed_task_age_days\":", "@pytest.mark.asyncio async def test_get_task(self, mocker): # GIVEN scheduler, schedule, log_info,", "mocker.patch.object(scheduler, '_ready', False) mocker.patch.object(scheduler, '_paused', True) # WHEN # THEN", "as excinfo: tasks = await scheduler.get_task(uuid.uuid4()) @pytest.mark.asyncio async def test_get_task_exception(self,", "with pytest.raises(ValueError) as ex: temp_schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.name =", "len(scheduler._storage_async.scheduled_processes) == len(processes) @pytest.mark.asyncio async def test_schedule_row_to_schedule(self, mocker): # GIVEN", "9999, \"management_port\": 9999, \"address\": \"0.0.0.0\", \"protocol\": \"http\" } class MockStorageAsync(StorageClientAsync):", "\"value\": str(Scheduler._DEFAULT_MAX_RUNNING_TASKS) }, \"max_completed_task_age_days\": { \"description\": \"The maximum age, in", "== p @pytest.mark.asyncio async def test_cancel_task_all_ok(self, mocker): # GIVEN scheduler,", "'%s' process '%s' task %s pid %s, %s running tasks\\n%s\"", "core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, 
core_management_port=None) mocker.patch.object(scheduler, '_schedule_first_task') # log_info =", "time_before_call assert 3 == log_info.call_count args0, kwargs0 = log_info.call_args_list[0] args1,", "False, 'day': 1, 'time': '0:0:0', 'processName': 'TestProcess', 'type': Schedule.Type.TIMED}})] audit_logger.assert_has_calls(calls,", "WHEN await scheduler.cancel_task(task_id) # THEN assert scheduler._schedule_executions[schedule.id].task_processes[task_id].cancel_requested is not None", "excinfo: await scheduler.get_scheduled_processes() with pytest.raises(NotReadyError) as excinfo: await scheduler.get_schedules() with", "# THEN assert schedule.process_name == task.process_name assert task.reason is ''", "# Confirm that no task has started yet assert 0", "Scheduler class.\") async def test__mark_tasks_interrupted(self, mocker): pass @pytest.mark.asyncio async def", "True @pytest.mark.asyncio async def test__wait_for_task_completion(self, mocker): # GIVEN scheduler =", "MockStorageAsync(core_management_host=None, core_management_port=None) # WHEN await scheduler._get_process_scripts() # THEN assert len(scheduler._storage_async.scheduled_processes)", "= await get_cat() else: _rv = asyncio.ensure_future(get_cat()) # GIVEN scheduler", "assert task.start_time is not None assert task.end_time is not None", "WHEN # THEN with pytest.raises(TaskNotFoundError) as excinfo: tasks = await", "== log_info.call_count # args, kwargs = log_info.call_args_list[0] # assert (\"Queued", "uuid.UUID(\"2b614d26-760f-11e7-b5a5-be2e44b06b34\") # OMF to PI North schedule_row = scheduler._ScheduleRow( id=schedule_id,", "{'schedule': {'name': 'backup hourly', 'type': Schedule.Type.INTERVAL, 'processName': 'backup', 'exclusive': True,", "assert tasks[0].state == Task.State.RUNNING assert tasks[0].cancel_requested is None assert tasks[0].start_time", "args3 @pytest.mark.asyncio async def test__get_process_scripts(self, mocker): # GIVEN scheduler =", "done\") async def 
test_remove_service_from_task_processes(self): pass @pytest.mark.asyncio async def test_disable_schedule(self, mocker):", "test_not_ready_and_paused(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug", "uuid.uuid4() # WHEN await scheduler.enable_schedule(random_schedule_id) # THEN log_params = \"No", "assert len(scheduler._storage_async.schedules) - 1 == len(scheduler._schedules) @pytest.mark.asyncio async def test_delete_schedule_enabled_schedule(self,", "# THEN assert len(scheduler._storage_async.scheduled_processes) == len(scheduler._process_scripts) @pytest.mark.asyncio async def test__get_process_scripts_exception(self,", "scheduler._ScheduleRow( id=uuid.UUID(\"2b614d26-760f-11e7-b5a5-be2e44b06b34\"), process_name=\"North Readings to PI\", name=\"OMF to PI north\",", "that task has started assert 1 == len(scheduler._schedule_executions[schedule.id].task_processes) # WHEN", "== len(scheduler._schedules) # WHEN # Now delete schedule with pytest.raises(RuntimeWarning):", "_purge_tasks_task, _scheduler_loop_task calls = [call('An exception was raised by Scheduler._purge_tasks", "core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.multiple(scheduler, _purge_tasks_task=None, _last_task_purge_time=None) mocker.patch.object(scheduler, 'purge_tasks',", "assert scheduler._purge_tasks_task is None assert scheduler._last_task_purge_time is not None @pytest.mark.asyncio", "@pytest.mark.asyncio async def test_not_ready_and_paused(self, mocker): # GIVEN scheduler, schedule, log_info,", "PI\", \"script\": [ \"tasks/north\", \"--stream_id\", \"1\", \"--debug_level\", \"1\" ] },", "del temp_schedule assert str(ex).endswith('day must be between 1 and 7')", "PI' in args args, kwargs = log_info.call_args_list[1] assert \"Stopping process:", "ex: temp_schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.name = \"\" await 
scheduler.save_schedule(temp_schedule)", "THEN assert status is True assert message == \"Schedule is", "log_info.call_args_list[2] args3, kwargs3 = log_info.call_args_list[3] assert 'stats collection' in args0", "disable_schedule.call_count @pytest.mark.asyncio async def test_save_schedule_exception(self, mocker): # GIVEN scheduler, schedule,", "# 0 for Interval Schedule assert schedule_row[4] is 0 #", "resume_sch.call_count assert 0 == enable_schedule.call_count assert 0 == disable_schedule.call_count @pytest.mark.asyncio", "log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) mocker.patch.object(scheduler, '_ready', False)", "process '%s' task %s pid %s, %s running tasks\\n%s\" in", "mock_schedule_execution mock_schedule_executions[mock_schedule.id].task_processes[mock_task_id] = mock_task_process mocker.patch.object(scheduler, '_resume_check_schedules') mocker.patch.object(scheduler, '_schedule_next_task') mocker.patch.multiple(scheduler, _schedules=mock_schedules,", "with pytest.raises(RuntimeWarning): await scheduler.delete_schedule(sch_id) # THEN # Now confirm no", "@pytest.mark.asyncio async def test__wait_for_task_completion(self, mocker): # GIVEN scheduler = Scheduler()", "\"ada12840-68d3-11e7-907b-a6006ad3dba0\", \"process_name\": \"COAP\", \"schedule_name\": \"COAP listener south\", \"schedule_type\": 1, \"schedule_interval\":", "uuid.uuid4(), \"name\": \"Fledge Storage\", \"type\": \"Storage\", \"service_port\": 9999, \"management_port\": 9999,", "collector sch = scheduler._schedules[sch_id] sch_execution = scheduler._schedule_executions[sch_id] time_before_call = sch_execution.next_start_time", "= MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.object(scheduler, '_schedule_first_task') log_exception = mocker.patch.object(scheduler._logger, \"exception\") new_schedules", "valid for test_save_schedule_update if table_name == \"schedules\": return {\"count\": 1}", "@pytest.mark.asyncio 
async def test_enable_schedule_already_enabled(self, mocker): # GIVEN scheduler, schedule, log_info,", "schedule '%s' for execution\", 'purge' # log_info.assert_called_with(*log_params) @pytest.mark.asyncio async def", "# THEN assert scheduler._schedule_executions[schedule.id].task_processes[task_id].cancel_requested is not None assert 2 ==", "process_name=\"North Readings to PI\", name=\"OMF to PI north\", type=Schedule.Type.INTERVAL, repeat=datetime.timedelta(seconds=30),", "Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) log_info", "with pytest.raises(NotReadyError) as excinfo: await scheduler.delete_schedule(uuid.uuid4()) with pytest.raises(NotReadyError) as excinfo:", "asyncio.Task(asyncio.sleep(5))) scheduler._resume_check_schedules() # THEN assert scheduler._check_processes_pending is False # WHEN", "with pytest.raises(Exception) as excinfo: await scheduler.get_task(task_id) # THEN payload =", "(scheduler._schedules[sch_id]).id == sch_id assert (scheduler._schedules[sch_id]).enabled is True assert 1 ==", "\"start_time\": \"2018-02-06 13:28:14.477868\", \"end_time\": \"2018-02-06 13:28:14.856375\", \"exit_code\": \"0\", \"reason\": \"\"", "def mock_task(): return \"\" async def mock_process(): m = MagicMock()", "message == \"Schedule {} already disabled\".format(str(sch_id)) assert (scheduler._schedules[sch_id]).id == sch_id", "unittest.mock import MagicMock, call import sys import copy import pytest", "assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) assert 1 == audit_logger.call_count calls =", "= scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.day = 0 temp_schedule.time = datetime.time() await", "tasks[0].exit_code is None @pytest.mark.asyncio async def test_get_task(self, mocker): # GIVEN", "async def mock_task(): return \"\" async def mock_process(): m 
=", "= mocker.patch.object(scheduler._logger, \"error\") log_debug = mocker.patch.object(scheduler._logger, \"debug\") log_info = mocker.patch.object(scheduler._logger,", "await get_cat() else: _rv = asyncio.ensure_future(get_cat()) # GIVEN scheduler =", "self.scheduler_fixture(mocker) log_debug = mocker.patch.object(scheduler._logger, 'debug', side_effect=Exception()) # WHEN # THEN", "call import sys import copy import pytest from fledge.services.core.scheduler.scheduler import", "scheduler._ready is False # WHEN await scheduler.start() # THEN assert", "scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.multiple(scheduler, _ready=True, _paused=False) mocker.patch.object(scheduler, '_max_completed_task_age', datetime.datetime.now())", "def test_disable_schedule_wrong_schedule_id(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage =", "log_debug.assert_has_calls(calls, any_order=True) @pytest.mark.asyncio async def test_stop(self, mocker): # TODO: Mandatory", "scheduler.save_schedule(temp_schedule) del temp_schedule assert str(ex).endswith(\"name can not be empty\") with", "1 == log_exception.call_count log_params = 'Attempt to delete an enabled", "MagicMock, call import sys import copy import pytest from fledge.services.core.scheduler.scheduler", "@classmethod async def update_tbl(cls, table_name, payload): # Only valid for", "pytest.raises(ScheduleNotFoundError) as excinfo: await scheduler.queue_task(uuid.uuid4()) @pytest.mark.asyncio async def test_delete_schedule(self, mocker):", "scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) log_debug = mocker.patch.object(scheduler._logger, \"debug\", side_effect=Exception()) log_exception", "log_info = mocker.patch.object(scheduler._logger, \"info\") mock_schedules = dict() mock_schedule = scheduler._ScheduleRow(", "\"t\" }, { \"id\": \"5d7fed92-fb9a-11e7-8c3f-9a214cf093ae\", \"process_name\": \"North 
Readings to OCS\",", "async def test__schedule_next_timed_task(self, mocker): # TODO: Mandatory - Add negative", "'backup hourly', 'type': Schedule.Type.INTERVAL, 'processName': 'backup', 'exclusive': True, 'repeat': 3600.0,", "def test_get_task(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error,", "= scheduler._schedule_row_to_schedule(schedule_id, schedule_row) enable_schedule = mocker.patch.object(scheduler, \"enable_schedule\", return_value=mock_coro()) disable_schedule =", "@pytest.mark.skip(\"_scheduler_loop() not suitable for unit testing. Will be tested during", "with pytest.raises(ValueError) as ex: temp_schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.repeat =", "log_info = mocker.patch.object(scheduler._logger, \"info\") mocker.patch.object(scheduler, '_schedule_first_task') await scheduler._get_schedules() schedule =", "kwargs1 = log_info.call_args_list[1] args2, kwargs2 = log_info.call_args_list[2] assert 'stats collection'", "task has started assert 1 == len(scheduler._schedule_executions[schedule.id].task_processes) assert 1 ==", "core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) log_info = mocker.patch.object(scheduler._logger, \"info\") log_exception", "PI north' in args2 @pytest.mark.asyncio @pytest.mark.skip(\"_scheduler_loop() not suitable for unit", "Task.State.RUNNING assert tasks[0].cancel_requested is None assert tasks[0].start_time is not None", "len(scheduler._schedule_executions[schedule.id].task_processes) task_id = list(scheduler._schedule_executions[schedule.id].task_processes.keys())[0] # Confirm that cancel request has", "1 == len(scheduler._schedules) assert 1 == audit_logger.call_count calls =[call('SCHAD', {'schedule':", "await scheduler.purge_tasks() # THEN assert scheduler._purge_tasks_task is None assert scheduler._last_task_purge_time", "\"type\": \"integer\", \"default\": 
str(Scheduler._DEFAULT_MAX_RUNNING_TASKS), \"value\": str(Scheduler._DEFAULT_MAX_RUNNING_TASKS) }, \"max_completed_task_age_days\": { \"description\":", "MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) log_info = mocker.patch.object(scheduler._logger, \"info\")", "scheduler._schedule_executions[sch_id] # WHEN scheduler._schedule_first_task(sch, current_time) time_after_call = sch_execution.next_start_time # THEN", "THEN if is_exception is True: with pytest.raises(Exception): await scheduler._get_schedules() assert", "received is %d', 9999)] log_info.assert_has_calls(calls, any_order=True) calls = [call('Database command:", "mock_schedule_executions[mock_schedule.id].task_processes[mock_task_id] = mock_task_process mocker.patch.object(scheduler, '_resume_check_schedules') mocker.patch.object(scheduler, '_schedule_next_task') mocker.patch.multiple(scheduler, _schedules=mock_schedules, _task_processes=mock_task_processes,", "Now queue task and assert that the North task has", "not None assert task.end_time is not None assert task.exit_code is", "\"script\": [ \"services/south\" ] }, { \"name\": \"North Readings to", "m = MagicMock() m.pid = 9999 m.terminate = lambda: True", "import time import json from unittest.mock import MagicMock, call import", "= mocker.patch.object(scheduler._logger, \"info\") current_time = time.time() curr_time = datetime.datetime.fromtimestamp(current_time) mocker.patch.multiple(scheduler,", "to PI' in args @pytest.mark.asyncio async def test__start_task(self, mocker): #", "log_info.call_args_list[0] args, kwargs = log_info.call_args_list[0] assert \"Process started: Schedule '%s'", "12:30:11', False), ('1 day 12:40:11', False), ('2 days', True), ('2", "MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) 
mocker.patch.multiple(scheduler, _ready=True, _paused=False) mocker.patch.object(scheduler,", "# THEN assert status is True assert message == \"Schedule", "failed: %s' == args[0] p = json.loads(args[1]) assert payload ==", "in days (based on the start time), for a rows", "scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) log_info = mocker.patch.object(scheduler._logger, \"info\") log_exception =", "12:40:11', False), ('2 days', True), ('2 days 00:00:59', False), ('00:25:61',", "== disable_schedule.call_count @pytest.mark.asyncio async def test_save_schedule_new_with_enable_modified(self, mocker): @asyncio.coroutine def mock_coro():", "THEN with pytest.raises(ValueError) as ex: temp_schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.name", "await scheduler.stop() # THEN assert retval is True assert scheduler._schedule_executions", "MockStorageAsync(core_management_host=None, core_management_port=None) log_info = mocker.patch.object(scheduler._logger, \"info\") current_time = time.time() mocker.patch.multiple(scheduler,", "_core_management_port=9999, _core_management_host=\"0.0.0.0\", current_time=current_time - 3600) # TODO: Remove after implementation", "is True assert scheduler._schedule_executions is None assert scheduler._task_processes is None", "MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.object(scheduler, '_schedule_first_task') log_exception =", "len(scheduler._schedules) assert 1 == audit_logger.call_count calls = [call('SCHCH', {'schedule': {'name':", "datetime.timedelta') with pytest.raises(ValueError) as ex: temp_schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.exclusive", "\"script\": [ \"tasks/purge\" ] }, { \"name\": \"stats collector\", \"script\":", "log_exception, log_error, log_debug = await 
self.scheduler_fixture(mocker) mocker.patch.object(scheduler, '_ready', False) mocker.patch.object(scheduler,", "time.time() mocker.patch.multiple(scheduler, _max_running_tasks=10, _start_time=current_time) await scheduler._get_schedules() mocker.patch.object(scheduler, '_start_task', return_value=asyncio.ensure_future(mock_task())) #", "len(scheduler._task_processes) assert 0 == len(scheduler._schedule_executions[mock_schedule.id].task_processes) args, kwargs = log_info.call_args_list[0] assert", "test__get_process_scripts_exception(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None,", "'backup')] log_info.assert_has_calls(calls, any_order=True) assert 1 == audit_logger.call_count calls = [call('SCHCH',", "Assert that there is no task queued for schedule with", "as excinfo: await scheduler.cancel_task(uuid.uuid4()) @pytest.mark.asyncio async def test_not_ready_and_paused(self, mocker): #", "= await self.scheduler_fixture(mocker) sch_id = uuid.UUID(\"ada12840-68d3-11e7-907b-a6006ad3dba0\") #Coap mocker.patch.object(scheduler, 'queue_task', return_value=asyncio.ensure_future(mock_task()))", "[ \"tasks/north\", \"--stream_id\", \"1\", \"--debug_level\", \"1\" ] }, { \"name\":", "assert isinstance(scheduler._schedule_executions[sch_id], scheduler._ScheduleExecution) # log_params = \"Queued schedule '%s' for", "core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) # WHEN # Check IF", "len(scheduler._storage_async.schedules) == len(scheduler._schedules) @pytest.mark.asyncio async def test__get_schedules_exception(self, mocker): # GIVEN", "mocker.patch.object(ConfigurationManager, \"create_category\", return_value=asyncio.ensure_future(mock_task())) get_cat = mocker.patch.object(ConfigurationManager, \"get_category_all_items\", return_value=_rv) # WHEN", "0 # 0 for Interval Schedule assert schedule_row[4] is 0", "True: with 
pytest.raises(Exception): await scheduler._get_schedules() assert 1 == log_exception.call_count else:", "'OMF to PI north' in args2 # As part of", "# THEN with pytest.raises(Exception): await scheduler._get_process_scripts() log_args = 'Query failed:", "async def test_queue_task_schedule_not_found(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage", "\"Storage\", \"service_port\": 9999, \"management_port\": 9999, \"address\": \"0.0.0.0\", \"protocol\": \"http\" }", "payload = {\"return\": [\"id\", \"process_name\", \"schedule_name\", \"state\", {\"alias\": \"start_time\", \"column\":", "schedule '%s' for execution\", 'OMF to PI north') == args", "assert that the task has been queued await scheduler.queue_task(schedule.id) assert", "be used in 'await' expression\"), call('An exception was raised by", "del temp_schedule assert str(ex).endswith('exclusive can not be None') with pytest.raises(ValueError)", "self.scheduler_fixture(mocker) audit_logger = mocker.patch.object(AuditLogger, 'information', return_value=asyncio.ensure_future(mock_task())) first_task = mocker.patch.object(scheduler, '_schedule_first_task')", "None assert task.start_time is not None assert task.end_time is not", "scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.object(scheduler, '_schedule_first_task')", "False) mocker.patch.object(scheduler, '_paused', True) # WHEN # THEN with pytest.raises(NotReadyError)", "WHEN status, message = await scheduler.enable_schedule(sch_id) # THEN assert status", "# WHEN scheduler._schedule_first_task(sch, current_time) time_after_call = sch_execution.next_start_time # THEN assert", "'_ready', True) mocker.patch.object(scheduler, '_task_processes') audit_logger = mocker.patch.object(AuditLogger, 'information', return_value=asyncio.ensure_future(mock_task())) log_info", "'Test Schedule', 'enabled': True, 
'repeat': 30.0, 'exclusive': False, 'day': 1,", "time=None, day=None, exclusive=True, enabled=True) mocker.patch.object(scheduler, '_ready', True) mocker.patch.object(scheduler, '_resume_check_schedules') #", "day 00:00:40\", \"schedule_time\": \"\", \"schedule_day\": 0, \"exclusive\": \"t\", \"enabled\": \"f\"", "= await scheduler.get_schedules() # THEN assert len(scheduler._storage_async.schedules) == len(schedules) @pytest.mark.asyncio", "%s pid %s\\n%s\" in args assert 'OMF to PI north'", "@pytest.mark.skip(\"_mark_tasks_interrupted() not implemented in main Scheduler class.\") async def test__mark_tasks_interrupted(self,", "exclusive=True, enabled=True) mock_schedules[mock_schedule.id] = mock_schedule mock_task_process = scheduler._TaskProcess() mock_task_processes =", "task.process_name assert task.reason is '' assert task.state is not None", "asyncio.ensure_future(mock_process()) scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async =", "# Confirm that task has not started yet assert 0", "%s', \"object MagicMock can't be used in 'await' expression\"), call('An", "assert schedule.process_name == tasks[0].process_name assert tasks[0].reason is None assert tasks[0].state", "return \"\" # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug", "= [call('Database command: %s', 'scheduled_processes'), call('Database command: %s', 'schedules')] log_debug.assert_has_calls(calls,", "audit_logger = mocker.patch.object(AuditLogger, 'information', return_value=asyncio.ensure_future(mock_task())) log_info = mocker.patch.object(scheduler._logger, \"info\") sch_id", "= uuid.UUID(\"ada12840-68d3-11e7-907b-a6006ad3dba0\") #Coap mocker.patch.object(scheduler, 'queue_task', return_value=asyncio.ensure_future(mock_task())) # WHEN status, message", "in args @pytest.mark.asyncio async def test_purge_tasks(self, mocker): # TODO: Mandatory", "query=None): if table_name == 
'schedules': return { \"count\": len(MockStorageAsync.schedules), \"rows\":", "log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) sch_id = uuid.UUID(\"ada12840-68d3-11e7-907b-a6006ad3dba0\")", "enable_schedule.call_count assert 0 == disable_schedule.call_count # WHEN await scheduler.save_schedule(schedule, is_enabled_modified=False)", "\"OMF to PI north\", \"schedule_type\": 3, \"schedule_interval\": \"00:00:30\", \"schedule_time\": \"\",", "= \"Schedule %s already disabled\", str(sch_id) log_info.assert_called_with(*log_params) @pytest.mark.asyncio async def", "sch_execution = scheduler._schedule_executions[sch_id] time_before_call = sch_execution.next_start_time # WHEN next_dt =", "p = json.loads(args[1]) assert payload == p @pytest.mark.asyncio async def", "delete an enabled Schedule %s. Not deleted.', str(sch_id) log_exception.assert_called_with(*log_params) @pytest.mark.asyncio", "= await self.scheduler_fixture(mocker) random_schedule_id = uuid.uuid4() # WHEN await scheduler.enable_schedule(random_schedule_id)", "THEN with pytest.raises(Exception): await scheduler._get_process_scripts() log_args = 'Query failed: %s',", "Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.multiple(scheduler,", "deleted assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) assert 1 == log_exception.call_count log_params", "\"=\", \"value\": str(task_id)}} args, kwargs = log_exception.call_args assert 'Query failed:", "_paused=False, _task_processes={}) # WHEN retval = await scheduler.stop() # THEN", "mocker.patch.object(scheduler._logger, \"info\") return scheduler, schedule, log_info, log_exception, log_error, log_debug @pytest.mark.asyncio", "]) async def test__get_schedules(self, test_interval, is_exception, mocker): # GIVEN scheduler", "WHEN # THEN with 
pytest.raises(ScheduleNotFoundError): schedule = await scheduler.get_schedule(schedule_id) @pytest.mark.asyncio", "assert isinstance(scheduler._schedule_executions[schedule.id], scheduler._ScheduleExecution) # Confirm that no task has started", "0 == disable_schedule.call_count @pytest.mark.asyncio async def test_save_schedule_new_with_enable_modified(self, mocker): @asyncio.coroutine def", "scheduler.save_schedule(schedule) # THEN assert len(scheduler._storage_async.schedules) + 1 == len(scheduler._schedules) assert", "\"0\", \"reason\": \"\" } ] def __init__(self, core_management_host=None, core_management_port=None): super().__init__(core_management_host,", "assert 'stats collection' in args3 @pytest.mark.asyncio async def test__schedule_first_task(self, mocker):", "def test_get_schedule(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error,", "mocker.patch.object(scheduler, 'queue_task', return_value=asyncio.ensure_future(mock_task())) # WHEN status, message = await scheduler.enable_schedule(sch_id)", "assert 0 == len(scheduler._schedule_executions[schedule.id].task_processes) # WHEN await scheduler._start_task(schedule) # THEN", "\"backup\", \"schedule_name\": \"backup hourly\", \"schedule_type\": 3, \"schedule_interval\": \"01:00:00\", \"schedule_time\": \"\",", "is True log_params = \"Schedule %s already enabled\", str(sch_id) log_info.assert_called_with(*log_params)", "WHEN tasks = await scheduler.get_tasks() # THEN assert schedule.process_name ==", "mocker.patch.multiple(scheduler, _ready=True, _paused=False) mocker.patch.object(scheduler, '_max_completed_task_age', datetime.datetime.now()) # WHEN await scheduler.purge_tasks()", "def update_tbl(cls, table_name, payload): # Only valid for test_save_schedule_update if", "assert schedule.exclusive is True assert schedule.enabled is True assert schedule.process_name", "= mocker.patch.object(scheduler._logger, \"info\") log_exception = mocker.patch.object(scheduler._logger, 
\"exception\") mocker.patch.object(scheduler, '_scheduler_loop', return_value=asyncio.ensure_future(mock_task()))", "8: _rv = await mock_process() else: _rv = asyncio.ensure_future(mock_process()) mocker.patch.object(asyncio,", "@pytest.mark.asyncio async def test__schedule_next_timed_task(self, mocker): # TODO: Mandatory - Add", "== sch_id assert (scheduler._schedules[sch_id]).enabled is False log_params = \"Schedule %s", "that no task has started yet assert 0 == len(scheduler._schedule_executions[schedule.id].task_processes)", "to PI\") mocker.patch.object(scheduler, '_wait_for_task_completion', return_value=asyncio.ensure_future(mock_task())) mocker.patch.object(scheduler, '_terminate_child_processes') mocker.patch.object(asyncio, 'create_subprocess_exec', return_value=_rv)", "pytest.raises(ScheduleNotFoundError): schedule = await scheduler.get_schedule(schedule_id) @pytest.mark.asyncio async def test_save_schedule_new(self, mocker):", "\"info\") sch_id = uuid.UUID(\"d1631422-9ec6-11e7-abc4-cec278b6b50a\") # backup # WHEN status, message", "test_delete_schedule_exception(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug", "(c) 2017 OSIsoft, LLC\" __license__ = \"Apache 2.0\" __version__ =", "\"2018-02-06 13:28:14.856375\", \"exit_code\": \"0\", \"reason\": \"\" } ] def __init__(self,", "scheduler.disable_schedule(sch_id) # THEN assert status is True assert message ==", "1 == audit_logger.call_count calls =[call('SCHAD', {'schedule': {'name': 'Test Schedule', 'processName':", "# WHEN await scheduler.save_schedule(schedule, is_enabled_modified=False) # THEN assert 1 ==", "log_info, log_exception, log_error, log_debug @pytest.mark.asyncio async def test__resume_check_schedules(self, mocker): #", "return { \"count\": len(MockStorageAsync.schedules), \"rows\": MockStorageAsync.schedules } if table_name ==", "uuid.uuid4() mock_task_process.task_id = mock_task_id mock_task_processes[mock_task_process.task_id] = mock_task_process 
mock_schedule_executions = dict()", "# THEN assert time_after_call > time_before_call assert 3 == log_info.call_count", "WHEN # THEN if is_exception is True: with pytest.raises(Exception): await", "= MockStorageAsync(core_management_host=None, core_management_port=None) log_debug = mocker.patch.object(scheduler._logger, \"debug\") log_info = mocker.patch.object(scheduler._logger,", "scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) sch_id", "temp_schedule.name = \"\" await scheduler.save_schedule(temp_schedule) del temp_schedule assert str(ex).endswith(\"name can", "PI\") mocker.patch.object(scheduler, '_wait_for_task_completion') # Confirm that task has not started", "MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.object(scheduler, '_schedule_first_task') # WHEN", "scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) log_info =", "== args args, kwargs = log_info.call_args_list[0] assert \"Process started: Schedule", "is not None @pytest.mark.asyncio async def test__check_purge_tasks(self, mocker): # TODO:", "running for Schedule %s', '2b614d26-760f-11e7-b5a5-be2e44b06b34'), call(\"Disabled Schedule '%s/%s' process '%s'\\n\",", "await scheduler.disable_schedule(uuid.uuid4()) with pytest.raises(NotReadyError) as excinfo: await scheduler.enable_schedule(uuid.uuid4()) with pytest.raises(NotReadyError)", "core_management_port=None) log_info = mocker.patch.object(scheduler._logger, \"info\") mock_schedules = dict() mock_schedule =", "scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) #", "mocker): @asyncio.coroutine def mock_coro(): return \"\" # GIVEN scheduler, schedule,", "await 
scheduler.save_schedule(schedule) # THEN assert len(scheduler._storage_async.schedules) + 1 == len(scheduler._schedules)", "# WHEN # THEN with pytest.raises(Exception): await scheduler._get_process_scripts() log_args =", "uuid import time import json from unittest.mock import MagicMock, call", "str(ex).endswith(\"name can not be empty\") with pytest.raises(ValueError) as ex: temp_schedule", "started assert 1 == len(scheduler._schedule_executions[schedule.id].task_processes) task_id = list(scheduler._schedule_executions[schedule.id].task_processes.keys())[0] # Confirm", "\"t\" }, { \"id\": \"d1631422-9ec6-11e7-abc4-cec278b6b50a\", \"process_name\": \"backup\", \"schedule_name\": \"backup hourly\",", "}, { \"name\": \"North Readings to OCS\", \"script\": [ \"tasks/north\",", "new_schedules) # WHEN # THEN if is_exception is True: with", "'_scheduler_loop_sleep_task', None) scheduler._resume_check_schedules() # THEN assert scheduler._check_processes_pending is True @pytest.mark.asyncio", "\"0.0.0.0\", \"protocol\": \"http\" } class MockStorageAsync(StorageClientAsync): schedules = [ {", "_rv = asyncio.ensure_future(get_cat()) # GIVEN scheduler = Scheduler() scheduler._storage =", "sch_execution = scheduler._schedule_executions[sch_id] # WHEN scheduler._schedule_first_task(sch, current_time) time_after_call = sch_execution.next_start_time", "3, \"schedule_interval\": \"01:00:00\", \"schedule_time\": \"\", \"schedule_day\": 0, \"exclusive\": \"t\", \"enabled\":", "collector\", \"schedule_name\": \"stats collection\", \"schedule_type\": 2, \"schedule_interval\": \"00:00:15\", \"schedule_time\": \"00:00:15\",", "self.scheduler_fixture(mocker) # WHEN schedules = await scheduler.get_schedules() # THEN assert", "is True: with pytest.raises(Exception): await scheduler._get_schedules() assert 1 == log_exception.call_count", "enabled=True, process_name='TestProcess') # WHEN # THEN with pytest.raises(ValueError) as ex:", "schedule.schedule_id == schedule_row[0] assert 
schedule.name == schedule_row[1] assert schedule.schedule_type ==", "= json.loads(args[1]) assert payload == p @pytest.mark.asyncio async def test_cancel_task_all_ok(self,", "- 3600, _paused=False, _task_processes={}) # WHEN retval = await scheduler.stop()", "datetime.time() await scheduler.save_schedule(temp_schedule) del temp_schedule assert str(ex).endswith('day must be between", "THEN assert schedule.process_name == task.process_name assert task.reason is '' assert", "assert schedule.enabled is True assert schedule.process_name == \"purge\" @pytest.mark.asyncio async", "'OMF to PI north', 'repeat': 30.0, 'enabled': False, 'type': Schedule.Type.INTERVAL,", "resume_sch.call_count assert 1 == enable_schedule.call_count assert 0 == disable_schedule.call_count #", "= scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.name = \"\" await scheduler.save_schedule(temp_schedule) del temp_schedule", "in args assert 'OMF to PI north' in args assert", "= mocker.patch.object(scheduler, \"enable_schedule\", return_value=mock_coro()) disable_schedule = mocker.patch.object(scheduler, \"disable_schedule\", return_value=mock_coro()) schedule_id", "\"${VERSION}\" async def mock_task(): return \"\" async def mock_process(): m", "await scheduler._read_storage() # THEN assert len(scheduler._storage_async.scheduled_processes) == len(scheduler._process_scripts) assert len(scheduler._storage_async.schedules)", "assert message == \"Schedule successfully disabled\" assert (scheduler._schedules[sch_id]).id == sch_id", "with pytest.raises(ScheduleNotFoundError): schedule = await scheduler.get_schedule(schedule_id) @pytest.mark.asyncio async def test_save_schedule_new(self,", "async def test_schedule_row_to_schedule(self, mocker): # GIVEN scheduler = Scheduler() schedule_id", "'_resume_check_schedules') log_info = mocker.patch.object(scheduler._logger, \"info\") schedule_id = uuid.UUID(\"2b614d26-760f-11e7-b5a5-be2e44b06b34\") # OMF", "import MagicMock, call import 
sys import copy import pytest from", "= uuid.uuid4() schedule_row = scheduler._ScheduleRow( id=schedule_id, name='Test Schedule', type=Schedule.Type.INTERVAL, day=0,", "= mocker.patch.object(ConfigurationManager, \"get_category_all_items\", return_value=_rv) # WHEN assert scheduler._max_running_tasks is None", "scheduler._schedule_row_to_schedule(schedule_id, schedule_row) # WHEN await scheduler.save_schedule(schedule, is_enabled_modified=True) # THEN assert", "\"exit_code\"], \"limit\": 100} args, kwargs = log_exception.call_args assert 'Query failed:", "log_info.call_args_list[0] assert \"Process started: Schedule '%s' process '%s' task %s", "'information', return_value=asyncio.ensure_future(mock_task())) log_info = mocker.patch.object(scheduler._logger, \"info\") sch_id = uuid.UUID(\"2b614d26-760f-11e7-b5a5-be2e44b06b34\") #", "1 == disable_schedule.call_count @pytest.mark.asyncio async def test_save_schedule_exception(self, mocker): # GIVEN", "mock_schedule mock_task_id = uuid.uuid4() mock_task_process.task_id = mock_task_id mock_task_processes[mock_task_process.task_id] = mock_task_process", "temp_schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.name = None await scheduler.save_schedule(temp_schedule) del", "== queue_task.call_count calls = [call(\"Enabled Schedule '%s/%s' process '%s'\\n\", 'backup", "True) # WHEN # Now delete schedule await scheduler.delete_schedule(sch_id) #", "# THEN assert 1 == disable_schedule.call_count @pytest.mark.asyncio async def test_save_schedule_exception(self,", "exclusive=False, enabled=True, process_name='TestProcess') schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) # WHEN await", "pytest.raises(ValueError) as ex: temp_schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.time = 1234", "}, ] tasks = [ { \"id\": \"259b8570-65c1-4b92-8c62-e9642631a600\", \"process_name\": \"North", "False), ('2 days', True), ('2 days 00:00:59', 
False), ('00:25:61', True)", "less assert len(scheduler._storage_async.schedules) - 1 == len(scheduler._schedules) @pytest.mark.asyncio async def", "retval = await scheduler.stop() # THEN assert retval is True", "schedule # WHEN schedule = await scheduler.get_schedule(schedule_id) # THEN assert", "with pytest.raises(Exception): await scheduler._get_schedules() log_args = 'Query failed: %s', 'schedules'", "\"t\", \"enabled\": \"f\" }, { \"id\": \"ada12840-68d3-11e7-907b-a6006ad3dba0\", \"process_name\": \"COAP\", \"schedule_name\":", "disable_schedule = mocker.patch.object(scheduler, \"disable_schedule\", return_value=mock_coro()) # WHEN await scheduler.save_schedule(schedule, is_enabled_modified=True)", "[call('SCHCH', {'schedule': {'name': 'backup hourly', 'type': Schedule.Type.INTERVAL, 'processName': 'backup', 'exclusive':", "to OCS\", \"schedule_name\": \"OMF to OCS north\", \"schedule_type\": 3, \"schedule_interval\":", "3, \"schedule_interval\": \"00:00:30\", \"schedule_time\": \"\", \"schedule_day\": 0, \"exclusive\": \"t\", \"enabled\":", "LLC\" __license__ = \"Apache 2.0\" __version__ = \"${VERSION}\" async def", "log_info.call_args_list[0] args1, kwargs1 = log_info.call_args_list[1] args2, kwargs2 = log_info.call_args_list[2] assert", "] }, { \"name\": \"North Readings to PI\", \"script\": [", "def test_get_schedules(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error,", "schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) # WHEN await scheduler.save_schedule(schedule, is_enabled_modified=True) #", "@pytest.mark.asyncio async def test_queue_task_schedule_not_found(self, mocker): # GIVEN scheduler = Scheduler()", "THEN assert scheduler._purge_tasks_task is not None @pytest.mark.asyncio async def test__check_schedules(self,", "enabled=True, process_name='TestProcess') schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) enable_schedule = mocker.patch.object(scheduler, 
\"enable_schedule\",", "earliest_start_time = await scheduler._check_schedules() # THEN assert earliest_start_time is not", "assert 1 == log_exception.call_count log_params = 'Attempt to delete an", "schedule.name == \"purge\" assert schedule.schedule_type == Schedule.Type.MANUAL assert schedule.repeat ==", "Assert that there is no task queued for this schedule", "get_cat.call_count assert scheduler._max_running_tasks is not None assert scheduler._max_completed_task_age is not", "not None assert scheduler._max_completed_task_age is not None @pytest.mark.asyncio async def", "assert 1 == audit_logger.call_count calls = [call('SCHCH', {'schedule': {'name': 'OMF", "repeat_seconds=30, exclusive=False, enabled=True, process_name='TestProcess') schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) # WHEN", "Schedule %s\", str(random_schedule_id) log_exception.assert_called_with(*log_params) @pytest.mark.asyncio async def test_disable_schedule_already_disabled(self, mocker): #", "%s running tasks\\n%s\" in args assert 'OMF to PI north'", "log_error, log_debug = await self.scheduler_fixture(mocker) sch_id = uuid.UUID(\"ada12840-68d3-11e7-907b-a6006ad3dba0\") #Coap mocker.patch.object(scheduler,", "0 == len(scheduler._task_processes) assert 0 == len(scheduler._schedule_executions[mock_schedule.id].task_processes) args, kwargs =", "args, kwargs = log_exception.call_args assert 'Query failed: %s' == args[0]", "\"info\") mocker.patch.object(scheduler, '_schedule_first_task') await scheduler._get_schedules() schedule = scheduler._ScheduleRow( id=uuid.UUID(\"2b614d26-760f-11e7-b5a5-be2e44b06b34\"), process_name=\"North", "\"schedule_name\", \"state\", {\"alias\": \"start_time\", \"format\": \"YYYY-MM-DD HH24:MI:SS.MS\", \"column\": \"start_time\"}, {\"alias\":", "not suitable for unit testing. 
Will be tested during System", "mock_coro(): return \"\" # GIVEN scheduler, schedule, log_info, log_exception, log_error,", "schedule await scheduler.delete_schedule(sch_id) # THEN # Now confirm there is", "MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.multiple(scheduler, _ready=True, _paused=False) mocker.patch.object(scheduler, '_max_completed_task_age', datetime.datetime.now()) # WHEN", "assert 3 == log_info.call_count args0, kwargs0 = log_info.call_args_list[0] args1, kwargs1", "await scheduler._get_schedules() mocker.patch.object(scheduler, '_ready', True) mocker.patch.object(scheduler, '_task_processes') audit_logger = mocker.patch.object(AuditLogger,", "core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) log_debug = mocker.patch.object(scheduler._logger, \"debug\") log_info", "as excinfo: await scheduler.enable_schedule(uuid.uuid4()) with pytest.raises(NotReadyError) as excinfo: await scheduler.queue_task(uuid.uuid4())", "Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.object(scheduler,", "len(scheduler._schedules) @pytest.mark.asyncio async def test_delete_schedule_enabled_schedule(self, mocker): # GIVEN scheduler, schedule,", "scheduler.get_scheduled_processes() with pytest.raises(NotReadyError) as excinfo: await scheduler.get_schedules() with pytest.raises(NotReadyError) as", "@pytest.mark.parametrize(\"test_interval, is_exception\", [ ('\"Blah\" 0 days', True), ('12:30:11', False), ('0", "mocker.patch.object(scheduler._logger, \"exception\") log_error = mocker.patch.object(scheduler._logger, \"error\") log_debug = mocker.patch.object(scheduler._logger, \"debug\")", "= 'Attempt to delete an enabled Schedule %s. 
Not deleted.',", "test_get_task_not_found(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug", "# log_params = \"Queued schedule '%s' for execution\", 'purge' #", "Now queue task and assert that the task has been", "scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.multiple(scheduler, _purge_tasks_task=None,", "assert time_after_call > time_before_call assert 4 == log_info.call_count args0, kwargs0", "'TestProcess', 'type': Schedule.Type.TIMED}})] audit_logger.assert_has_calls(calls, any_order=True) assert 1 == first_task.call_count assert", "\"management_port\": 9999, \"address\": \"0.0.0.0\", \"protocol\": \"http\" } @classmethod async def", "MockStorageAsync(core_management_host=None, core_management_port=None) log_info = mocker.patch.object(scheduler._logger, \"info\") mocker.patch.object(scheduler, '_schedule_first_task') await scheduler._get_schedules()", "core_management_port=None) mocker.patch.object(scheduler, '_schedule_first_task') mocker.patch.object(scheduler, '_ready', True) mocker.patch.object(scheduler, '_resume_check_schedules') # WHEN", "len(scheduler._schedules) @pytest.mark.asyncio async def test__get_schedules_exception(self, mocker): # GIVEN scheduler =", "must be between 1 and 7') @pytest.mark.asyncio @pytest.mark.skip(reason=\"To be done\")", "mocker.patch.object(scheduler, '_scheduler_loop_sleep_task', asyncio.Task(asyncio.sleep(5))) scheduler._resume_check_schedules() # THEN assert scheduler._check_processes_pending is False", "_rv = asyncio.ensure_future(mock_process()) scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None)", "THEN assert scheduler._purge_tasks_task is None assert scheduler._last_task_purge_time is not None", "is None await scheduler._read_config() # THEN assert 1 == 
cr_cat.call_count", "\"column\": \"start_time\", \"format\": \"YYYY-MM-DD HH24:MI:SS.MS\"}, {\"alias\": \"end_time\", \"column\": \"end_time\", \"format\":", "= await self.scheduler_fixture(mocker) audit_logger = mocker.patch.object(AuditLogger, 'information', return_value=asyncio.ensure_future(mock_task())) first_task =", "{ \"name\": \"COAP\", \"script\": [ \"services/south\" ] }, { \"name\":", "== len(scheduler._schedule_executions[schedule.id].task_processes) task_id = list(scheduler._schedule_executions[schedule.id].task_processes.keys())[0] # Confirm that cancel request", "audit_logger.call_count calls =[call('SCHAD', {'schedule': {'name': 'Test Schedule', 'processName': 'TestProcess', 'type':", "MockStorageAsync(core_management_host=None, core_management_port=None) log_info = mocker.patch.object(scheduler._logger, \"info\") log_exception = mocker.patch.object(scheduler._logger, \"exception\")", "\"exception\") mocker.patch.object(scheduler, '_scheduler_loop', return_value=asyncio.ensure_future(mock_task())) mocker.patch.object(scheduler, '_resume_check_schedules', return_value=asyncio.ensure_future(mock_task())) mocker.patch.object(scheduler, '_purge_tasks_task', return_value=asyncio.ensure_future(asyncio.sleep(.1)))", "m.pid = 9999 m.terminate = lambda: True return m @pytest.allure.feature(\"unit\")", "mocker.patch.object(scheduler, '_ready', True) mocker.patch.object(scheduler, '_task_processes') log_info = mocker.patch.object(scheduler._logger, \"info\") sch_id", "\"YYYY-MM-DD HH24:MI:SS.MS\"}, \"reason\", \"exit_code\"], \"limit\": 100} args, kwargs = log_exception.call_args", "\"Apache 2.0\" __version__ = \"${VERSION}\" async def mock_task(): return \"\"", "assert earliest_start_time is not None assert 3 == log_info.call_count args0,", "len(scheduler._schedules) assert 1 == log_exception.call_count log_params = 'Attempt to delete", "confirm no schedule is deleted assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) assert", "1 == 
len(scheduler._schedule_executions[schedule.id].task_processes) task_id = list(scheduler._schedule_executions[schedule.id].task_processes.keys())[0] # WHEN tasks =", "delete_from_tbl(cls, table_name, condition=None): pass @classmethod async def query_tbl_with_payload(cls, table_name, query_payload):", "scheduler._schedules[sch_id] sch_execution = scheduler._schedule_executions[sch_id] time_before_call = sch_execution.next_start_time # WHEN scheduler._schedule_next_task(sch)", "there is one schedule less assert len(scheduler._storage_async.schedules) - 1 ==", "test__wait_for_task_completion(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None,", "sch_id = uuid.UUID(\"d1631422-9ec6-11e7-abc4-cec278b6b50a\") # backup await scheduler._get_schedules() # Confirm no.", "# WHEN await scheduler.save_schedule(schedule) # THEN assert len(scheduler._storage_async.schedules) == len(scheduler._schedules)", "name=\"OMF to PI north\", type=Schedule.Type.INTERVAL, repeat=datetime.timedelta(seconds=30), repeat_seconds=30, time=None, day=None, exclusive=True,", "assert schedule.enabled == schedule_row[8] assert schedule.process_name == schedule_row[9] @pytest.mark.asyncio async", "@pytest.mark.asyncio async def test_disable_schedule_wrong_schedule_id(self, mocker): # GIVEN scheduler = Scheduler()", "= mocker.patch.object(scheduler, '_schedule_first_task') resume_sch = mocker.patch.object(scheduler, '_resume_check_schedules') log_info = mocker.patch.object(scheduler._logger,", "= \"Schedule %s already enabled\", str(sch_id) log_info.assert_called_with(*log_params) @pytest.mark.asyncio async def", "core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) log_info = mocker.patch.object(scheduler._logger, \"info\") current_time", "WHEN # Check ELSE part mocker.patch.object(scheduler, '_scheduler_loop_sleep_task', None) scheduler._resume_check_schedules() #", "excinfo: 
tasks = await scheduler.get_task(uuid.uuid4()) @pytest.mark.asyncio async def test_get_task_exception(self, mocker):", "no task queued for schedule with pytest.raises(KeyError) as excinfo: assert", "_core_management_host=\"0.0.0.0\", current_time=current_time - 3600) # TODO: Remove after implementation of", "mocker): # GIVEN scheduler = Scheduler() schedule_id = uuid.uuid4() schedule_row", "if the target is an async function. if sys.version_info.major ==", "== args[0] p = json.loads(args[1]) assert payload == p @pytest.mark.asyncio", "implementation of above test test__read_config() mocker.patch.object(scheduler, '_read_config', return_value=asyncio.ensure_future(mock_task())) assert scheduler._ready", "'_wait_for_task_completion', return_value=asyncio.ensure_future(mock_task())) mocker.patch.object(scheduler, '_terminate_child_processes') mocker.patch.object(asyncio, 'create_subprocess_exec', return_value=_rv) await scheduler._get_schedules() schedule", "scheduler._ready is True assert len(scheduler._storage_async.scheduled_processes) == len(scheduler._process_scripts) assert len(scheduler._storage_async.schedules) ==", "are being raised despite mocking _purge_tasks_task, _scheduler_loop_task calls = [call('An", "name='Test Schedule', type=Schedule.Type.INTERVAL, day=0, time=0, repeat=10, repeat_seconds=10, exclusive=False, enabled=True, process_name='TestProcess')", "mocker.patch.object(scheduler, \"disable_schedule\", return_value=mock_coro()) # WHEN await scheduler.save_schedule(schedule) # THEN assert", "'_ready', True) mocker.patch.object(scheduler, '_task_processes') log_info = mocker.patch.object(scheduler._logger, \"info\") sch_id =", "scheduler._wait_for_task_completion(mock_task_process) # THEN # After task completion, sleep above, no", "do not have a status of running\", \"type\": \"integer\", \"default\":", "= MockStorageAsync(core_management_host=None, core_management_port=None) # WHEN # Check IF part mocker.patch.object(scheduler,", "# WHEN # THEN 
with pytest.raises(TaskNotRunningError) as excinfo: await scheduler.cancel_task(uuid.uuid4())", "MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.multiple(scheduler, _purge_tasks_task=None, _last_task_purge_time=None) mocker.patch.object(scheduler, 'purge_tasks', return_value=asyncio.ensure_future(mock_task())) # WHEN", "\"Schedule is already enabled\" assert (scheduler._schedules[sch_id]).id == sch_id assert (scheduler._schedules[sch_id]).enabled", "'_schedule_next_task') mocker.patch.multiple(scheduler, _schedules=mock_schedules, _task_processes=mock_task_processes, _schedule_executions=mock_schedule_executions) mocker.patch.object(scheduler, '_process_scripts', return_value=\"North Readings to", "start time), for a rows \" \"in the tasks table", "1 == audit_logger.call_count calls = [call('SCHCH', {'schedule': {'name': 'backup hourly',", "await scheduler.cancel_task(uuid.uuid4()) @pytest.mark.skip(\"_terminate_child_processes() not fit for unit test.\") @pytest.mark.asyncio async", "that there is no North task queued for schedule with", "assert scheduler._task_processes is None assert scheduler._schedules is None assert scheduler._process_scripts", "= MockStorageAsync(core_management_host=None, core_management_port=None) log_debug = mocker.patch.object(scheduler._logger, \"debug\", side_effect=Exception()) log_exception =", "\"schedule_time\": \"\", \"schedule_day\": 0, \"exclusive\": \"t\", \"enabled\": \"f\" }, {", "13:28:14.856375\", \"exit_code\": \"0\", \"reason\": \"\" } ] def __init__(self, core_management_host=None,", "# WHEN earliest_start_time = await scheduler._check_schedules() # THEN assert earliest_start_time", "str(ex).endswith('exclusive can not be None') with pytest.raises(ValueError) as ex: temp_schedule", "twice in this list. 
assert 'stats collection' in args3 @pytest.mark.asyncio", "1 and 7') @pytest.mark.asyncio @pytest.mark.skip(reason=\"To be done\") async def test_remove_service_from_task_processes(self):", "# WHEN scheduler._check_purge_tasks() # THEN assert scheduler._purge_tasks_task is not None", "is False # WHEN await scheduler.start() # THEN assert scheduler._ready", "scheduler.get_running_tasks() # THEN assert 1 == len(tasks) assert schedule.process_name ==", "is not None assert tasks[0].end_time is None assert tasks[0].exit_code is", "OMF to PI North schedule_row = scheduler._ScheduleRow( id=schedule_id, name='Test Schedule',", "log_error, log_debug = await self.scheduler_fixture(mocker) # WHEN schedules = await", "async def test__get_schedules_exception(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage", "log_params = \"No such Schedule %s\", str(random_schedule_id) log_exception.assert_called_with(*log_params) @pytest.mark.asyncio async", "curr_time = datetime.datetime.fromtimestamp(current_time) mocker.patch.multiple(scheduler, _max_running_tasks=10, _start_time=current_time) await scheduler._get_schedules() sch_id =", "None assert tasks[0].start_time is not None assert tasks[0].end_time is not", "= mocker.patch.object(scheduler._logger, \"debug\", side_effect=Exception()) log_exception = mocker.patch.object(scheduler._logger, \"exception\") # WHEN", "await scheduler.save_schedule(temp_schedule) del temp_schedule assert str(ex).endswith('time must be of type", "args3, kwargs3 = log_info.call_args_list[3] assert 'stats collection' in args0 assert", "core_management_port=None) log_debug = mocker.patch.object(scheduler._logger, \"debug\", side_effect=Exception()) log_exception = mocker.patch.object(scheduler._logger, \"exception\")", "\"The maximum age, in days (based on the start time),", "scheduler = Scheduler() schedule_id = uuid.uuid4() schedule_row = scheduler._ScheduleRow( id=schedule_id,", "0 == enable_schedule.call_count assert 0 == 
disable_schedule.call_count @pytest.mark.asyncio async def", "assert scheduler._ready is True assert len(scheduler._storage_async.scheduled_processes) == len(scheduler._process_scripts) assert len(scheduler._storage_async.schedules)", "len(scheduler._process_scripts) assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) @pytest.mark.asyncio @pytest.mark.skip(\"_mark_tasks_interrupted() not implemented in", "def test_get_tasks(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error,", "in args3 @pytest.mark.asyncio async def test__get_process_scripts(self, mocker): # GIVEN scheduler", "test__schedule_next_task(self, mocker): # TODO: Mandatory - Add negative tests for", "\"process_name\": \"COAP\", \"schedule_name\": \"COAP listener south\", \"schedule_type\": 1, \"schedule_interval\": \"00:00:00\",", "# WHEN assert scheduler._max_running_tasks is None assert scheduler._max_completed_task_age is None", "excinfo: await scheduler.delete_schedule(uuid.uuid4()) with pytest.raises(NotReadyError) as excinfo: await scheduler.get_running_tasks() with", "such Schedule %s\", str(random_schedule_id) log_exception.assert_called_with(*log_params) @pytest.mark.asyncio async def test_disable_schedule_already_disabled(self, mocker):", "any_order=True) calls = [call('Database command: %s', 'scheduled_processes'), call('Database command: %s',", "with pytest.raises(TaskNotRunningError) as excinfo: await scheduler.cancel_task(uuid.uuid4()) @pytest.mark.asyncio async def test_not_ready_and_paused(self,", "any_order=True) # TODO: Find why these exceptions are being raised", "Schedule) assert schedule.schedule_id == schedule_id assert schedule.name == \"purge\" assert", "scheduler._schedule_executions[sch_id] is True # WHEN await scheduler.queue_task(sch_id) # THEN assert", "WHEN await scheduler.save_schedule(schedule, is_enabled_modified=False) # THEN assert 1 == disable_schedule.call_count", "str(sch_id) 
log_info.assert_called_with(*log_params) @pytest.mark.asyncio async def test_enable_schedule_wrong_schedule_id(self, mocker): # GIVEN scheduler,", "table_name == 'scheduled_processes': return { \"count\": len(MockStorageAsync.scheduled_processes), \"rows\": MockStorageAsync.scheduled_processes }", "pass @pytest.mark.asyncio async def test__schedule_next_timed_task(self, mocker): # TODO: Mandatory -", "log_debug = mocker.patch.object(scheduler._logger, \"debug\", side_effect=Exception()) log_exception = mocker.patch.object(scheduler._logger, \"exception\") #", "False # WHEN await scheduler.start() # THEN assert scheduler._ready is", "= mocker.patch.object(scheduler._logger, \"info\") enable_schedule = mocker.patch.object(scheduler, \"enable_schedule\", return_value=mock_coro()) disable_schedule =", "\"http\" } @classmethod async def insert_into_tbl(cls, table_name, payload): pass @classmethod", "log_info = mocker.patch.object(scheduler._logger, \"info\") schedule_id = uuid.UUID(\"2b614d26-760f-11e7-b5a5-be2e44b06b34\") # OMF to", "> time.mktime(curr_time.timetuple()) assert 4 == log_info.call_count args0, kwargs0 = log_info.call_args_list[0]", "if is_exception is True: with pytest.raises(Exception): await scheduler._get_schedules() assert 1", "= scheduler._schedule_executions[sch_id] time_before_call = sch_execution.next_start_time # WHEN scheduler._schedule_next_task(sch) time_after_call =", "\"schedule_type\": 3, \"schedule_interval\": \"1 day 00:00:40\", \"schedule_time\": \"\", \"schedule_day\": 0,", "await self.scheduler_fixture(mocker) mocker.patch.object(scheduler, '_ready', False) mocker.patch.object(scheduler, '_paused', True) # WHEN", "\"exit_code\": \"0\", \"reason\": \"\" } ] def __init__(self, core_management_host=None, core_management_port=None):", "1 == len(scheduler._schedule_executions[schedule.id].task_processes) task_id = list(scheduler._schedule_executions[schedule.id].task_processes.keys())[0] # Confirm that cancel", "== 
disable_schedule.call_count @pytest.mark.asyncio async def test_save_schedule_exception(self, mocker): # GIVEN scheduler,", "scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.object(scheduler, '_schedule_first_task') log_exception = mocker.patch.object(scheduler._logger, \"exception\")", "WHEN scheduler._schedule_next_task(sch) time_after_call = sch_execution.next_start_time # THEN assert time_after_call >", "excinfo: await scheduler.get_running_tasks() with pytest.raises(NotReadyError) as excinfo: await scheduler.cancel_task(uuid.uuid4()) @pytest.mark.skip(\"_terminate_child_processes()", "\"exclusive\": \"t\", \"enabled\": \"t\" }, { \"id\": \"2b614d26-760f-11e7-b5a5-be2e44b06b34\", \"process_name\": \"North", "successfully disabled\" assert (scheduler._schedules[sch_id]).id == sch_id assert (scheduler._schedules[sch_id]).enabled is False", "await scheduler.queue_task(sch_id) # THEN assert isinstance(scheduler._schedule_executions[sch_id], scheduler._ScheduleExecution) # log_params =", "time_after_call = sch_execution.next_start_time # THEN assert time_after_call > time.mktime(curr_time.timetuple()) assert", "# Now delete schedule await scheduler.delete_schedule(sch_id) # THEN # Now", "True assert scheduler._schedule_executions is None assert scheduler._task_processes is None assert", "core_management_port=None) log_debug = mocker.patch.object(scheduler._logger, \"debug\") log_info = mocker.patch.object(scheduler._logger, \"info\") current_time", "PI north', 'repeat': 30.0, 'enabled': False, 'type': Schedule.Type.INTERVAL, 'exclusive': True,", "# -*- coding: utf-8 -*- # FLEDGE_BEGIN # See: http://fledge-iot.readthedocs.io/", "async def test_get_running_tasks(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception,", "Schedule '%s/%s' process '%s'\\n\", 'backup hourly', 'd1631422-9ec6-11e7-abc4-cec278b6b50a', 'backup')] log_info.assert_has_calls(calls, any_order=True)", "type datetime.time') 
with pytest.raises(ValueError) as ex: temp_schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row)", "\"1\" ] }, ] tasks = [ { \"id\": \"259b8570-65c1-4b92-8c62-e9642631a600\",", "async def test__resume_check_schedules(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage", "mocker.patch.object(scheduler, '_schedule_first_task') resume_sch = mocker.patch.object(scheduler, '_resume_check_schedules') log_info = mocker.patch.object(scheduler._logger, \"info\")", "\"format\": \"YYYY-MM-DD HH24:MI:SS.MS\", \"column\": \"start_time\"}, {\"alias\": \"end_time\", \"format\": \"YYYY-MM-DD HH24:MI:SS.MS\",", "= await scheduler.get_tasks() # THEN payload = {\"return\": [\"id\", \"process_name\",", "= scheduler._TaskProcess() mock_task_processes = dict() mock_task_process.process = await asyncio.create_subprocess_exec(\"sleep\", \".1\")", "as excinfo: await scheduler.get_schedule(uuid.uuid4()) with pytest.raises(NotReadyError) as excinfo: await scheduler.save_schedule(Schedule(Schedule.Type.INTERVAL))", "\"process_name\": \"stats collector\", \"schedule_name\": \"stats collection\", \"schedule_type\": 2, \"schedule_interval\": \"00:00:15\",", "PI north\", type=Schedule.Type.INTERVAL, repeat=datetime.timedelta(seconds=30), repeat_seconds=30, time=None, day=None, exclusive=True, enabled=True) mock_schedules[mock_schedule.id]", "True # Now queue task and assert that the task", "scheduler.save_schedule(schedule, is_enabled_modified=True) # THEN assert len(scheduler._storage_async.schedules) + 1 == len(scheduler._schedules)", "7') @pytest.mark.asyncio @pytest.mark.skip(reason=\"To be done\") async def test_remove_service_from_task_processes(self): pass @pytest.mark.asyncio", "uuid.UUID(\"2b614d26-760f-11e7-b5a5-be2e44b06b34\") # OMF to PI North # WHEN status, message", "\"No such Schedule %s\", str(random_schedule_id) log_exception.assert_called_with(*log_params) @pytest.mark.asyncio async def test_disable_schedule_already_disabled(self,", "# THEN 
task_id = uuid.uuid4() with pytest.raises(Exception) as excinfo: await", "\"process_name\": \"North Readings to OCS\", \"schedule_name\": \"OMF to OCS north\",", "MockStorageAsync(StorageClientAsync): schedules = [ { \"id\": \"cea17db8-6ccc-11e7-907b-a6006ad3dba0\", \"process_name\": \"purge\", \"schedule_name\":", "log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) random_schedule_id = uuid.uuid4()", "mocker.patch.object(scheduler._logger, 'debug', side_effect=Exception()) # WHEN with pytest.raises(Exception) as excinfo: tasks", "assert tasks[0].end_time is not None assert tasks[0].exit_code is '0' @pytest.mark.asyncio", "\"info\") current_time = time.time() mocker.patch.object(scheduler, '_schedule_first_task') mocker.patch.object(scheduler, '_scheduler_loop', return_value=asyncio.ensure_future(mock_task())) mocker.patch.multiple(scheduler,", "raised by Scheduler._purge_tasks %s', \"object MagicMock can't be used in", "= scheduler._schedule_executions[sch_id] # WHEN scheduler._schedule_first_task(sch, current_time) time_after_call = sch_execution.next_start_time #", "0 # 0 for Interval Schedule assert schedule.repeat == schedule_row[5]", "len(scheduler._process_scripts) @pytest.mark.asyncio async def test__get_process_scripts_exception(self, mocker): # GIVEN scheduler =", "\"object MagicMock can't be used in 'await' expression\")] log_exception.assert_has_calls(calls) @pytest.mark.asyncio", "False, 'type': Schedule.Type.INTERVAL, 'exclusive': True, 'processName': 'North Readings to PI'}})]", "mocker.patch.object(scheduler, '_schedule_first_task') # log_info = mocker.patch.object(scheduler._logger, \"info\") await scheduler._get_schedules() sch_id", "for full code coverage # GIVEN scheduler = Scheduler() scheduler._storage", "is_enabled_modified=True) # THEN assert len(scheduler._storage_async.schedules) + 1 == len(scheduler._schedules) assert", "repeat_seconds=30, time=None, day=None, exclusive=True, enabled=True) 
mock_schedules[mock_schedule.id] = mock_schedule mock_task_process =", "THEN assert 1 == disable_schedule.call_count @pytest.mark.asyncio async def test_save_schedule_update(self, mocker):", "scheduler.get_running_tasks() with pytest.raises(NotReadyError) as excinfo: await scheduler.cancel_task(uuid.uuid4()) @pytest.mark.skip(\"_terminate_child_processes() not fit", "\"00:00:15\", \"schedule_time\": \"00:00:15\", \"schedule_day\": 3, \"exclusive\": \"f\", \"enabled\": \"t\" },", "is '' assert task.state is not None assert task.cancel_requested is", "'%s'\\n\", 'backup hourly', 'd1631422-9ec6-11e7-abc4-cec278b6b50a', 'backup')] log_info.assert_has_calls(calls, any_order=True) assert 1 ==", "assert message == \"Schedule is already enabled\" assert (scheduler._schedules[sch_id]).id ==", "scheduler.queue_task(uuid.uuid4()) with pytest.raises(NotReadyError) as excinfo: await scheduler.delete_schedule(uuid.uuid4()) with pytest.raises(NotReadyError) as", "@pytest.mark.asyncio async def test_get_tasks_exception(self, mocker): # GIVEN scheduler, schedule, log_info,", "async def scheduler_fixture(self, mocker): # Changed in version 3.8: patch()", "# Assert that there is no task queued for this", "@pytest.mark.asyncio async def test_save_schedule_exception(self, mocker): # GIVEN scheduler, schedule, log_info,", "scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.name = \"\" await scheduler.save_schedule(temp_schedule) del temp_schedule assert", "expression\")] log_exception.assert_has_calls(calls) @pytest.mark.asyncio async def test_get_scheduled_processes(self, mocker): # GIVEN scheduler", "log_info = mocker.patch.object(scheduler._logger, \"info\") log_exception = mocker.patch.object(scheduler._logger, \"exception\") mocker.patch.object(scheduler, '_scheduler_loop',", "@pytest.mark.skip(\"_terminate_child_processes() not fit for unit test.\") @pytest.mark.asyncio async def test__terminate_child_processes(self,", "\"Queued schedule '%s' for 
execution\", 'purge' # log_info.assert_called_with(*log_params) @pytest.mark.asyncio async", "to PI north' in args assert 'North Readings to PI'", "WHEN await scheduler._read_storage() # THEN assert len(scheduler._storage_async.scheduled_processes) == len(scheduler._process_scripts) assert", "call(\"Disabled Schedule '%s/%s' process '%s'\\n\", 'OMF to PI north', '2b614d26-760f-11e7-b5a5-be2e44b06b34',", "assert scheduler._schedule_executions is None assert scheduler._task_processes is None assert scheduler._schedules", "0 temp_schedule.time = datetime.time() await scheduler.save_schedule(temp_schedule) del temp_schedule assert str(ex).endswith('day", "\"tasks/purge\" ] }, { \"name\": \"stats collector\", \"script\": [ \"tasks/statistics\"", "MagicMock can't be used in 'await' expression\")] log_exception.assert_has_calls(calls) @pytest.mark.asyncio async", "= 9999 m.terminate = lambda: True return m @pytest.allure.feature(\"unit\") @pytest.allure.story(\"scheduler\")", "scheduler.get_schedules() with pytest.raises(NotReadyError) as excinfo: await scheduler.get_schedule(uuid.uuid4()) with pytest.raises(NotReadyError) as", "PI\", \"schedule_name\": \"OMF to PI north\", \"schedule_type\": 3, \"schedule_interval\": \"00:00:30\",", "= mocker.patch.object(ConfigurationManager, \"create_category\", return_value=asyncio.ensure_future(mock_task())) get_cat = mocker.patch.object(ConfigurationManager, \"get_category_all_items\", return_value=_rv) #", "pytest.raises(Exception): await scheduler._get_process_scripts() log_args = 'Query failed: %s', 'scheduled_processes' log_exception.assert_called_once_with(*log_args)", "ConfigurationManager from fledge.services.core.scheduler.entities import * from fledge.services.core.scheduler.exceptions import * from", "assert message == \"Schedule {} already disabled\".format(str(sch_id)) assert (scheduler._schedules[sch_id]).id ==", "%s', '2b614d26-760f-11e7-b5a5-be2e44b06b34'), call(\"Disabled Schedule '%s/%s' process '%s'\\n\", 'OMF to 
PI", "hourly', 'type': Schedule.Type.INTERVAL, 'processName': 'backup', 'exclusive': True, 'repeat': 3600.0, 'enabled':", "temp_schedule.exclusive = None await scheduler.save_schedule(temp_schedule) del temp_schedule assert str(ex).endswith('exclusive can", "\" \"in the tasks table that do not have a", "assert 0 == enable_schedule.call_count assert 0 == disable_schedule.call_count @pytest.mark.asyncio async", "schedule.schedule_type == schedule_row[2] assert schedule_row[3] is 0 # 0 for", "\"info\") sch_id = uuid.UUID(\"2b614d26-760f-11e7-b5a5-be2e44b06b34\") # OMF to PI North #", "# WHEN status, message = await scheduler.enable_schedule(sch_id) # THEN assert", "time=None, day=None, exclusive=True, enabled=True) log_exception = mocker.patch.object(scheduler._logger, \"exception\") log_error =", "return_value=asyncio.ensure_future(mock_task())) first_task = mocker.patch.object(scheduler, '_schedule_first_task') resume_sch = mocker.patch.object(scheduler, '_resume_check_schedules') log_info", "test_get_tasks(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug", "listener south' in args1 assert 'OMF to PI north' in", "# WHEN await scheduler._get_process_scripts() # THEN assert len(scheduler._storage_async.scheduled_processes) == len(scheduler._process_scripts)", "\"id\": \"2b614d26-760f-11e7-b5a5-be2e44b06b34\", \"process_name\": \"North Readings to PI\", \"schedule_name\": \"OMF to", "= uuid.UUID(\"2176eb68-7303-11e7-8cf7-a6006ad3dba0\") # stat collector sch = scheduler._schedules[sch_id] sch_execution =", "scheduler._max_running_tasks is not None assert scheduler._max_completed_task_age is not None @pytest.mark.asyncio", "scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.name = None await scheduler.save_schedule(temp_schedule) del temp_schedule assert", "self.scheduler_fixture(mocker) # WHEN # THEN with pytest.raises(ScheduleNotFoundError) as excinfo: await", "log_info.call_args_list[1] assert 
\"Stopping process: Schedule '%s' process '%s' task %s", "schedule_row) enable_schedule = mocker.patch.object(scheduler, \"enable_schedule\", return_value=mock_coro()) disable_schedule = mocker.patch.object(scheduler, \"disable_schedule\",", "await self.scheduler_fixture(mocker) sch_id = uuid.UUID(\"d1631422-9ec6-11e7-abc4-cec278b6b50a\") # backup await scheduler._get_schedules() #", "not None @pytest.mark.asyncio async def test__check_purge_tasks(self, mocker): # TODO: Mandatory", "\"name\": \"Fledge Storage\", \"type\": \"Storage\", \"service_port\": 9999, \"management_port\": 9999, \"address\":", "temp_schedule.time = 1234 await scheduler.save_schedule(temp_schedule) del temp_schedule assert str(ex).endswith('time must", "# THEN assert len(scheduler._storage_async.schedules) + 1 == len(scheduler._schedules) assert 1", "repeat=datetime.timedelta(seconds=30), repeat_seconds=30, exclusive=False, enabled=True, process_name='TestProcess') schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) enable_schedule", "Readings to PI\") # WHEN await scheduler._wait_for_task_completion(mock_task_process) # THEN #", "is not None assert tasks[0].cancel_requested is None assert tasks[0].start_time is", "await scheduler.get_task(task_id) # THEN payload = {\"return\": [\"id\", \"process_name\", \"schedule_name\",", "log_info.call_args_list[2] assert 'stats collection' in args0 assert 'COAP listener south'", "id=schedule_id, name='Test Schedule', type=Schedule.Type.INTERVAL, day=0, time=0, repeat=datetime.timedelta(seconds=30), repeat_seconds=30, exclusive=False, enabled=True,", "assert 1 == log_info.call_count # assert call(\"Queued schedule '%s' for", "day=1, time=datetime.time(), repeat=datetime.timedelta(seconds=30), repeat_seconds=30, exclusive=False, enabled=True, process_name='TestProcess') schedule = scheduler._schedule_row_to_schedule(schedule_id,", "FLEDGE_END import asyncio import datetime import uuid import time import", "Management port received is 
%d', 9999)] log_info.assert_has_calls(calls, any_order=True) calls =", "\"exception\") mocker.patch.object(scheduler, '_schedule_first_task', side_effect=Exception()) # WHEN # THEN with pytest.raises(Exception):", "mocker.patch.object(scheduler, '_process_scripts', return_value=\"North Readings to PI\") mocker.patch.object(scheduler, '_wait_for_task_completion', return_value=asyncio.ensure_future(mock_task())) mocker.patch.object(scheduler,", "calls = [call('Database command: %s', 'scheduled_processes'), call('Database command: %s', 'schedules')]", "sch_execution, next_dt) time_after_call = sch_execution.next_start_time # THEN assert time_after_call >", "2 == log_info.call_count calls = [call('No Task running for Schedule", "log_debug = await self.scheduler_fixture(mocker) sch_id = uuid.UUID(\"d1631422-9ec6-11e7-abc4-cec278b6b50a\") # backup await", "WHEN await scheduler._get_process_scripts() # THEN assert len(scheduler._storage_async.scheduled_processes) == len(scheduler._process_scripts) @pytest.mark.asyncio", "def insert_into_tbl(cls, table_name, payload): pass @classmethod async def update_tbl(cls, table_name,", "already enabled\", str(sch_id) log_info.assert_called_with(*log_params) @pytest.mark.asyncio async def test_enable_schedule_wrong_schedule_id(self, mocker): #", "enabled=True) log_exception = mocker.patch.object(scheduler._logger, \"exception\") log_error = mocker.patch.object(scheduler._logger, \"error\") log_debug", "# log_info = mocker.patch.object(scheduler._logger, \"info\") await scheduler._get_schedules() sch_id = uuid.UUID(\"cea17db8-6ccc-11e7-907b-a6006ad3dba0\")", "= await scheduler.get_schedule(schedule_id) # THEN assert isinstance(schedule, Schedule) assert schedule.schedule_id", "maximum age, in days (based on the start time), for", "scheduler._check_purge_tasks() # THEN assert scheduler._purge_tasks_task is not None @pytest.mark.asyncio async", "\"state\": 1, \"start_time\": \"2018-02-06 13:28:14.477868\", \"end_time\": \"2018-02-06 
13:28:14.856375\", \"exit_code\": \"0\",", "= dict() mock_schedule_execution = scheduler._ScheduleExecution() mock_schedule_executions[mock_schedule.id] = mock_schedule_execution mock_schedule_executions[mock_schedule.id].task_processes[mock_task_id] =", "import StorageClientAsync __author__ = \"<NAME>\" __copyright__ = \"Copyright (c) 2017", "_start_time=current_time) await scheduler._get_schedules() mocker.patch.object(scheduler, '_start_task', return_value=asyncio.ensure_future(mock_task())) # WHEN earliest_start_time =", "mocker.patch.object(scheduler._logger, \"info\") current_time = time.time() mocker.patch.object(scheduler, '_schedule_first_task') mocker.patch.object(scheduler, '_scheduler_loop', return_value=asyncio.ensure_future(mock_task()))", "assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) # WHEN # Now delete schedule", "schedules = [ { \"id\": \"cea17db8-6ccc-11e7-907b-a6006ad3dba0\", \"process_name\": \"purge\", \"schedule_name\": \"purge\",", "= await self.scheduler_fixture(mocker) log_debug = mocker.patch.object(scheduler._logger, 'debug', side_effect=Exception()) # WHEN", "in args1 assert 'OMF to PI north' in args2 @pytest.mark.asyncio", "# WHEN schedule = await scheduler.get_schedule(schedule_id) # THEN assert isinstance(schedule,", "get_cat(): return { \"max_running_tasks\": { \"description\": \"The maximum number of", "collector sch = scheduler._schedules[sch_id] sch_execution = scheduler._schedule_executions[sch_id] # WHEN scheduler._schedule_first_task(sch,", "south' in args1 assert 'OMF to PI north' in args2", "scheduler.stop() # THEN assert retval is True assert scheduler._schedule_executions is", "from fledge.common.storage_client.storage_client import StorageClientAsync __author__ = \"<NAME>\" __copyright__ = \"Copyright", "north') == log_info.call_args_list[0] args, kwargs = log_info.call_args_list[0] assert \"Process started:", "mocker.patch.object(scheduler._logger, \"info\") current_time = time.time() 
curr_time = datetime.datetime.fromtimestamp(current_time) mocker.patch.multiple(scheduler, _max_running_tasks=10,", "assert 1 == len(scheduler._schedule_executions[schedule.id].task_processes) task_id = list(scheduler._schedule_executions[schedule.id].task_processes.keys())[0] # WHEN task", "= log_info.call_args_list[1] args2, kwargs2 = log_info.call_args_list[2] args3, kwargs3 = log_info.call_args_list[3]", "# WHEN scheduler._schedule_next_task(sch) time_after_call = sch_execution.next_start_time # THEN assert time_after_call", "{ \"name\": \"North Readings to OCS\", \"script\": [ \"tasks/north\", \"--stream_id\",", "False assert scheduler._start_time is None calls = [call('Processing stop request'),", "args2, kwargs2 = log_info.call_args_list[2] assert 'stats collection' in args0 assert", "mock_task_process.schedule = mock_schedule mock_task_id = uuid.uuid4() mock_task_process.task_id = mock_task_id mock_task_processes[mock_task_process.task_id]", "'exclusive': False, 'day': 1, 'time': '0:0:0', 'processName': 'TestProcess', 'type': Schedule.Type.TIMED}})]", "excinfo: await scheduler.queue_task(uuid.uuid4()) with pytest.raises(NotReadyError) as excinfo: await scheduler.delete_schedule(uuid.uuid4()) with", "with pytest.raises(NotReadyError) as excinfo: await scheduler.get_schedules() with pytest.raises(NotReadyError) as excinfo:", "day=None, exclusive=True, enabled=True) mock_schedules[mock_schedule.id] = mock_schedule mock_task_process = scheduler._TaskProcess() mock_task_processes", "== log_info.call_args_list[0] args, kwargs = log_info.call_args_list[0] assert \"Process started: Schedule", "log_error, log_debug = await self.scheduler_fixture(mocker) mocker.patch.object(scheduler, '_ready', False) mocker.patch.object(scheduler, '_paused',", "log_info.call_args_list[1] args2, kwargs2 = log_info.call_args_list[2] args3, kwargs3 = log_info.call_args_list[3] assert", "datetime.datetime.fromtimestamp(current_time) mocker.patch.multiple(scheduler, 
_max_running_tasks=10, _start_time=current_time) await scheduler._get_schedules() sch_id = uuid.UUID(\"2176eb68-7303-11e7-8cf7-a6006ad3dba0\") #", "scheduler._process_scripts is None assert scheduler._ready is False assert scheduler._paused is", "to PI North # WHEN status, message = await scheduler.disable_schedule(sch_id)", "# Assert that there is no task queued for schedule", "# backup queue_task = mocker.patch.object(scheduler, 'queue_task', return_value=asyncio.ensure_future(mock_task())) audit_logger = mocker.patch.object(AuditLogger,", "pid %s\\n%s\" in args assert 'OMF to PI north' in", "excinfo: await scheduler.get_schedule(uuid.uuid4()) with pytest.raises(NotReadyError) as excinfo: await scheduler.save_schedule(Schedule(Schedule.Type.INTERVAL)) with", "None assert tasks[0].end_time is not None assert tasks[0].exit_code is '0'", "@classmethod async def query_tbl(cls, table_name, query=None): if table_name == 'schedules':", "schedule_id = uuid.uuid4() schedule_row = scheduler._ScheduleRow( id=schedule_id, name='Test Schedule', type=Schedule.Type.INTERVAL,", "scheduler._start_task(schedule) # Confirm that task has started assert 1 ==", "\"end_time\", \"column\": \"end_time\", \"format\": \"YYYY-MM-DD HH24:MI:SS.MS\"}, \"reason\", \"exit_code\"], \"limit\": 100}", "'%s' task %s pid %s\\n%s\" in args assert 'OMF to", "log_info.call_args_list[3] assert 'stats collection' in args0 assert 'COAP listener south'", "test_interval, is_exception, mocker): # GIVEN scheduler = Scheduler() scheduler._storage =", "= MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) log_info = mocker.patch.object(scheduler._logger,", "'_process_scripts', return_value=\"North Readings to PI\") # WHEN await scheduler._wait_for_task_completion(mock_task_process) #", "\"description\": \"The maximum age, in days (based on the start", "len(scheduler._process_scripts) assert 
len(scheduler._storage_async.schedules) == len(scheduler._schedules) calls = [call('Starting'), call('Starting Scheduler:", "== len(scheduler._schedule_executions[schedule.id].task_processes) # WHEN tasks = await scheduler.get_running_tasks() # THEN", "mocker): # Changed in version 3.8: patch() now returns an", "_task_processes={}) # WHEN retval = await scheduler.stop() # THEN assert", "'processName': 'North Readings to PI'}})] audit_logger.assert_has_calls(calls, any_order=True) @pytest.mark.asyncio async def", "enable_schedule.call_count assert 0 == disable_schedule.call_count @pytest.mark.asyncio async def test_save_schedule_new_with_enable_modified(self, mocker):", "False) mocker.patch.object(scheduler, '_process_scripts', return_value=\"North Readings to PI\") mocker.patch.object(scheduler, '_wait_for_task_completion', return_value=asyncio.ensure_future(mock_task()))", "isinstance(schedule, Schedule) assert schedule.schedule_id == schedule_id assert schedule.name == \"purge\"", "side_effect=Exception()) sch_id = uuid.UUID(\"d1631422-9ec6-11e7-abc4-cec278b6b50a\") # backup # WHEN # THEN", "# THEN assert scheduler._purge_tasks_task is not None @pytest.mark.asyncio async def", "\"schedule_day\": 0, \"exclusive\": \"t\", \"enabled\": \"t\" }, { \"id\": \"2b614d26-760f-11e7-b5a5-be2e44b06b34\",", "mock_process(): m = MagicMock() m.pid = 9999 m.terminate = lambda:", "len(scheduler._storage_async.scheduled_processes) == len(scheduler._process_scripts) @pytest.mark.asyncio async def test__get_process_scripts_exception(self, mocker): # GIVEN", "= uuid.uuid4() # WHEN await scheduler.disable_schedule(random_schedule_id) # THEN log_params =", "code coverage # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None,", "is True assert message == \"Schedule successfully disabled\" assert (scheduler._schedules[sch_id]).id", "exception was raised by Scheduler._purge_tasks %s', \"object MagicMock can't be", "backup await 
scheduler._get_schedules() # Confirm no. of schedules assert len(scheduler._storage_async.schedules)", "list. assert 'stats collection' in args3 @pytest.mark.asyncio async def test__schedule_first_task(self,", "'enabled': True, 'exclusive': False}})] audit_logger.assert_has_calls(calls, any_order=True) assert 1 == first_task.call_count", "scheduler._ready is False assert scheduler._paused is False assert scheduler._start_time is", "first with pytest.raises(KeyError) as excinfo: assert scheduler._schedule_executions[sch_id] is True #", "mocker.patch.object(scheduler, 'queue_task', return_value=asyncio.ensure_future(mock_task())) audit_logger = mocker.patch.object(AuditLogger, 'information', return_value=asyncio.ensure_future(mock_task())) # WHEN" ]
[ "on the highest calorie to lowest calorie cupcake Args: calorie", "fit. Each cupcake has a calorie count, and Marc can", "his weight \"\"\" calories = 0 for i, c in", "c in enumerate(sorted(calorie, reverse=True)): calories += (2 ** i *", "= 0 for i, c in enumerate(sorted(calorie, reverse=True)): calories +=", "Each cupcake has a calorie count, and Marc can walk", "cupcake has a calorie count, and Marc can walk a", "but he also likes to stay fit. Each cupcake has", "has eaten j cupcakes so far, after eating a cupcake", "calories = 0 for i, c in enumerate(sorted(calorie, reverse=True)): calories", "\"__main__\": assert marcs_cakewalk([5, 10, 7]) == 44 assert marcs_cakewalk([1, 3,", "can walk a distance to expend those calories. If Marc", "loves cupcakes, but he also likes to stay fit. Each", "0 for i, c in enumerate(sorted(calorie, reverse=True)): calories += (2", "Problem: https://www.hackerrank.com/challenges/marcs-cakewalk/problem Marc loves cupcakes, but he also likes to", "a calorie count, and Marc can walk a distance to", "lowest calorie cupcake Args: calorie (list): List of integers denoting", "== 44 assert marcs_cakewalk([1, 3, 2]) == 11 assert marcs_cakewalk([7,", "for each cupcake Returns: int: The minimum number of miels", "maintain his weight. Solve: To calculate the minimum miles, you", "cupcakes, but he also likes to stay fit. Each cupcake", "least 2**j x c miles to maintain his weight. Solve:", "for i, c in enumerate(sorted(calorie, reverse=True)): calories += (2 **", "of integers denoting the calories for each cupcake Returns: int:", "integers denoting the calories for each cupcake Returns: int: The", "calories he must walk at least 2**j x c miles", "based on the highest calorie to lowest calorie cupcake Args:", "calories if __name__ == \"__main__\": assert marcs_cakewalk([5, 10, 7]) ==", "calorie cupcake Args: calorie (list): List of integers denoting the", "stay fit. 
Each cupcake has a calorie count, and Marc", "Marc loves cupcakes, but he also likes to stay fit.", "miles to maintain his weight. Solve: To calculate the minimum", "also likes to stay fit. Each cupcake has a calorie", "return calories if __name__ == \"__main__\": assert marcs_cakewalk([5, 10, 7])", "calorie to lowest calorie cupcake Args: calorie (list): List of", "** i * c) return calories if __name__ == \"__main__\":", "likes to stay fit. Each cupcake has a calorie count,", "denoting the calories for each cupcake Returns: int: The minimum", "count, and Marc can walk a distance to expend those", "assert marcs_cakewalk([1, 3, 2]) == 11 assert marcs_cakewalk([7, 4, 9,", "Solve: To calculate the minimum miles, you solve based on", "each cupcake Returns: int: The minimum number of miels Marc", "eating a cupcake with c calories he must walk at", "+= (2 ** i * c) return calories if __name__", "must walk to maintain his weight \"\"\" calories = 0", "of miels Marc must walk to maintain his weight \"\"\"", "reverse=True)): calories += (2 ** i * c) return calories", "expend those calories. If Marc has eaten j cupcakes so", "in enumerate(sorted(calorie, reverse=True)): calories += (2 ** i * c)", "\"\"\"Hackerrank Problem: https://www.hackerrank.com/challenges/marcs-cakewalk/problem Marc loves cupcakes, but he also likes", "weight. Solve: To calculate the minimum miles, you solve based", "solve based on the highest calorie to lowest calorie cupcake", "maintain his weight \"\"\" calories = 0 for i, c", "distance to expend those calories. If Marc has eaten j", "Marc must walk to maintain his weight \"\"\" calories =", "he also likes to stay fit. 
Each cupcake has a", "far, after eating a cupcake with c calories he must", "with c calories he must walk at least 2**j x", "enumerate(sorted(calorie, reverse=True)): calories += (2 ** i * c) return", "marcs_cakewalk(calorie): \"\"\"Hackerrank Problem: https://www.hackerrank.com/challenges/marcs-cakewalk/problem Marc loves cupcakes, but he also", "j cupcakes so far, after eating a cupcake with c", "cupcake with c calories he must walk at least 2**j", "so far, after eating a cupcake with c calories he", "to maintain his weight. Solve: To calculate the minimum miles,", "to maintain his weight \"\"\" calories = 0 for i,", "number of miels Marc must walk to maintain his weight", "Returns: int: The minimum number of miels Marc must walk", "If Marc has eaten j cupcakes so far, after eating", "i * c) return calories if __name__ == \"__main__\": assert", "* c) return calories if __name__ == \"__main__\": assert marcs_cakewalk([5,", "those calories. If Marc has eaten j cupcakes so far,", "minimum miles, you solve based on the highest calorie to", "miels Marc must walk to maintain his weight \"\"\" calories", "\"\"\" calories = 0 for i, c in enumerate(sorted(calorie, reverse=True)):", "2]) == 11 assert marcs_cakewalk([7, 4, 9, 6]) == 79", "his weight. Solve: To calculate the minimum miles, you solve", "The minimum number of miels Marc must walk to maintain", "calories += (2 ** i * c) return calories if", "i, c in enumerate(sorted(calorie, reverse=True)): calories += (2 ** i", "x c miles to maintain his weight. Solve: To calculate", "walk a distance to expend those calories. 
If Marc has", "10, 7]) == 44 assert marcs_cakewalk([1, 3, 2]) == 11", "__name__ == \"__main__\": assert marcs_cakewalk([5, 10, 7]) == 44 assert", "walk at least 2**j x c miles to maintain his", "calorie (list): List of integers denoting the calories for each", "weight \"\"\" calories = 0 for i, c in enumerate(sorted(calorie,", "calories for each cupcake Returns: int: The minimum number of", "3, 2]) == 11 assert marcs_cakewalk([7, 4, 9, 6]) ==", "and Marc can walk a distance to expend those calories.", "Marc can walk a distance to expend those calories. If", "at least 2**j x c miles to maintain his weight.", "must walk at least 2**j x c miles to maintain", "the minimum miles, you solve based on the highest calorie", "to stay fit. Each cupcake has a calorie count, and", "marcs_cakewalk([5, 10, 7]) == 44 assert marcs_cakewalk([1, 3, 2]) ==", "he must walk at least 2**j x c miles to", "has a calorie count, and Marc can walk a distance", "calories. If Marc has eaten j cupcakes so far, after", "c) return calories if __name__ == \"__main__\": assert marcs_cakewalk([5, 10,", "if __name__ == \"__main__\": assert marcs_cakewalk([5, 10, 7]) == 44", "(list): List of integers denoting the calories for each cupcake", "a distance to expend those calories. If Marc has eaten", "cupcake Args: calorie (list): List of integers denoting the calories", "Args: calorie (list): List of integers denoting the calories for", "to lowest calorie cupcake Args: calorie (list): List of integers", "Marc has eaten j cupcakes so far, after eating a", "the highest calorie to lowest calorie cupcake Args: calorie (list):", "to expend those calories. 
If Marc has eaten j cupcakes", "cupcake Returns: int: The minimum number of miels Marc must", "To calculate the minimum miles, you solve based on the", "== \"__main__\": assert marcs_cakewalk([5, 10, 7]) == 44 assert marcs_cakewalk([1,", "calorie count, and Marc can walk a distance to expend", "a cupcake with c calories he must walk at least", "walk to maintain his weight \"\"\" calories = 0 for", "you solve based on the highest calorie to lowest calorie", "calculate the minimum miles, you solve based on the highest", "44 assert marcs_cakewalk([1, 3, 2]) == 11 assert marcs_cakewalk([7, 4,", "c miles to maintain his weight. Solve: To calculate the", "(2 ** i * c) return calories if __name__ ==", "marcs_cakewalk([1, 3, 2]) == 11 assert marcs_cakewalk([7, 4, 9, 6])", "after eating a cupcake with c calories he must walk", "assert marcs_cakewalk([5, 10, 7]) == 44 assert marcs_cakewalk([1, 3, 2])", "miles, you solve based on the highest calorie to lowest", "minimum number of miels Marc must walk to maintain his", "2**j x c miles to maintain his weight. Solve: To", "https://www.hackerrank.com/challenges/marcs-cakewalk/problem Marc loves cupcakes, but he also likes to stay", "7]) == 44 assert marcs_cakewalk([1, 3, 2]) == 11 assert", "int: The minimum number of miels Marc must walk to", "eaten j cupcakes so far, after eating a cupcake with", "the calories for each cupcake Returns: int: The minimum number", "c calories he must walk at least 2**j x c", "highest calorie to lowest calorie cupcake Args: calorie (list): List", "def marcs_cakewalk(calorie): \"\"\"Hackerrank Problem: https://www.hackerrank.com/challenges/marcs-cakewalk/problem Marc loves cupcakes, but he", "List of integers denoting the calories for each cupcake Returns:", "cupcakes so far, after eating a cupcake with c calories" ]
[ "class_='tick-death')[0].find_all('small')[0].string } for state in state_wise_data ] context = {", "class_='tick-confirmed')[0].find_all('small')[0].string, \"active_case\": state.find_all('div', class_='tick-active')[0].find_all('small')[0].string, \"discharge\": state.find_all('div', class_='tick-discharged')[0].find_all('small')[0].string, \"death\": state.find_all('div', class_='tick-death')[0].find_all('small')[0].string", "'active_case': information.find('div', class_='active-case').find('span', class_='icount').string, 'discharge': information.find('div', class_='discharge').find('span', class_='icount').string, 'death': information.find('div',", "soup = BeautifulSoup(corona_html.content, 'html.parser') state_wise_data = soup.find_all('div', class_='views-row') information =", "class_='tick-discharged')[0].find_all('small')[0].string, \"death\": state.find_all('div', class_='tick-death')[0].find_all('small')[0].string } for state in state_wise_data ]", "= requests.get(\"https://www.mygov.in/covid-19\") soup = BeautifulSoup(corona_html.content, 'html.parser') state_wise_data = soup.find_all('div', class_='views-row')", "] context = { 'corona_info': info, 'data': sorted(corona_info, key=lambda i:", "{ \"state_name\": state.find_all('span', class_='st_name')[0].string, \"confirm_case\": state.find_all('div', class_='tick-confirmed')[0].find_all('small')[0].string, \"active_case\": state.find_all('div', class_='tick-active')[0].find_all('small')[0].string,", "state in state_wise_data ] context = { 'corona_info': info, 'data':", "corona_html = requests.get(\"https://www.mygov.in/covid-19\") soup = BeautifulSoup(corona_html.content, 'html.parser') state_wise_data = soup.find_all('div',", "requests from bs4 import BeautifulSoup def corona_data(request): \"Testaaaa\" corona_html =", "= { 'update_data': information.find('div', class_='info_title').find('span').string, 'active_case': information.find('div', class_='active-case').find('span', 
class_='icount').string, 'discharge':", "{ 'update_data': information.find('div', class_='info_title').find('span').string, 'active_case': information.find('div', class_='active-case').find('span', class_='icount').string, 'discharge': information.find('div',", "bs4 import BeautifulSoup def corona_data(request): \"Testaaaa\" corona_html = requests.get(\"https://www.mygov.in/covid-19\") soup", "soup.find_all('div', class_='views-row') information = soup.find('div', class_='information_row') info = { 'update_data':", "\"death\": state.find_all('div', class_='tick-death')[0].find_all('small')[0].string } for state in state_wise_data ] context", "state_wise_data = soup.find_all('div', class_='views-row') information = soup.find('div', class_='information_row') info =", "render import requests from bs4 import BeautifulSoup def corona_data(request): \"Testaaaa\"", "class_='views-row') information = soup.find('div', class_='information_row') info = { 'update_data': information.find('div',", "context = { 'corona_info': info, 'data': sorted(corona_info, key=lambda i: int(''.join(i['confirm_case'].replace(',',", "\"Testaaaa\" corona_html = requests.get(\"https://www.mygov.in/covid-19\") soup = BeautifulSoup(corona_html.content, 'html.parser') state_wise_data =", "class_='discharge').find('span', class_='icount').string, 'death': information.find('div', class_='death_case').find('span', class_='icount').string } corona_info = [", "[ { \"state_name\": state.find_all('span', class_='st_name')[0].string, \"confirm_case\": state.find_all('div', class_='tick-confirmed')[0].find_all('small')[0].string, \"active_case\": state.find_all('div',", "'data': sorted(corona_info, key=lambda i: int(''.join(i['confirm_case'].replace(',', ''))), reverse=True) } return render(request,", "class_='info_title').find('span').string, 'active_case': information.find('div', class_='active-case').find('span', class_='icount').string, 'discharge': information.find('div', class_='discharge').find('span', 
class_='icount').string, 'death':", "'corona_info': info, 'data': sorted(corona_info, key=lambda i: int(''.join(i['confirm_case'].replace(',', ''))), reverse=True) }", "= { 'corona_info': info, 'data': sorted(corona_info, key=lambda i: int(''.join(i['confirm_case'].replace(',', ''))),", "soup.find('div', class_='information_row') info = { 'update_data': information.find('div', class_='info_title').find('span').string, 'active_case': information.find('div',", "key=lambda i: int(''.join(i['confirm_case'].replace(',', ''))), reverse=True) } return render(request, 'coronainfo/index.html', context)", "'death': information.find('div', class_='death_case').find('span', class_='icount').string } corona_info = [ { \"state_name\":", "{ 'corona_info': info, 'data': sorted(corona_info, key=lambda i: int(''.join(i['confirm_case'].replace(',', ''))), reverse=True)", "import render import requests from bs4 import BeautifulSoup def corona_data(request):", "= soup.find_all('div', class_='views-row') information = soup.find('div', class_='information_row') info = {", "= BeautifulSoup(corona_html.content, 'html.parser') state_wise_data = soup.find_all('div', class_='views-row') information = soup.find('div',", "import BeautifulSoup def corona_data(request): \"Testaaaa\" corona_html = requests.get(\"https://www.mygov.in/covid-19\") soup =", "'update_data': information.find('div', class_='info_title').find('span').string, 'active_case': information.find('div', class_='active-case').find('span', class_='icount').string, 'discharge': information.find('div', class_='discharge').find('span',", "state.find_all('div', class_='tick-death')[0].find_all('small')[0].string } for state in state_wise_data ] context =", "information.find('div', class_='active-case').find('span', class_='icount').string, 'discharge': information.find('div', class_='discharge').find('span', class_='icount').string, 'death': information.find('div', class_='death_case').find('span',", "information.find('div', 
class_='death_case').find('span', class_='icount').string } corona_info = [ { \"state_name\": state.find_all('span',", "class_='death_case').find('span', class_='icount').string } corona_info = [ { \"state_name\": state.find_all('span', class_='st_name')[0].string,", "requests.get(\"https://www.mygov.in/covid-19\") soup = BeautifulSoup(corona_html.content, 'html.parser') state_wise_data = soup.find_all('div', class_='views-row') information", "'discharge': information.find('div', class_='discharge').find('span', class_='icount').string, 'death': information.find('div', class_='death_case').find('span', class_='icount').string } corona_info", "from bs4 import BeautifulSoup def corona_data(request): \"Testaaaa\" corona_html = requests.get(\"https://www.mygov.in/covid-19\")", "class_='tick-active')[0].find_all('small')[0].string, \"discharge\": state.find_all('div', class_='tick-discharged')[0].find_all('small')[0].string, \"death\": state.find_all('div', class_='tick-death')[0].find_all('small')[0].string } for state", "\"confirm_case\": state.find_all('div', class_='tick-confirmed')[0].find_all('small')[0].string, \"active_case\": state.find_all('div', class_='tick-active')[0].find_all('small')[0].string, \"discharge\": state.find_all('div', class_='tick-discharged')[0].find_all('small')[0].string, \"death\":", "information.find('div', class_='discharge').find('span', class_='icount').string, 'death': information.find('div', class_='death_case').find('span', class_='icount').string } corona_info =", "class_='icount').string, 'discharge': information.find('div', class_='discharge').find('span', class_='icount').string, 'death': information.find('div', class_='death_case').find('span', class_='icount').string }", "information = soup.find('div', class_='information_row') info = { 'update_data': information.find('div', class_='info_title').find('span').string,", "'html.parser') state_wise_data = soup.find_all('div', class_='views-row') information = soup.find('div', 
class_='information_row') info", "\"discharge\": state.find_all('div', class_='tick-discharged')[0].find_all('small')[0].string, \"death\": state.find_all('div', class_='tick-death')[0].find_all('small')[0].string } for state in", "in state_wise_data ] context = { 'corona_info': info, 'data': sorted(corona_info,", "class_='icount').string } corona_info = [ { \"state_name\": state.find_all('span', class_='st_name')[0].string, \"confirm_case\":", "def corona_data(request): \"Testaaaa\" corona_html = requests.get(\"https://www.mygov.in/covid-19\") soup = BeautifulSoup(corona_html.content, 'html.parser')", "BeautifulSoup(corona_html.content, 'html.parser') state_wise_data = soup.find_all('div', class_='views-row') information = soup.find('div', class_='information_row')", "from django.shortcuts import render import requests from bs4 import BeautifulSoup", "corona_info = [ { \"state_name\": state.find_all('span', class_='st_name')[0].string, \"confirm_case\": state.find_all('div', class_='tick-confirmed')[0].find_all('small')[0].string,", "state.find_all('div', class_='tick-discharged')[0].find_all('small')[0].string, \"death\": state.find_all('div', class_='tick-death')[0].find_all('small')[0].string } for state in state_wise_data", "state.find_all('div', class_='tick-active')[0].find_all('small')[0].string, \"discharge\": state.find_all('div', class_='tick-discharged')[0].find_all('small')[0].string, \"death\": state.find_all('div', class_='tick-death')[0].find_all('small')[0].string } for", "class_='icount').string, 'death': information.find('div', class_='death_case').find('span', class_='icount').string } corona_info = [ {", "class_='active-case').find('span', class_='icount').string, 'discharge': information.find('div', class_='discharge').find('span', class_='icount').string, 'death': information.find('div', class_='death_case').find('span', class_='icount').string", "= soup.find('div', class_='information_row') info = { 'update_data': information.find('div', 
class_='info_title').find('span').string, 'active_case':", "\"state_name\": state.find_all('span', class_='st_name')[0].string, \"confirm_case\": state.find_all('div', class_='tick-confirmed')[0].find_all('small')[0].string, \"active_case\": state.find_all('div', class_='tick-active')[0].find_all('small')[0].string, \"discharge\":", "BeautifulSoup def corona_data(request): \"Testaaaa\" corona_html = requests.get(\"https://www.mygov.in/covid-19\") soup = BeautifulSoup(corona_html.content,", "} for state in state_wise_data ] context = { 'corona_info':", "information.find('div', class_='info_title').find('span').string, 'active_case': information.find('div', class_='active-case').find('span', class_='icount').string, 'discharge': information.find('div', class_='discharge').find('span', class_='icount').string,", "info = { 'update_data': information.find('div', class_='info_title').find('span').string, 'active_case': information.find('div', class_='active-case').find('span', class_='icount').string,", "django.shortcuts import render import requests from bs4 import BeautifulSoup def", "import requests from bs4 import BeautifulSoup def corona_data(request): \"Testaaaa\" corona_html", "corona_data(request): \"Testaaaa\" corona_html = requests.get(\"https://www.mygov.in/covid-19\") soup = BeautifulSoup(corona_html.content, 'html.parser') state_wise_data", "= [ { \"state_name\": state.find_all('span', class_='st_name')[0].string, \"confirm_case\": state.find_all('div', class_='tick-confirmed')[0].find_all('small')[0].string, \"active_case\":", "sorted(corona_info, key=lambda i: int(''.join(i['confirm_case'].replace(',', ''))), reverse=True) } return render(request, 'coronainfo/index.html',", "class_='st_name')[0].string, \"confirm_case\": state.find_all('div', class_='tick-confirmed')[0].find_all('small')[0].string, \"active_case\": state.find_all('div', class_='tick-active')[0].find_all('small')[0].string, \"discharge\": state.find_all('div', 
class_='tick-discharged')[0].find_all('small')[0].string,", "} corona_info = [ { \"state_name\": state.find_all('span', class_='st_name')[0].string, \"confirm_case\": state.find_all('div',", "class_='information_row') info = { 'update_data': information.find('div', class_='info_title').find('span').string, 'active_case': information.find('div', class_='active-case').find('span',", "\"active_case\": state.find_all('div', class_='tick-active')[0].find_all('small')[0].string, \"discharge\": state.find_all('div', class_='tick-discharged')[0].find_all('small')[0].string, \"death\": state.find_all('div', class_='tick-death')[0].find_all('small')[0].string }", "for state in state_wise_data ] context = { 'corona_info': info,", "info, 'data': sorted(corona_info, key=lambda i: int(''.join(i['confirm_case'].replace(',', ''))), reverse=True) } return", "state.find_all('div', class_='tick-confirmed')[0].find_all('small')[0].string, \"active_case\": state.find_all('div', class_='tick-active')[0].find_all('small')[0].string, \"discharge\": state.find_all('div', class_='tick-discharged')[0].find_all('small')[0].string, \"death\": state.find_all('div',", "state_wise_data ] context = { 'corona_info': info, 'data': sorted(corona_info, key=lambda", "state.find_all('span', class_='st_name')[0].string, \"confirm_case\": state.find_all('div', class_='tick-confirmed')[0].find_all('small')[0].string, \"active_case\": state.find_all('div', class_='tick-active')[0].find_all('small')[0].string, \"discharge\": state.find_all('div'," ]
[ "= [] model_out = [] comp_out = [] for etype", "time_diff (might need to add this) time = datetime.strftime(time, \"%Y-%m-%dT%H:%M:%S\")", "in model .in file as a list truth_arr = filter_times(truth_dict,", "+ etype + \".out\" parsed_arrivals.append(arrival) model_in.append(infile) model_out.append(outfile) comp_out.append(\"comparison_out/comp.\" + etype", "[] comp_out = [] for etype in ['EQS','EQP','SUS','SUP','THS','THP','SNS','SNP','PXS','PXP']: arrival =", "\" + phase) for offset in times: outp_file.write(\" \" +", "line in f: tmp = line.split() key = tmp[0] +", "# write outputs to file outp_file = open(comp_out, 'w') truth_arr", "= [] with open(file) as f: for ln in f:", "file) model_dict = read_output_to_dict(outf) # read output file for event", "else: phase = 'P' times = key_lookup(event, phase, model_dict) if", "t_upper = t + timedelta(seconds=padding_time) offsets = [] for time", "= key_lookup(event, phase, model_dict) if len(times) == 0: phase =", "23 if sec_int > 59: sec_int = sec_int - 60", "entry = ln.split() entry = entry[0].strip() entry = entry[len(entry)-20:len(entry)-6] entry", "- fudge_factor line.extend([formatted_time, row[4].strip(), row[5].strip()]) model_list.append(line) return model_list def arrivals_to_dictionary(arrivals):", "f: row = ln.split() line = [] line.extend([row[0].strip(), row[1].strip(), row[2].strip()])", "and create a dictionary def read_output_to_dict(filename): model_dict = {} with", "numbers formatted_time = datetime.strptime(tmp[3], \"%Y-%m-%dT%H:%M:%S.%f\") if key not in model_dict:", "time) offset = offset.total_seconds() offsets.append('{:.6f}'.format(offset)) return offsets def execute_script(arrival, inf,", "times) return times # search for arrivals within the padding", "return filtered # read in Caltech model output and create", "times # search for arrivals within the padding time window", "\":\" + entry[12:14] # ------------- TIME STAMP ISSUES -------------------- #", "model_list = [] with open(filename) as f: 
for ln in", "\"-\" + phase times = [] if key in model_dict.keys():", "if key in arrivals: filtered.append(arrivals[key]) return filtered # read in", "key in arrivals: filtered.append(arrivals[key]) return filtered # read in Caltech", "key not in model_dict: model_dict[key] = [] model_dict[key].append(formatted_time) except: pass", "in model_dict.keys(): times = model_dict[key] times = time_lookup(event[3], times) return", "time # or format time to absolute value: abs(t -", "time = datetime.strptime(entry, \"%Y-%m-%dT%H:%M:%S\") - fudge_factor # + time_diff (might", "offset = offset.total_seconds() offsets.append('{:.6f}'.format(offset)) return offsets def execute_script(arrival, inf, outf,", "+ phase times = [] if key in model_dict.keys(): times", "\"%Y-%m-%dT%H:%M:%S\") time = time[:-2] + sec_int time = datetime.strptime(time, \"%Y-%m-%dT%H:%M:%S\")", "phase, model_dict): key = event[0] + \"-\" + event[1] +", "= key[0:-7] picks[key] = arr return picks def model_in_to_array(file): timestamps", "outfile = \"output_files/GPD.\" + etype + \".out\" parsed_arrivals.append(arrival) model_in.append(infile) model_out.append(outfile)", "= time + timedelta(seconds=23) + time_diff time = datetime.strftime(time, \"%Y-%m-%dT%H:%M:%S\")", "formatted_time = datetime.strptime(tmp[3], \"%Y-%m-%dT%H:%M:%S.%f\") if key not in model_dict: model_dict[key]", "def model_in_to_array(file): timestamps = [] with open(file) as f: for", "= [] line.extend([row[0].strip(), row[1].strip(), row[2].strip()]) formatted_time = datetime.strptime(row[3], \"%Y-%m-%dT%H:%M:%S.%f\") -", "entry[len(entry)-20:len(entry)-6] entry = entry[0:4] + \"-\" + entry[4:6] + \"-\"", "datetime.strftime(time, \"%Y-%m-%dT%H:%M:%S\") # ----------------------------------------------------- timestamps.append(time) return timestamps def filter_times(arrivals, model_in):", "event[1] + \"-\" + phase times = [] if key", "offset.total_seconds() offsets.append('{:.6f}'.format(offset)) return offsets def execute_script(arrival, 
inf, outf, comp_out): #", "picks[key] = arr return picks def model_in_to_array(file): timestamps = []", "+ \"-\" + entry[6:8] + \"T\" + entry[8:10] + \":\"", "timestamp) model_in = model_in_to_array(inf) # read in model .in file", ".mseed files have correct timestamps \"\"\" time = datetime.strptime(entry, \"%Y-%m-%dT%H:%M:%S\")", "except: pass return model_dict # lookup time in the dictionary", "entry = entry[0].strip() entry = entry[len(entry)-20:len(entry)-6] entry = entry[0:4] +", "entry[12:14] # ------------- TIME STAMP ISSUES -------------------- # case 1:", "(t_truth - t_model_pick) import numpy import math import string import", "= datetime.strptime(entry, \"%Y-%m-%dT%H:%M:%S\") if time.second >=37 and time.second <=51: time", "time = datetime.strftime(time, \"%Y-%m-%dT%H:%M:%S\") time = time[:-2] + sec_int time", "outp_file = open(comp_out, 'w') truth_arr = read_arrivals_to_arr(arrival) # read in", "[P, S, N] Note: N - not detected # -", "formatted_time = datetime.strptime(row[3], \"%Y-%m-%dT%H:%M:%S.%f\") - fudge_factor line.extend([formatted_time, row[4].strip(), row[5].strip()]) model_list.append(line)", "model_dict.keys(): times = model_dict[key] times = time_lookup(event[3], times) return times", "def arrivals_to_dictionary(arrivals): picks = {} for arr in arrivals: key", "model_list def arrivals_to_dictionary(arrivals): picks = {} for arr in arrivals:", "open(comp_out, 'w') truth_arr = read_arrivals_to_arr(arrival) # read in the arrival", "outputs: # - type of event tested [EQS, EQP, SUS,", "def execute_script(arrival, inf, outf, comp_out): # write outputs to file", "+ \".in\" outfile = \"output_files/GPD.\" + etype + \".out\" parsed_arrivals.append(arrival)", "N] Note: N - not detected # - model time", "= [] with open(filename) as f: for ln in f:", "\"%Y-%m-%dT%H:%M:%S.%f\") if key not in model_dict: model_dict[key] = [] model_dict[key].append(formatted_time)", "case 1: run if .mseed files have correct timestamps \"\"\"", "tmp[0] + \"-\" + tmp[1] + 
\"-\" + tmp[2] try:", "\".arrivals.txt\" infile = \"input_files/GPD.\" + etype + \".in\" outfile =", "+ \"-\" + event[1] + \"-\" + phase times =", "and time.second <=51: time = time + timedelta(seconds=23) + time_diff", "to a dictionary (key=truncated timestamp) model_in = model_in_to_array(inf) # read", "pass return model_dict # lookup time in the dictionary def", "+ \".arrivals.txt\" infile = \"input_files/GPD.\" + etype + \".in\" outfile", "model_in): filtered = [] for key in model_in: if key", "execute_script(arrival, inf, outf, comp_out): # write outputs to file outp_file", "the model (.in file) model_dict = read_output_to_dict(outf) # read output", "import os import csv from datetime import datetime from datetime", "{} for arr in arrivals: key = datetime.strftime(arr[3], \"%Y-%m-%dT%H:%M:%S.%f\") key", "(.in file) model_dict = read_output_to_dict(outf) # read output file for", "+ \" \" + phase) for offset in times: outp_file.write(\"", "read output file for event in truth_arr: phase = event[2]", "[] with open(file) as f: for ln in f: entry", "= t - timedelta(seconds=padding_time) t_upper = t + timedelta(seconds=padding_time) offsets", "in arrivals: filtered.append(arrivals[key]) return filtered # read in Caltech model", "t + timedelta(seconds=padding_time) offsets = [] for time in time_arr:", "for ln in f: row = ln.split() line = []", "phase = 'S' else: phase = 'P' times = key_lookup(event,", "t_lower = t - timedelta(seconds=padding_time) t_upper = t + timedelta(seconds=padding_time)", "if time.second >=37 and time.second <=51: time = time +", "= \"output_files/GPD.\" + etype + \".out\" parsed_arrivals.append(arrival) model_in.append(infile) model_out.append(outfile) comp_out.append(\"comparison_out/comp.\"", "datetime.strptime(tmp[3], \"%Y-%m-%dT%H:%M:%S.%f\") if key not in model_dict: model_dict[key] = []", "def key_lookup(event, phase, model_dict): key = event[0] + \"-\" +", "timedelta # params padding_time = 10 fudge_factor = timedelta(seconds=27) 
time_diff", "time = datetime.strftime(time, \"%Y-%m-%dT%H:%M:%S\") # ----------------------------------------------------- timestamps.append(time) return timestamps def", "phase = event[2] times = key_lookup(event, phase, model_dict) if len(times)", "the timestamps time = datetime.strptime(entry, \"%Y-%m-%dT%H:%M:%S\") if time.second >=37 and", "+ entry[12:14] # ------------- TIME STAMP ISSUES -------------------- # case", "PXP] # - phase [P, S, N] Note: N -", "model_list.append(line) return model_list def arrivals_to_dictionary(arrivals): picks = {} for arr", "# - type of event tested [EQS, EQP, SUS, SUP,", "time_lookup(t, time_arr): t_lower = t - timedelta(seconds=padding_time) t_upper = t", "tests GPD model using UW truth data # Test outputs:", "model_dict = {} with open(filename) as f: for line in", "time_lookup(event[3], times) return times # search for arrivals within the", "padding time window def time_lookup(t, time_arr): t_lower = t -", "entry[8:10] + \":\" + entry[10:12] + \":\" + entry[12:14] #", "\"output_files/GPD.\" + etype + \".out\" parsed_arrivals.append(arrival) model_in.append(infile) model_out.append(outfile) comp_out.append(\"comparison_out/comp.\" +", "outf, comp_out): # write outputs to file outp_file = open(comp_out,", "t_model_pick) import numpy import math import string import datetime import", "# file dirs parsed_arrivals = [] model_in = [] model_out", ">=37 and time.second <=51: time = time + timedelta(seconds=23) +", "= timedelta(seconds=10) # file dirs parsed_arrivals = [] model_in =", "in ['EQS','EQP','SUS','SUP','THS','THP','SNS','SNP','PXS','PXP']: arrival = \"parsed_arrivals/\" + etype + \".arrivals.txt\" infile", "key in model_dict.keys(): times = model_dict[key] times = time_lookup(event[3], times)", "in arrivals: key = datetime.strftime(arr[3], \"%Y-%m-%dT%H:%M:%S.%f\") key = key[0:-7] picks[key]", "array def read_arrivals_to_arr(filename): model_list = [] with open(filename) as f:", "key in model_in: if key in arrivals: 
filtered.append(arrivals[key]) return filtered", "sys import os import csv from datetime import datetime from", "line = [] line.extend([row[0].strip(), row[1].strip(), row[2].strip()]) formatted_time = datetime.strptime(row[3], \"%Y-%m-%dT%H:%M:%S.%f\")", "sec_int time = datetime.strptime(time, \"%Y-%m-%dT%H:%M:%S\") + time_diff time = datetime.strftime(time,", "= datetime.strptime(tmp[3], \"%Y-%m-%dT%H:%M:%S.%f\") if key not in model_dict: model_dict[key] =", "+ tmp[2] try: # fails if date is missing floating", "tmp[1] + \"-\" + tmp[2] try: # fails if date", "event tested [EQS, EQP, SUS, SUP, THS, THP, SNS, SNP,", "in time_arr: if time > t_lower and time < t_upper:", "truth_arr = read_arrivals_to_arr(arrival) # read in the arrival times to", "datetime.strftime(time, \"%Y-%m-%dT%H:%M:%S\") else: sec_int = time.second + 23 if sec_int", "file dirs parsed_arrivals = [] model_in = [] model_out =", "Note: N - not detected # - model time offset", "datetime.strftime(time, \"%Y-%m-%dT%H:%M:%S\") \"\"\" # case 2: run if .mseed files", "- not detected # - model time offset (t_truth -", "this) time = datetime.strftime(time, \"%Y-%m-%dT%H:%M:%S\") \"\"\" # case 2: run", "= t + timedelta(seconds=padding_time) offsets = [] for time in", "SUP, THS, THP, SNS, SNP, PXS, PXP] # - phase", "= t - time # or format time to absolute", "to a list truth_dict = arrivals_to_dictionary(truth_arr) # convert arrivals to", "+ entry[6:8] + \"T\" + entry[8:10] + \":\" + entry[10:12]", "= arr return picks def model_in_to_array(file): timestamps = [] with", "# ------------- TIME STAMP ISSUES -------------------- # case 1: run", "dictionary def key_lookup(event, phase, model_dict): key = event[0] + \"-\"", "[EQS, EQP, SUS, SUP, THS, THP, SNS, SNP, PXS, PXP]", "correct timestamps \"\"\" time = datetime.strptime(entry, \"%Y-%m-%dT%H:%M:%S\") - fudge_factor #", "list truth_dict = arrivals_to_dictionary(truth_arr) # convert arrivals to a dictionary", "timestamps \"\"\" time = datetime.strptime(entry, 
\"%Y-%m-%dT%H:%M:%S\") - fudge_factor # +", "line.split() key = tmp[0] + \"-\" + tmp[1] + \"-\"", "def filter_times(arrivals, model_in): filtered = [] for key in model_in:", "PXS, PXP] # - phase [P, S, N] Note: N", "len(times) == 0: phase = 'N' times = ['nan'] outp_file.write(str(event[5])", "key_lookup(event, phase, model_dict): key = event[0] + \"-\" + event[1]", "= model_in_to_array(inf) # read in model .in file as a", "picks def model_in_to_array(file): timestamps = [] with open(file) as f:", "times: outp_file.write(\" \" + str(offset)) outp_file.write('\\n') outp_file.close() for i in", "read in UW arrival times as an array def read_arrivals_to_arr(filename):", "------------- TIME STAMP ISSUES -------------------- # case 1: run if", "in UW arrival times as an array def read_arrivals_to_arr(filename): model_list", "passed to the model (.in file) model_dict = read_output_to_dict(outf) #", "comp_out): # write outputs to file outp_file = open(comp_out, 'w')", "etype + \".out\" parsed_arrivals.append(arrival) model_in.append(infile) model_out.append(outfile) comp_out.append(\"comparison_out/comp.\" + etype +", "detected # - model time offset (t_truth - t_model_pick) import", "parsed_arrivals = [] model_in = [] model_out = [] comp_out", "key = key[0:-7] picks[key] = arr return picks def model_in_to_array(file):", "(key=truncated timestamp) model_in = model_in_to_array(inf) # read in model .in", "- t_model_pick) import numpy import math import string import datetime", "ln.split() line = [] line.extend([row[0].strip(), row[1].strip(), row[2].strip()]) formatted_time = datetime.strptime(row[3],", "if .mseed files have buggy minutes in the timestamps time", "= time.second + 23 if sec_int > 59: sec_int =", "in truth_arr: phase = event[2] times = key_lookup(event, phase, model_dict)", "\"%Y-%m-%dT%H:%M:%S\") - fudge_factor # + time_diff (might need to add", "[] with open(filename) as f: for ln in f: row", "floating point numbers formatted_time = datetime.strptime(tmp[3], 
\"%Y-%m-%dT%H:%M:%S.%f\") if key not", "+ \".out\") # ------------------ # read in UW arrival times", "timedelta(seconds=27) time_diff = timedelta(seconds=10) # file dirs parsed_arrivals = []", "offsets = [] for time in time_arr: if time >", "TIME STAMP ISSUES -------------------- # case 1: run if .mseed", "outp_file.write(str(event[5]) + \" \" + phase) for offset in times:", "\"%Y-%m-%dT%H:%M:%S\") else: sec_int = time.second + 23 if sec_int >", "as f: for line in f: tmp = line.split() key", "if time > t_lower and time < t_upper: offset =", "= datetime.strftime(time, \"%Y-%m-%dT%H:%M:%S\") else: sec_int = time.second + 23 if", "- phase [P, S, N] Note: N - not detected", "UW arrival times as an array def read_arrivals_to_arr(filename): model_list =", "# read in model .in file as a list truth_arr", "arrivals: key = datetime.strftime(arr[3], \"%Y-%m-%dT%H:%M:%S.%f\") key = key[0:-7] picks[key] =", "= [] for etype in ['EQS','EQP','SUS','SUP','THS','THP','SNS','SNP','PXS','PXP']: arrival = \"parsed_arrivals/\" +", "= open(comp_out, 'w') truth_arr = read_arrivals_to_arr(arrival) # read in the", "time = datetime.strptime(time, \"%Y-%m-%dT%H:%M:%S\") + time_diff time = datetime.strftime(time, \"%Y-%m-%dT%H:%M:%S\")", "if len(times) == 0: if phase == 'P': phase =", "= sec_int - 60 sec_int = str(sec_int).zfill(2) time = datetime.strftime(time,", "a list truth_dict = arrivals_to_dictionary(truth_arr) # convert arrivals to a", "case 2: run if .mseed files have buggy minutes in", "datetime.strftime(arr[3], \"%Y-%m-%dT%H:%M:%S.%f\") key = key[0:-7] picks[key] = arr return picks", "-------------------- # case 1: run if .mseed files have correct", "timedelta(seconds=23) + time_diff time = datetime.strftime(time, \"%Y-%m-%dT%H:%M:%S\") else: sec_int =", "for arr in arrivals: key = datetime.strftime(arr[3], \"%Y-%m-%dT%H:%M:%S.%f\") key =", "model_dict): key = event[0] + \"-\" + event[1] + \"-\"", "\"\"\" time = datetime.strptime(entry, \"%Y-%m-%dT%H:%M:%S\") - fudge_factor # + 
time_diff", "read_output_to_dict(outf) # read output file for event in truth_arr: phase", "str(offset)) outp_file.write('\\n') outp_file.close() for i in range(len(model_out)): execute_script(parsed_arrivals[i], model_in[i], model_out[i],", "time_diff time = datetime.strftime(time, \"%Y-%m-%dT%H:%M:%S\") else: sec_int = time.second +", "[] for key in model_in: if key in arrivals: filtered.append(arrivals[key])", "= [] for key in model_in: if key in arrivals:", "event[2] times = key_lookup(event, phase, model_dict) if len(times) == 0:", "timedelta(seconds=padding_time) t_upper = t + timedelta(seconds=padding_time) offsets = [] for", "# ----------------------------------------------------- timestamps.append(time) return timestamps def filter_times(arrivals, model_in): filtered =", "+ str(offset)) outp_file.write('\\n') outp_file.close() for i in range(len(model_out)): execute_script(parsed_arrivals[i], model_in[i],", "= time[:-2] + sec_int time = datetime.strptime(time, \"%Y-%m-%dT%H:%M:%S\") + time_diff", "in times: outp_file.write(\" \" + str(offset)) outp_file.write('\\n') outp_file.close() for i", "Caltech model output and create a dictionary def read_output_to_dict(filename): model_dict", "time = datetime.strftime(time, \"%Y-%m-%dT%H:%M:%S\") \"\"\" # case 2: run if", "value: abs(t - time) offset = offset.total_seconds() offsets.append('{:.6f}'.format(offset)) return offsets", "type of event tested [EQS, EQP, SUS, SUP, THS, THP,", "f: for line in f: tmp = line.split() key =", "timestamps time = datetime.strptime(entry, \"%Y-%m-%dT%H:%M:%S\") if time.second >=37 and time.second", "+ \"-\" + phase times = [] if key in", "a dictionary (key=truncated timestamp) model_in = model_in_to_array(inf) # read in", "sec_int = time.second + 23 if sec_int > 59: sec_int", "return picks def model_in_to_array(file): timestamps = [] with open(file) as", "import csv from datetime import datetime from datetime import timedelta", "# + time_diff (might need to add this) time =", 
"arrivals to picks that were passed to the model (.in", "missing floating point numbers formatted_time = datetime.strptime(tmp[3], \"%Y-%m-%dT%H:%M:%S.%f\") if key", "entry[10:12] + \":\" + entry[12:14] # ------------- TIME STAMP ISSUES", "model_dict: model_dict[key] = [] model_dict[key].append(formatted_time) except: pass return model_dict #", "= entry[len(entry)-20:len(entry)-6] entry = entry[0:4] + \"-\" + entry[4:6] +", "+ \"-\" + tmp[2] try: # fails if date is", "= datetime.strftime(time, \"%Y-%m-%dT%H:%M:%S\") # ----------------------------------------------------- timestamps.append(time) return timestamps def filter_times(arrivals,", "tmp = line.split() key = tmp[0] + \"-\" + tmp[1]", "0: phase = 'N' times = ['nan'] outp_file.write(str(event[5]) + \"", "sec_int = sec_int - 60 sec_int = str(sec_int).zfill(2) time =", "+ tmp[1] + \"-\" + tmp[2] try: # fails if", "times = model_dict[key] times = time_lookup(event[3], times) return times #", "etype + \".out\") # ------------------ # read in UW arrival", "+ \"-\" + tmp[1] + \"-\" + tmp[2] try: #", "read_arrivals_to_arr(filename): model_list = [] with open(filename) as f: for ln", "STAMP ISSUES -------------------- # case 1: run if .mseed files", "row[4].strip(), row[5].strip()]) model_list.append(line) return model_list def arrivals_to_dictionary(arrivals): picks = {}", "in f: tmp = line.split() key = tmp[0] + \"-\"", "to absolute value: abs(t - time) offset = offset.total_seconds() offsets.append('{:.6f}'.format(offset))", "model_dict = read_output_to_dict(outf) # read output file for event in", "data # Test outputs: # - type of event tested", "to add this) time = datetime.strftime(time, \"%Y-%m-%dT%H:%M:%S\") \"\"\" # case", "outputs to file outp_file = open(comp_out, 'w') truth_arr = read_arrivals_to_arr(arrival)", "Script tests GPD model using UW truth data # Test", "arrivals: filtered.append(arrivals[key]) return filtered # read in Caltech model output", "# Test outputs: # - type of event tested [EQS,", "> 
59: sec_int = sec_int - 60 sec_int = str(sec_int).zfill(2)", "# convert arrivals to a dictionary (key=truncated timestamp) model_in =", "if len(times) == 0: phase = 'N' times = ['nan']", "'S' else: phase = 'P' times = key_lookup(event, phase, model_dict)", "# read in the arrival times to a list truth_dict", "phase = 'N' times = ['nan'] outp_file.write(str(event[5]) + \" \"", "truth data # Test outputs: # - type of event", "line.extend([formatted_time, row[4].strip(), row[5].strip()]) model_list.append(line) return model_list def arrivals_to_dictionary(arrivals): picks =", "math import string import datetime import sys import os import", "= 'N' times = ['nan'] outp_file.write(str(event[5]) + \" \" +", "[] for etype in ['EQS','EQP','SUS','SUP','THS','THP','SNS','SNP','PXS','PXP']: arrival = \"parsed_arrivals/\" + etype", "entry[0].strip() entry = entry[len(entry)-20:len(entry)-6] entry = entry[0:4] + \"-\" +", "\"-\" + entry[6:8] + \"T\" + entry[8:10] + \":\" +", "and time < t_upper: offset = t - time #", "== 0: phase = 'N' times = ['nan'] outp_file.write(str(event[5]) +", "fails if date is missing floating point numbers formatted_time =", "+ etype + \".in\" outfile = \"output_files/GPD.\" + etype +", "EQP, SUS, SUP, THS, THP, SNS, SNP, PXS, PXP] #", "datetime import sys import os import csv from datetime import", "# search for arrivals within the padding time window def", "a dictionary def read_output_to_dict(filename): model_dict = {} with open(filename) as", "model_in = model_in_to_array(inf) # read in model .in file as", "arrivals_to_dictionary(truth_arr) # convert arrivals to a dictionary (key=truncated timestamp) model_in", "\"-\" + event[1] + \"-\" + phase times = []", "datetime.strptime(time, \"%Y-%m-%dT%H:%M:%S\") + time_diff time = datetime.strftime(time, \"%Y-%m-%dT%H:%M:%S\") # -----------------------------------------------------", "model_in) # filter arrivals to picks that were passed to", "truth_arr: phase = event[2] times = key_lookup(event, phase, 
model_dict) if", "time < t_upper: offset = t - time # or", "f: tmp = line.split() key = tmp[0] + \"-\" +", "\".out\" parsed_arrivals.append(arrival) model_in.append(infile) model_out.append(outfile) comp_out.append(\"comparison_out/comp.\" + etype + \".out\") #", "# Script tests GPD model using UW truth data #", "times as an array def read_arrivals_to_arr(filename): model_list = [] with", "in the arrival times to a list truth_dict = arrivals_to_dictionary(truth_arr)", "minutes in the timestamps time = datetime.strptime(entry, \"%Y-%m-%dT%H:%M:%S\") if time.second", "outp_file.write('\\n') outp_file.close() for i in range(len(model_out)): execute_script(parsed_arrivals[i], model_in[i], model_out[i], comp_out[i])", "= arrivals_to_dictionary(truth_arr) # convert arrivals to a dictionary (key=truncated timestamp)", "= filter_times(truth_dict, model_in) # filter arrivals to picks that were", "# case 1: run if .mseed files have correct timestamps", "were passed to the model (.in file) model_dict = read_output_to_dict(outf)", "model_in_to_array(inf) # read in model .in file as a list", "datetime.strftime(time, \"%Y-%m-%dT%H:%M:%S\") time = time[:-2] + sec_int time = datetime.strptime(time,", "\" \" + phase) for offset in times: outp_file.write(\" \"", "2: run if .mseed files have buggy minutes in the", "datetime.strptime(entry, \"%Y-%m-%dT%H:%M:%S\") if time.second >=37 and time.second <=51: time =", "model_out = [] comp_out = [] for etype in ['EQS','EQP','SUS','SUP','THS','THP','SNS','SNP','PXS','PXP']:", "in model_in: if key in arrivals: filtered.append(arrivals[key]) return filtered #", "\".in\" outfile = \"output_files/GPD.\" + etype + \".out\" parsed_arrivals.append(arrival) model_in.append(infile)", "model_dict[key] times = time_lookup(event[3], times) return times # search for", "# read in UW arrival times as an array def", "# lookup time in the dictionary def key_lookup(event, phase, model_dict):", "sec_int > 59: sec_int = sec_int - 60 sec_int =", 
"model_dict[key].append(formatted_time) except: pass return model_dict # lookup time in the", "SUS, SUP, THS, THP, SNS, SNP, PXS, PXP] # -", "f: for ln in f: row = ln.split() line =", "key[0:-7] picks[key] = arr return picks def model_in_to_array(file): timestamps =", "[] model_in = [] model_out = [] comp_out = []", "date is missing floating point numbers formatted_time = datetime.strptime(tmp[3], \"%Y-%m-%dT%H:%M:%S.%f\")", "row[1].strip(), row[2].strip()]) formatted_time = datetime.strptime(row[3], \"%Y-%m-%dT%H:%M:%S.%f\") - fudge_factor line.extend([formatted_time, row[4].strip(),", "the dictionary def key_lookup(event, phase, model_dict): key = event[0] +", "output and create a dictionary def read_output_to_dict(filename): model_dict = {}", "to the model (.in file) model_dict = read_output_to_dict(outf) # read", "key_lookup(event, phase, model_dict) if len(times) == 0: phase = 'N'", "tested [EQS, EQP, SUS, SUP, THS, THP, SNS, SNP, PXS,", "filtered # read in Caltech model output and create a", "+ \"-\" + entry[4:6] + \"-\" + entry[6:8] + \"T\"", "= key_lookup(event, phase, model_dict) if len(times) == 0: if phase", "= time_lookup(event[3], times) return times # search for arrivals within", "output file for event in truth_arr: phase = event[2] times", "if date is missing floating point numbers formatted_time = datetime.strptime(tmp[3],", ".mseed files have buggy minutes in the timestamps time =", "+ time_diff time = datetime.strftime(time, \"%Y-%m-%dT%H:%M:%S\") # ----------------------------------------------------- timestamps.append(time) return", "filtered.append(arrivals[key]) return filtered # read in Caltech model output and", "+ sec_int time = datetime.strptime(time, \"%Y-%m-%dT%H:%M:%S\") + time_diff time =", "have buggy minutes in the timestamps time = datetime.strptime(entry, \"%Y-%m-%dT%H:%M:%S\")", "the arrival times to a list truth_dict = arrivals_to_dictionary(truth_arr) #", "+ 23 if sec_int > 59: sec_int = sec_int -", "datetime import timedelta # 
params padding_time = 10 fudge_factor =", "def read_arrivals_to_arr(filename): model_list = [] with open(filename) as f: for", "= read_arrivals_to_arr(arrival) # read in the arrival times to a", "etype + \".arrivals.txt\" infile = \"input_files/GPD.\" + etype + \".in\"", "etype + \".in\" outfile = \"output_files/GPD.\" + etype + \".out\"", "+ entry[4:6] + \"-\" + entry[6:8] + \"T\" + entry[8:10]", "\"parsed_arrivals/\" + etype + \".arrivals.txt\" infile = \"input_files/GPD.\" + etype", "fudge_factor # + time_diff (might need to add this) time", "\":\" + entry[10:12] + \":\" + entry[12:14] # ------------- TIME", "infile = \"input_files/GPD.\" + etype + \".in\" outfile = \"output_files/GPD.\"", "time.second <=51: time = time + timedelta(seconds=23) + time_diff time", "timestamps def filter_times(arrivals, model_in): filtered = [] for key in", "= [] model_in = [] model_out = [] comp_out =", "window def time_lookup(t, time_arr): t_lower = t - timedelta(seconds=padding_time) t_upper", "# read output file for event in truth_arr: phase =", "times = key_lookup(event, phase, model_dict) if len(times) == 0: phase", "filter_times(truth_dict, model_in) # filter arrivals to picks that were passed", "ln in f: row = ln.split() line = [] line.extend([row[0].strip(),", "not in model_dict: model_dict[key] = [] model_dict[key].append(formatted_time) except: pass return", "UW truth data # Test outputs: # - type of", "# params padding_time = 10 fudge_factor = timedelta(seconds=27) time_diff =", "= entry[0:4] + \"-\" + entry[4:6] + \"-\" + entry[6:8]", "= [] comp_out = [] for etype in ['EQS','EQP','SUS','SUP','THS','THP','SNS','SNP','PXS','PXP']: arrival", "model .in file as a list truth_arr = filter_times(truth_dict, model_in)", "entry = entry[0:4] + \"-\" + entry[4:6] + \"-\" +", "\"-\" + tmp[1] + \"-\" + tmp[2] try: # fails", "if key in model_dict.keys(): times = model_dict[key] times = time_lookup(event[3],", "fudge_factor line.extend([formatted_time, row[4].strip(), 
row[5].strip()]) model_list.append(line) return model_list def arrivals_to_dictionary(arrivals): picks", "open(file) as f: for ln in f: entry = ln.split()", "comp_out.append(\"comparison_out/comp.\" + etype + \".out\") # ------------------ # read in", "+ \":\" + entry[12:14] # ------------- TIME STAMP ISSUES --------------------", "time + timedelta(seconds=23) + time_diff time = datetime.strftime(time, \"%Y-%m-%dT%H:%M:%S\") else:", "picks = {} for arr in arrivals: key = datetime.strftime(arr[3],", "\"%Y-%m-%dT%H:%M:%S\") # ----------------------------------------------------- timestamps.append(time) return timestamps def filter_times(arrivals, model_in): filtered", "in f: row = ln.split() line = [] line.extend([row[0].strip(), row[1].strip(),", "model output and create a dictionary def read_output_to_dict(filename): model_dict =", "open(filename) as f: for line in f: tmp = line.split()", "\"%Y-%m-%dT%H:%M:%S\") if time.second >=37 and time.second <=51: time = time", "else: sec_int = time.second + 23 if sec_int > 59:", "== 0: if phase == 'P': phase = 'S' else:", "SNS, SNP, PXS, PXP] # - phase [P, S, N]", "dictionary (key=truncated timestamp) model_in = model_in_to_array(inf) # read in model", "key = tmp[0] + \"-\" + tmp[1] + \"-\" +", "run if .mseed files have buggy minutes in the timestamps", "not detected # - model time offset (t_truth - t_model_pick)", "picks that were passed to the model (.in file) model_dict", "entry[0:4] + \"-\" + entry[4:6] + \"-\" + entry[6:8] +", "fudge_factor = timedelta(seconds=27) time_diff = timedelta(seconds=10) # file dirs parsed_arrivals", "run if .mseed files have correct timestamps \"\"\" time =", "time = datetime.strptime(entry, \"%Y-%m-%dT%H:%M:%S\") if time.second >=37 and time.second <=51:", "time in the dictionary def key_lookup(event, phase, model_dict): key =", "= 'S' else: phase = 'P' times = key_lookup(event, phase,", "\"%Y-%m-%dT%H:%M:%S\") \"\"\" # case 2: run if .mseed files have", "from datetime import datetime from 
datetime import timedelta # params", "model_in = [] model_out = [] comp_out = [] for", "entry = entry[len(entry)-20:len(entry)-6] entry = entry[0:4] + \"-\" + entry[4:6]", "- time # or format time to absolute value: abs(t", "- type of event tested [EQS, EQP, SUS, SUP, THS,", "ln in f: entry = ln.split() entry = entry[0].strip() entry", "= datetime.strftime(time, \"%Y-%m-%dT%H:%M:%S\") \"\"\" # case 2: run if .mseed", "- fudge_factor # + time_diff (might need to add this)", "if phase == 'P': phase = 'S' else: phase =", "times = [] if key in model_dict.keys(): times = model_dict[key]", "\"\"\" # case 2: run if .mseed files have buggy", "datetime.strptime(row[3], \"%Y-%m-%dT%H:%M:%S.%f\") - fudge_factor line.extend([formatted_time, row[4].strip(), row[5].strip()]) model_list.append(line) return model_list", "import string import datetime import sys import os import csv", "= line.split() key = tmp[0] + \"-\" + tmp[1] +", "60 sec_int = str(sec_int).zfill(2) time = datetime.strftime(time, \"%Y-%m-%dT%H:%M:%S\") time =", "phase) for offset in times: outp_file.write(\" \" + str(offset)) outp_file.write('\\n')", "datetime.strptime(entry, \"%Y-%m-%dT%H:%M:%S\") - fudge_factor # + time_diff (might need to", "= event[0] + \"-\" + event[1] + \"-\" + phase", "return times # search for arrivals within the padding time", "GPD model using UW truth data # Test outputs: #", "model_dict) if len(times) == 0: phase = 'N' times =", "for time in time_arr: if time > t_lower and time", "+ etype + \".arrivals.txt\" infile = \"input_files/GPD.\" + etype +", "0: if phase == 'P': phase = 'S' else: phase", "for ln in f: entry = ln.split() entry = entry[0].strip()", "10 fudge_factor = timedelta(seconds=27) time_diff = timedelta(seconds=10) # file dirs", "a list truth_arr = filter_times(truth_dict, model_in) # filter arrivals to", "time_arr: if time > t_lower and time < t_upper: offset", "for line in f: tmp = line.split() key = tmp[0]", "lookup time in the dictionary def key_lookup(event, phase, 
model_dict): key", "return timestamps def filter_times(arrivals, model_in): filtered = [] for key", "time window def time_lookup(t, time_arr): t_lower = t - timedelta(seconds=padding_time)", "for key in model_in: if key in arrivals: filtered.append(arrivals[key]) return", "padding_time = 10 fudge_factor = timedelta(seconds=27) time_diff = timedelta(seconds=10) #", "as f: for ln in f: entry = ln.split() entry", "from datetime import timedelta # params padding_time = 10 fudge_factor", "datetime import datetime from datetime import timedelta # params padding_time", "THS, THP, SNS, SNP, PXS, PXP] # - phase [P,", "timestamps = [] with open(file) as f: for ln in", "read_arrivals_to_arr(arrival) # read in the arrival times to a list", "as an array def read_arrivals_to_arr(filename): model_list = [] with open(filename)", "\"%Y-%m-%dT%H:%M:%S\") + time_diff time = datetime.strftime(time, \"%Y-%m-%dT%H:%M:%S\") # ----------------------------------------------------- timestamps.append(time)", "numpy import math import string import datetime import sys import", "'P': phase = 'S' else: phase = 'P' times =", "= datetime.strptime(time, \"%Y-%m-%dT%H:%M:%S\") + time_diff time = datetime.strftime(time, \"%Y-%m-%dT%H:%M:%S\") #", "- model time offset (t_truth - t_model_pick) import numpy import", "ln.split() entry = entry[0].strip() entry = entry[len(entry)-20:len(entry)-6] entry = entry[0:4]", "# filter arrivals to picks that were passed to the", "string import datetime import sys import os import csv from", "for etype in ['EQS','EQP','SUS','SUP','THS','THP','SNS','SNP','PXS','PXP']: arrival = \"parsed_arrivals/\" + etype +", "buggy minutes in the timestamps time = datetime.strptime(entry, \"%Y-%m-%dT%H:%M:%S\") if", "as a list truth_arr = filter_times(truth_dict, model_in) # filter arrivals", "line.extend([row[0].strip(), row[1].strip(), row[2].strip()]) formatted_time = datetime.strptime(row[3], \"%Y-%m-%dT%H:%M:%S.%f\") - fudge_factor line.extend([formatted_time,", "model (.in 
file) model_dict = read_output_to_dict(outf) # read output file", "to file outp_file = open(comp_out, 'w') truth_arr = read_arrivals_to_arr(arrival) #", "# ------------------ # read in UW arrival times as an", "phase == 'P': phase = 'S' else: phase = 'P'", "----------------------------------------------------- timestamps.append(time) return timestamps def filter_times(arrivals, model_in): filtered = []", "time = time + timedelta(seconds=23) + time_diff time = datetime.strftime(time,", "time offset (t_truth - t_model_pick) import numpy import math import", "search for arrivals within the padding time window def time_lookup(t,", "[] for time in time_arr: if time > t_lower and", "key = event[0] + \"-\" + event[1] + \"-\" +", "[] model_out = [] comp_out = [] for etype in", "= entry[0].strip() entry = entry[len(entry)-20:len(entry)-6] entry = entry[0:4] + \"-\"", "have correct timestamps \"\"\" time = datetime.strptime(entry, \"%Y-%m-%dT%H:%M:%S\") - fudge_factor", "> t_lower and time < t_upper: offset = t -", "------------------ # read in UW arrival times as an array", "= \"parsed_arrivals/\" + etype + \".arrivals.txt\" infile = \"input_files/GPD.\" +", "= [] model_dict[key].append(formatted_time) except: pass return model_dict # lookup time", "# - phase [P, S, N] Note: N - not", "the padding time window def time_lookup(t, time_arr): t_lower = t", "[] line.extend([row[0].strip(), row[1].strip(), row[2].strip()]) formatted_time = datetime.strptime(row[3], \"%Y-%m-%dT%H:%M:%S.%f\") - fudge_factor", "import datetime from datetime import timedelta # params padding_time =", "entry[4:6] + \"-\" + entry[6:8] + \"T\" + entry[8:10] +", "['nan'] outp_file.write(str(event[5]) + \" \" + phase) for offset in", "timedelta(seconds=padding_time) offsets = [] for time in time_arr: if time", "of event tested [EQS, EQP, SUS, SUP, THS, THP, SNS,", "arrivals within the padding time window def time_lookup(t, time_arr): t_lower", "truth_arr = filter_times(truth_dict, model_in) # filter 
arrivals to picks that", "format time to absolute value: abs(t - time) offset =", "file outp_file = open(comp_out, 'w') truth_arr = read_arrivals_to_arr(arrival) # read", "to picks that were passed to the model (.in file)", "time_diff time = datetime.strftime(time, \"%Y-%m-%dT%H:%M:%S\") # ----------------------------------------------------- timestamps.append(time) return timestamps", "+ time_diff time = datetime.strftime(time, \"%Y-%m-%dT%H:%M:%S\") else: sec_int = time.second", "arrivals_to_dictionary(arrivals): picks = {} for arr in arrivals: key =", "{} with open(filename) as f: for line in f: tmp", "in the timestamps time = datetime.strptime(entry, \"%Y-%m-%dT%H:%M:%S\") if time.second >=37", "if .mseed files have correct timestamps \"\"\" time = datetime.strptime(entry,", "f: entry = ln.split() entry = entry[0].strip() entry = entry[len(entry)-20:len(entry)-6]", "with open(filename) as f: for line in f: tmp =", "time.second + 23 if sec_int > 59: sec_int = sec_int", "= datetime.strptime(entry, \"%Y-%m-%dT%H:%M:%S\") - fudge_factor # + time_diff (might need", "= \"input_files/GPD.\" + etype + \".in\" outfile = \"output_files/GPD.\" +", "# - model time offset (t_truth - t_model_pick) import numpy", "= 10 fudge_factor = timedelta(seconds=27) time_diff = timedelta(seconds=10) # file", "an array def read_arrivals_to_arr(filename): model_list = [] with open(filename) as", "import math import string import datetime import sys import os", "\"T\" + entry[8:10] + \":\" + entry[10:12] + \":\" +", "with open(file) as f: for ln in f: entry =", "time = time[:-2] + sec_int time = datetime.strptime(time, \"%Y-%m-%dT%H:%M:%S\") +", "= ln.split() line = [] line.extend([row[0].strip(), row[1].strip(), row[2].strip()]) formatted_time =", "datetime from datetime import timedelta # params padding_time = 10", "offset in times: outp_file.write(\" \" + str(offset)) outp_file.write('\\n') outp_file.close() for", "= read_output_to_dict(outf) # read output file for event in truth_arr:", 
"\"input_files/GPD.\" + etype + \".in\" outfile = \"output_files/GPD.\" + etype", "read_output_to_dict(filename): model_dict = {} with open(filename) as f: for line", "inf, outf, comp_out): # write outputs to file outp_file =", "arrival = \"parsed_arrivals/\" + etype + \".arrivals.txt\" infile = \"input_files/GPD.\"", "= [] if key in model_dict.keys(): times = model_dict[key] times", "in Caltech model output and create a dictionary def read_output_to_dict(filename):", "offsets def execute_script(arrival, inf, outf, comp_out): # write outputs to", "outp_file.write(\" \" + str(offset)) outp_file.write('\\n') outp_file.close() for i in range(len(model_out)):", "def read_output_to_dict(filename): model_dict = {} with open(filename) as f: for", "SNP, PXS, PXP] # - phase [P, S, N] Note:", "as f: for ln in f: row = ln.split() line", "csv from datetime import datetime from datetime import timedelta #", "row[5].strip()]) model_list.append(line) return model_list def arrivals_to_dictionary(arrivals): picks = {} for", "in model_dict: model_dict[key] = [] model_dict[key].append(formatted_time) except: pass return model_dict", "time_arr): t_lower = t - timedelta(seconds=padding_time) t_upper = t +", "arr return picks def model_in_to_array(file): timestamps = [] with open(file)", "['EQS','EQP','SUS','SUP','THS','THP','SNS','SNP','PXS','PXP']: arrival = \"parsed_arrivals/\" + etype + \".arrivals.txt\" infile =", "point numbers formatted_time = datetime.strptime(tmp[3], \"%Y-%m-%dT%H:%M:%S.%f\") if key not in", "= offset.total_seconds() offsets.append('{:.6f}'.format(offset)) return offsets def execute_script(arrival, inf, outf, comp_out):", "= ['nan'] outp_file.write(str(event[5]) + \" \" + phase) for offset", "time_diff = timedelta(seconds=10) # file dirs parsed_arrivals = [] model_in", "model_dict # lookup time in the dictionary def key_lookup(event, phase,", "THP, SNS, SNP, PXS, PXP] # - phase [P, S,", "= tmp[0] + \"-\" + tmp[1] + \"-\" + tmp[2]", "+ etype + \".out\") # 
------------------ # read in UW", "= {} with open(filename) as f: for line in f:", "try: # fails if date is missing floating point numbers", "key_lookup(event, phase, model_dict) if len(times) == 0: if phase ==", "S, N] Note: N - not detected # - model", "phase [P, S, N] Note: N - not detected #", "times to a list truth_dict = arrivals_to_dictionary(truth_arr) # convert arrivals", "truth_dict = arrivals_to_dictionary(truth_arr) # convert arrivals to a dictionary (key=truncated", "file as a list truth_arr = filter_times(truth_dict, model_in) # filter", "return offsets def execute_script(arrival, inf, outf, comp_out): # write outputs", "time to absolute value: abs(t - time) offset = offset.total_seconds()", "+ time_diff (might need to add this) time = datetime.strftime(time,", "t - timedelta(seconds=padding_time) t_upper = t + timedelta(seconds=padding_time) offsets =", "list truth_arr = filter_times(truth_dict, model_in) # filter arrivals to picks", "= 'P' times = key_lookup(event, phase, model_dict) if len(times) ==", "- timedelta(seconds=padding_time) t_upper = t + timedelta(seconds=padding_time) offsets = []", "os import csv from datetime import datetime from datetime import", "files have buggy minutes in the timestamps time = datetime.strptime(entry,", "< t_upper: offset = t - time # or format", "model time offset (t_truth - t_model_pick) import numpy import math", "== 'P': phase = 'S' else: phase = 'P' times", "need to add this) time = datetime.strftime(time, \"%Y-%m-%dT%H:%M:%S\") \"\"\" #", "+ \"T\" + entry[8:10] + \":\" + entry[10:12] + \":\"", "- time) offset = offset.total_seconds() offsets.append('{:.6f}'.format(offset)) return offsets def execute_script(arrival,", "offsets.append('{:.6f}'.format(offset)) return offsets def execute_script(arrival, inf, outf, comp_out): # write", "entry[6:8] + \"T\" + entry[8:10] + \":\" + entry[10:12] +", "times = time_lookup(event[3], times) return times # search for arrivals", "# or format time to absolute value: abs(t 
- time)", "+ timedelta(seconds=23) + time_diff time = datetime.strftime(time, \"%Y-%m-%dT%H:%M:%S\") else: sec_int", "using UW truth data # Test outputs: # - type", "# case 2: run if .mseed files have buggy minutes", "= [] for time in time_arr: if time > t_lower", "row = ln.split() line = [] line.extend([row[0].strip(), row[1].strip(), row[2].strip()]) formatted_time", "import numpy import math import string import datetime import sys", "abs(t - time) offset = offset.total_seconds() offsets.append('{:.6f}'.format(offset)) return offsets def", "\"%Y-%m-%dT%H:%M:%S.%f\") - fudge_factor line.extend([formatted_time, row[4].strip(), row[5].strip()]) model_list.append(line) return model_list def", "ISSUES -------------------- # case 1: run if .mseed files have", "phase times = [] if key in model_dict.keys(): times =", "model_out.append(outfile) comp_out.append(\"comparison_out/comp.\" + etype + \".out\") # ------------------ # read", "arrival times as an array def read_arrivals_to_arr(filename): model_list = []", "event in truth_arr: phase = event[2] times = key_lookup(event, phase,", "write outputs to file outp_file = open(comp_out, 'w') truth_arr =", ".in file as a list truth_arr = filter_times(truth_dict, model_in) #", "+ phase) for offset in times: outp_file.write(\" \" + str(offset))", "row[2].strip()]) formatted_time = datetime.strptime(row[3], \"%Y-%m-%dT%H:%M:%S.%f\") - fudge_factor line.extend([formatted_time, row[4].strip(), row[5].strip()])", "model using UW truth data # Test outputs: # -", "params padding_time = 10 fudge_factor = timedelta(seconds=27) time_diff = timedelta(seconds=10)", "= datetime.strftime(arr[3], \"%Y-%m-%dT%H:%M:%S.%f\") key = key[0:-7] picks[key] = arr return", "= datetime.strftime(time, \"%Y-%m-%dT%H:%M:%S\") time = time[:-2] + sec_int time =", "offset (t_truth - t_model_pick) import numpy import math import string", "create a dictionary def read_output_to_dict(filename): model_dict = {} with open(filename)", "\" + str(offset)) 
outp_file.write('\\n') outp_file.close() for i in range(len(model_out)): execute_script(parsed_arrivals[i],", "phase = 'P' times = key_lookup(event, phase, model_dict) if len(times)", "read in the arrival times to a list truth_dict =", "'P' times = key_lookup(event, phase, model_dict) if len(times) == 0:", "= timedelta(seconds=27) time_diff = timedelta(seconds=10) # file dirs parsed_arrivals =", "import sys import os import csv from datetime import datetime", "or format time to absolute value: abs(t - time) offset", "convert arrivals to a dictionary (key=truncated timestamp) model_in = model_in_to_array(inf)", "arrival times to a list truth_dict = arrivals_to_dictionary(truth_arr) # convert", "= {} for arr in arrivals: key = datetime.strftime(arr[3], \"%Y-%m-%dT%H:%M:%S.%f\")", "key = datetime.strftime(arr[3], \"%Y-%m-%dT%H:%M:%S.%f\") key = key[0:-7] picks[key] = arr", "read in model .in file as a list truth_arr =", "arr in arrivals: key = datetime.strftime(arr[3], \"%Y-%m-%dT%H:%M:%S.%f\") key = key[0:-7]", "model_dict[key] = [] model_dict[key].append(formatted_time) except: pass return model_dict # lookup", "if sec_int > 59: sec_int = sec_int - 60 sec_int", "model_in_to_array(file): timestamps = [] with open(file) as f: for ln", "tmp[2] try: # fails if date is missing floating point", "\"%Y-%m-%dT%H:%M:%S.%f\") key = key[0:-7] picks[key] = arr return picks def", "filter arrivals to picks that were passed to the model", "offset = t - time # or format time to", "if key not in model_dict: model_dict[key] = [] model_dict[key].append(formatted_time) except:", "time = datetime.strftime(time, \"%Y-%m-%dT%H:%M:%S\") else: sec_int = time.second + 23", "<=51: time = time + timedelta(seconds=23) + time_diff time =", "parsed_arrivals.append(arrival) model_in.append(infile) model_out.append(outfile) comp_out.append(\"comparison_out/comp.\" + etype + \".out\") # ------------------", "t_lower and time < t_upper: offset = t - time", "t - time # or format time to absolute 
value:", "+ \":\" + entry[10:12] + \":\" + entry[12:14] # -------------", "f: for ln in f: entry = ln.split() entry =", "= ln.split() entry = entry[0].strip() entry = entry[len(entry)-20:len(entry)-6] entry =", "1: run if .mseed files have correct timestamps \"\"\" time", "+ entry[10:12] + \":\" + entry[12:14] # ------------- TIME STAMP", "filter_times(arrivals, model_in): filtered = [] for key in model_in: if", "file for event in truth_arr: phase = event[2] times =", "comp_out = [] for etype in ['EQS','EQP','SUS','SUP','THS','THP','SNS','SNP','PXS','PXP']: arrival = \"parsed_arrivals/\"", "= model_dict[key] times = time_lookup(event[3], times) return times # search", "timedelta(seconds=10) # file dirs parsed_arrivals = [] model_in = []", "= str(sec_int).zfill(2) time = datetime.strftime(time, \"%Y-%m-%dT%H:%M:%S\") time = time[:-2] +", "in f: entry = ln.split() entry = entry[0].strip() entry =", "for offset in times: outp_file.write(\" \" + str(offset)) outp_file.write('\\n') outp_file.close()", "time > t_lower and time < t_upper: offset = t", "dictionary def read_output_to_dict(filename): model_dict = {} with open(filename) as f:", "phase, model_dict) if len(times) == 0: if phase == 'P':", "- 60 sec_int = str(sec_int).zfill(2) time = datetime.strftime(time, \"%Y-%m-%dT%H:%M:%S\") time", "[] model_dict[key].append(formatted_time) except: pass return model_dict # lookup time in", "return model_list def arrivals_to_dictionary(arrivals): picks = {} for arr in", "+ entry[8:10] + \":\" + entry[10:12] + \":\" + entry[12:14]", "len(times) == 0: if phase == 'P': phase = 'S'", "+ timedelta(seconds=padding_time) offsets = [] for time in time_arr: if", "in the dictionary def key_lookup(event, phase, model_dict): key = event[0]", "that were passed to the model (.in file) model_dict =", "str(sec_int).zfill(2) time = datetime.strftime(time, \"%Y-%m-%dT%H:%M:%S\") time = time[:-2] + sec_int", "open(filename) as f: for ln in f: row = ln.split()", "model_in.append(infile) 
model_out.append(outfile) comp_out.append(\"comparison_out/comp.\" + etype + \".out\") # ------------------ #", "for event in truth_arr: phase = event[2] times = key_lookup(event,", "[] if key in model_dict.keys(): times = model_dict[key] times =", "\"-\" + tmp[2] try: # fails if date is missing", "# fails if date is missing floating point numbers formatted_time", "(might need to add this) time = datetime.strftime(time, \"%Y-%m-%dT%H:%M:%S\") \"\"\"", "Test outputs: # - type of event tested [EQS, EQP,", "event[0] + \"-\" + event[1] + \"-\" + phase times", "+ \".out\" parsed_arrivals.append(arrival) model_in.append(infile) model_out.append(outfile) comp_out.append(\"comparison_out/comp.\" + etype + \".out\")", "arrivals to a dictionary (key=truncated timestamp) model_in = model_in_to_array(inf) #", "t_upper: offset = t - time # or format time", "sec_int = str(sec_int).zfill(2) time = datetime.strftime(time, \"%Y-%m-%dT%H:%M:%S\") time = time[:-2]", "time[:-2] + sec_int time = datetime.strptime(time, \"%Y-%m-%dT%H:%M:%S\") + time_diff time", "model_dict) if len(times) == 0: if phase == 'P': phase", "59: sec_int = sec_int - 60 sec_int = str(sec_int).zfill(2) time", "import timedelta # params padding_time = 10 fudge_factor = timedelta(seconds=27)", "sec_int - 60 sec_int = str(sec_int).zfill(2) time = datetime.strftime(time, \"%Y-%m-%dT%H:%M:%S\")", "phase, model_dict) if len(times) == 0: phase = 'N' times", "absolute value: abs(t - time) offset = offset.total_seconds() offsets.append('{:.6f}'.format(offset)) return", "import datetime import sys import os import csv from datetime", "timestamps.append(time) return timestamps def filter_times(arrivals, model_in): filtered = [] for", "dirs parsed_arrivals = [] model_in = [] model_out = []", "return model_dict # lookup time in the dictionary def key_lookup(event,", "with open(filename) as f: for ln in f: row =", "= event[2] times = key_lookup(event, phase, model_dict) if len(times) ==", "time.second >=37 and time.second 
<=51: time = time + timedelta(seconds=23)", "add this) time = datetime.strftime(time, \"%Y-%m-%dT%H:%M:%S\") \"\"\" # case 2:", "times = ['nan'] outp_file.write(str(event[5]) + \" \" + phase) for", "N - not detected # - model time offset (t_truth", "read in Caltech model output and create a dictionary def", "etype in ['EQS','EQP','SUS','SUP','THS','THP','SNS','SNP','PXS','PXP']: arrival = \"parsed_arrivals/\" + etype + \".arrivals.txt\"", "times = key_lookup(event, phase, model_dict) if len(times) == 0: if", "filtered = [] for key in model_in: if key in", "def time_lookup(t, time_arr): t_lower = t - timedelta(seconds=padding_time) t_upper =", "model_in: if key in arrivals: filtered.append(arrivals[key]) return filtered # read", "= datetime.strptime(row[3], \"%Y-%m-%dT%H:%M:%S.%f\") - fudge_factor line.extend([formatted_time, row[4].strip(), row[5].strip()]) model_list.append(line) return", "# read in Caltech model output and create a dictionary", "is missing floating point numbers formatted_time = datetime.strptime(tmp[3], \"%Y-%m-%dT%H:%M:%S.%f\") if", "'w') truth_arr = read_arrivals_to_arr(arrival) # read in the arrival times", "within the padding time window def time_lookup(t, time_arr): t_lower =", "\".out\") # ------------------ # read in UW arrival times as", "+ event[1] + \"-\" + phase times = [] if", "files have correct timestamps \"\"\" time = datetime.strptime(entry, \"%Y-%m-%dT%H:%M:%S\") -", "for arrivals within the padding time window def time_lookup(t, time_arr):", "time in time_arr: if time > t_lower and time <", "\"-\" + entry[4:6] + \"-\" + entry[6:8] + \"T\" +", "'N' times = ['nan'] outp_file.write(str(event[5]) + \" \" + phase)" ]
[ "import get_backend, get_logger, get_s3Client, video backend_instance = get_backend() logger =", "chunking video\") logger.debug(\"extract_and_upload_video: Uploading video chunks\") with futures.ThreadPoolExecutor(8) as ex:", "lambda responses\") logger.debug(\"extract_and_upload_video: Finished inserting image metadata\") os.remove(video_filename) os.remove(thumbnail_filename) shutil.rmtree(chunked_video_dir)", "video chunks\") with futures.ThreadPoolExecutor(8) as ex: for vid in os.listdir(chunked_video_dir):", "frame extraction\") for result_future in futures.as_completed(result_futures): aws_lambda_response = json.loads(result_future.result()[\"Payload\"].read().decode(\"utf-8\")) aws_lambda_responses.append(aws_lambda_response)", "in os.listdir(chunked_video_dir): ex.submit( s3Client.upload_file, os.path.join(chunked_video_dir, vid), bucket, posixpath.join( posixpath.dirname(video_key), \"chunks\",", "video to S3\") s3Client.upload_file( video_filename, bucket, video_key ) logger.debug(\"extract_and_upload_video: Finished", "in enumerate(zip(img_raw_paths, img_types, img_metadatas, frame_numbers)): command += \"\"\"('{img_id}', '{img_raw_path}', '{img_type}',", "img_raw_path, img_type, img_metadata, game_id, frame_number) VALUES \"\"\" for i, (img_raw_path,", "datetime import json import os import posixpath import re import", "\"\"\"('{img_id}', '{img_raw_path}', '{img_type}', '{img_metadata}', '{game_id}', {frame_number}){include_comma} \"\"\".format( img_id=uuid.uuid4(), img_raw_path=img_raw_path, img_type=img_type,", "img_types = [\"png\" for frame in aws_lambda_response[\"frames\"]] metadatas = [", "\"s3_bucket_path\": bucket, \"s3_video_path\": posixpath.join(posixpath.dirname(video_key), \"chunks\", basename), \"s3_output_frames_path\": posixpath.join(posixpath.dirname(video_key), \"frames\", posixpath.splitext(basename)[0]),", "from ultitrackerapi import get_backend, get_logger, get_s3Client, video backend_instance = get_backend()", "as ex: 
result_futures = [] for payload in aws_lambda_payloads: result_futures.append(ex.submit(", "json.dumps({ \"s3_bucket_path\": bucket, \"s3_video_path\": posixpath.join(posixpath.dirname(video_key), \"chunks\", basename), \"s3_output_frames_path\": posixpath.join(posixpath.dirname(video_key), \"frames\",", "= json.loads(result_future.result()[\"Payload\"].read().decode(\"utf-8\")) aws_lambda_responses.append(aws_lambda_response) raw_paths = [\"s3://\" + posixpath.join(frame[\"bucket\"], frame[\"key\"]) for", "img_metadata, game_id, frame_number) VALUES \"\"\" for i, (img_raw_path, img_type, img_metadata,", "img_raw_path=img_raw_path, img_type=img_type, img_metadata=json.dumps(img_metadata), game_id=game_id, frame_number=frame_number, include_comma=\",\" if i < (len(img_raw_paths)", "Finished uploading video chunks\") logger.debug(\"extract_and_upload_video: Submitting lambda frame extraction\") aws_lambda_payloads", "thumbnail_filename=args.thumbnail_filename, video_key=args.video_key, thumbnail_key=args.thumbnail_key, game_id=args.game_id ) if __name__ == \"__main__\": main()", "video_filename, bucket, video_key ) logger.debug(\"extract_and_upload_video: Finished uploading video to S3\")", "uploading video chunks\") logger.debug(\"extract_and_upload_video: Submitting lambda frame extraction\") aws_lambda_payloads =", "2) logger.debug(\"extract_and_upload_video: Finished extracting thumbnail\") logger.debug(\"extract_and_upload_video: Uploading thumbnail\") s3Client.upload_file( thumbnail_filename,", "Payload=payload )) logger.debug(\"extract_and_upload_video: Submitted lambda frame extraction\") for result_future in", "extract_and_upload_video( bucket=args.bucket, video_filename=args.video_filename, thumbnail_filename=args.thumbnail_filename, video_key=args.video_key, thumbnail_key=args.thumbnail_key, game_id=args.game_id ) if __name__", "in aws_lambda_payloads: result_futures.append(ex.submit( client.invoke, FunctionName=\"extractFrames\", # 
InvocationType=\"Event\", Payload=payload )) logger.debug(\"extract_and_upload_video:", "raw_paths, img_types, metadatas, game_id, frame_numbers ) logger.debug(\"extract_and_upload_video: Received all lambda", "SET data = jsonb_set(data, '{{length}}', '\"{video_length}\"', true) WHERE game_id =", "posixpath.splitext(basename)[0]), \"video_metadata\": video_height_width }).encode() for basename in os.listdir(chunked_video_dir) ] client", "import posixpath import re import shutil import tempfile import uuid", "int(posixpath.basename(posixpath.dirname(key)).split(\"_\")[1]) return chunk_number * chunk_multiplier + frame_number def insert_images( img_raw_paths,", "result_future in futures.as_completed(result_futures): aws_lambda_response = json.loads(result_future.result()[\"Payload\"].read().decode(\"utf-8\")) aws_lambda_responses.append(aws_lambda_response) raw_paths = [\"s3://\"", "basename), \"s3_output_frames_path\": posixpath.join(posixpath.dirname(video_key), \"frames\", posixpath.splitext(basename)[0]), \"video_metadata\": video_height_width }).encode() for basename", "enumerate(zip(img_raw_paths, img_types, img_metadatas, frame_numbers)): command += \"\"\"('{img_id}', '{img_raw_path}', '{img_type}', '{img_metadata}',", "update_game_video_length(game_id, video_length) logger.debug(\"extract_and_upload_video: Finished updating length in db\") logger.debug(\"extract_and_upload_video: Extracting", "[\"s3://\" + posixpath.join(frame[\"bucket\"], frame[\"key\"]) for frame in aws_lambda_response[\"frames\"]] img_types =", "jsonb_set(data, '{{length}}', '\"{video_length}\"', true) WHERE game_id = '{game_id}' \"\"\".format( video_length=video_length,", "{frame_number}){include_comma} \"\"\".format( img_id=uuid.uuid4(), img_raw_path=img_raw_path, img_type=img_type, img_metadata=json.dumps(img_metadata), game_id=game_id, frame_number=frame_number, include_comma=\",\" if", "= [] with futures.ThreadPoolExecutor(max_workers=16) as ex: result_futures = [] for", "def main(): 
parser = argparse.ArgumentParser() parser.add_argument(\"bucket\") parser.add_argument(\"video_filename\") parser.add_argument(\"thumbnail_filename\") parser.add_argument(\"video_key\") parser.add_argument(\"thumbnail_key\")", "responses\") logger.debug(\"extract_and_upload_video: Finished inserting image metadata\") os.remove(video_filename) os.remove(thumbnail_filename) shutil.rmtree(chunked_video_dir) def", "= video.get_video_height_width(video_filename) logger.debug(\"extract_and_upload_video: Finished getting height and width\") logger.debug(\"extract_and_upload_video: Updating", "tempfile import uuid from concurrent import futures from multiprocessing import", "i < (len(img_raw_paths) - 1) else \"\" ) backend_instance.client.execute(command) def", "logger.debug(\"extract_and_upload_video: Uploading thumbnail\") s3Client.upload_file( thumbnail_filename, bucket, thumbnail_key ) logger.debug(\"extract_and_upload_video: Finished", "): command = \"\"\" INSERT INTO ultitracker.img_location (img_id, img_raw_path, img_type,", "video_filename=args.video_filename, thumbnail_filename=args.thumbnail_filename, video_key=args.video_key, thumbnail_key=args.thumbnail_key, game_id=args.game_id ) if __name__ == \"__main__\":", "Uploading video to S3\") s3Client.upload_file( video_filename, bucket, video_key ) logger.debug(\"extract_and_upload_video:", "futures.as_completed(result_futures): aws_lambda_response = json.loads(result_future.result()[\"Payload\"].read().decode(\"utf-8\")) aws_lambda_responses.append(aws_lambda_response) raw_paths = [\"s3://\" + posixpath.join(frame[\"bucket\"],", "= [-1 for frame in aws_lambda_response[\"frames\"]] insert_images( raw_paths, img_types, metadatas,", "logger.debug(\"extract_and_upload_video: Received all lambda responses\") logger.debug(\"extract_and_upload_video: Finished inserting image metadata\")", "data = jsonb_set(data, '{{length}}', '\"{video_length}\"', true) WHERE game_id = '{game_id}'", ") 
backend_instance.client.execute(command) def get_frame_number(key, chunk_multiplier=60): frame_number = int(posixpath.splitext(posixpath.basename(key))[0].split(\"_\")[1]) chunk_number =", "S3\") s3Client.upload_file( video_filename, bucket, video_key ) logger.debug(\"extract_and_upload_video: Finished uploading video", "updating length in db\") logger.debug(\"extract_and_upload_video: Extracting thumbnail\") video.get_thumbnail(video_filename, thumbnail_filename, time=video_length_seconds", "= get_s3Client() def update_game_video_length(game_id, video_length): command = \"\"\" UPDATE ultitracker.game_metadata", "- 1) else \"\" ) backend_instance.client.execute(command) def extract_and_upload_video( bucket, video_filename,", "command = \"\"\" INSERT INTO ultitracker.img_location (img_id, img_raw_path, img_type, img_metadata,", "and width\") logger.debug(\"extract_and_upload_video: Updating length in db\") update_game_video_length(game_id, video_length) logger.debug(\"extract_and_upload_video:", "json import os import posixpath import re import shutil import", "parser.parse_args() extract_and_upload_video( bucket=args.bucket, video_filename=args.video_filename, thumbnail_filename=args.thumbnail_filename, video_key=args.video_key, thumbnail_key=args.thumbnail_key, game_id=args.game_id ) if", "\"chunks\", vid ) ) logger.debug(\"extract_and_upload_video: Finished uploading video chunks\") logger.debug(\"extract_and_upload_video:", "get_backend() logger = get_logger(__name__, level=\"DEBUG\") s3Client = get_s3Client() def update_game_video_length(game_id,", "(img_raw_path, img_type, img_metadata, frame_number) in enumerate(zip(img_raw_paths, img_types, img_metadatas, frame_numbers)): command", "bucket, thumbnail_key ) logger.debug(\"extract_and_upload_video: Finished uploading thumbnail\") logger.debug(\"extract_and_upload_video: Uploading video", "img_type, img_metadata, frame_number) in enumerate(zip(img_raw_paths, img_types, img_metadatas, frame_numbers)): command 
+=", "logger = get_logger(__name__, level=\"DEBUG\") s3Client = get_s3Client() def update_game_video_length(game_id, video_length):", "[] for payload in aws_lambda_payloads: result_futures.append(ex.submit( client.invoke, FunctionName=\"extractFrames\", # InvocationType=\"Event\",", "# InvocationType=\"Event\", Payload=payload )) logger.debug(\"extract_and_upload_video: Submitted lambda frame extraction\") for", "[-1 for frame in aws_lambda_response[\"frames\"]] insert_images( raw_paths, img_types, metadatas, game_id,", "\"\"\" UPDATE ultitracker.game_metadata SET data = jsonb_set(data, '{{length}}', '\"{video_length}\"', true)", "frame_numbers)): command += \"\"\"('{img_id}', '{img_raw_path}', '{img_type}', '{img_metadata}', '{game_id}', {frame_number}){include_comma} \"\"\".format(", "video backend_instance = get_backend() logger = get_logger(__name__, level=\"DEBUG\") s3Client =", "s3Client = get_s3Client() def update_game_video_length(game_id, video_length): command = \"\"\" UPDATE", "FunctionName=\"extractFrames\", # InvocationType=\"Event\", Payload=payload )) logger.debug(\"extract_and_upload_video: Submitted lambda frame extraction\")", "uuid from concurrent import futures from multiprocessing import Pool from", "to S3\") s3Client.upload_file( video_filename, bucket, video_key ) logger.debug(\"extract_and_upload_video: Finished uploading", "= \"\"\" UPDATE ultitracker.game_metadata SET data = jsonb_set(data, '{{length}}', '\"{video_length}\"',", "\"frames\", posixpath.splitext(basename)[0]), \"video_metadata\": video_height_width }).encode() for basename in os.listdir(chunked_video_dir) ]", "= str(datetime.timedelta(seconds=video_length_seconds)) logger.debug(\"extract_and_upload_video: Finished getting video length\") logger.debug(\"extract_and_upload_video: Getting video", "video_key ) logger.debug(\"extract_and_upload_video: Finished uploading video to S3\") logger.debug(\"extract_and_upload_video: Chunking", "return chunk_number * chunk_multiplier + 
frame_number def insert_images( img_raw_paths, img_types,", "video height and width\") video_height_width = video.get_video_height_width(video_filename) logger.debug(\"extract_and_upload_video: Finished getting", "for payload in aws_lambda_payloads: result_futures.append(ex.submit( client.invoke, FunctionName=\"extractFrames\", # InvocationType=\"Event\", Payload=payload", "for result_future in futures.as_completed(result_futures): aws_lambda_response = json.loads(result_future.result()[\"Payload\"].read().decode(\"utf-8\")) aws_lambda_responses.append(aws_lambda_response) raw_paths =", "re import shutil import tempfile import uuid from concurrent import", "import datetime import json import os import posixpath import re", "vid), bucket, posixpath.join( posixpath.dirname(video_key), \"chunks\", vid ) ) logger.debug(\"extract_and_upload_video: Finished", ")) logger.debug(\"extract_and_upload_video: Submitted lambda frame extraction\") for result_future in futures.as_completed(result_futures):", "time=video_length_seconds // 2) logger.debug(\"extract_and_upload_video: Finished extracting thumbnail\") logger.debug(\"extract_and_upload_video: Uploading thumbnail\")", "parser = argparse.ArgumentParser() parser.add_argument(\"bucket\") parser.add_argument(\"video_filename\") parser.add_argument(\"thumbnail_filename\") parser.add_argument(\"video_key\") parser.add_argument(\"thumbnail_key\") parser.add_argument(\"game_id\") args", "= get_logger(__name__, level=\"DEBUG\") s3Client = get_s3Client() def update_game_video_length(game_id, video_length): command", "payload in aws_lambda_payloads: result_futures.append(ex.submit( client.invoke, FunctionName=\"extractFrames\", # InvocationType=\"Event\", Payload=payload ))", "logger.debug(\"extract_and_upload_video: Extracting thumbnail\") video.get_thumbnail(video_filename, thumbnail_filename, time=video_length_seconds // 2) logger.debug(\"extract_and_upload_video: Finished", "img_type, img_metadata, game_id, frame_number) 
VALUES \"\"\" for i, (img_raw_path, img_type,", "Finished uploading video to S3\") logger.debug(\"extract_and_upload_video: Chunking video\") chunked_video_dir =", "WHERE game_id = '{game_id}' \"\"\".format( video_length=video_length, game_id=game_id ) backend_instance.client.execute(command) def", "chunk_multiplier + frame_number def insert_images( img_raw_paths, img_types, img_metadatas, game_id, frame_numbers", "height and width\") logger.debug(\"extract_and_upload_video: Updating length in db\") update_game_video_length(game_id, video_length)", "logger.debug(\"extract_and_upload_video: Finished chunking video\") logger.debug(\"extract_and_upload_video: Uploading video chunks\") with futures.ThreadPoolExecutor(8)", "aws_lambda_payloads = [ json.dumps({ \"s3_bucket_path\": bucket, \"s3_video_path\": posixpath.join(posixpath.dirname(video_key), \"chunks\", basename),", "}).encode() for basename in os.listdir(chunked_video_dir) ] client = boto3.client('lambda') aws_lambda_responses", "get_logger, get_s3Client, video backend_instance = get_backend() logger = get_logger(__name__, level=\"DEBUG\")", "video.get_thumbnail(video_filename, thumbnail_filename, time=video_length_seconds // 2) logger.debug(\"extract_and_upload_video: Finished extracting thumbnail\") logger.debug(\"extract_and_upload_video:", "frame_numbers ): command = \"\"\" INSERT INTO ultitracker.img_location (img_id, img_raw_path,", "\"\" ) backend_instance.client.execute(command) def extract_and_upload_video( bucket, video_filename, thumbnail_filename, video_key, thumbnail_key,", "+ frame_number def insert_images( img_raw_paths, img_types, img_metadatas, game_id, frame_numbers ):", "result_futures.append(ex.submit( client.invoke, FunctionName=\"extractFrames\", # InvocationType=\"Event\", Payload=payload )) logger.debug(\"extract_and_upload_video: Submitted lambda", "width\") video_height_width = video.get_video_height_width(video_filename) logger.debug(\"extract_and_upload_video: Finished getting 
height and width\")", "args = parser.parse_args() extract_and_upload_video( bucket=args.bucket, video_filename=args.video_filename, thumbnail_filename=args.thumbnail_filename, video_key=args.video_key, thumbnail_key=args.thumbnail_key, game_id=args.game_id", "frame_number = int(posixpath.splitext(posixpath.basename(key))[0].split(\"_\")[1]) chunk_number = int(posixpath.basename(posixpath.dirname(key)).split(\"_\")[1]) return chunk_number * chunk_multiplier", "frame[\"key\"]) for frame in aws_lambda_response[\"frames\"]] img_types = [\"png\" for frame", "img_types, img_metadatas, frame_numbers)): command += \"\"\"('{img_id}', '{img_raw_path}', '{img_type}', '{img_metadata}', '{game_id}',", "'{img_metadata}', '{game_id}', {frame_number}){include_comma} \"\"\".format( img_id=uuid.uuid4(), img_raw_path=img_raw_path, img_type=img_type, img_metadata=json.dumps(img_metadata), game_id=game_id, frame_number=frame_number,", "frame_numbers = [-1 for frame in aws_lambda_response[\"frames\"]] insert_images( raw_paths, img_types,", "def update_game_video_length(game_id, video_length): command = \"\"\" UPDATE ultitracker.game_metadata SET data", "lambda frame extraction\") aws_lambda_payloads = [ json.dumps({ \"s3_bucket_path\": bucket, \"s3_video_path\":", "logger.debug(\"extract_and_upload_video: Finished getting height and width\") logger.debug(\"extract_and_upload_video: Updating length in", "extraction\") for result_future in futures.as_completed(result_futures): aws_lambda_response = json.loads(result_future.result()[\"Payload\"].read().decode(\"utf-8\")) aws_lambda_responses.append(aws_lambda_response) raw_paths", "bucket, video_filename, thumbnail_filename, video_key, thumbnail_key, game_id ): logger.debug(\"extract_and_upload_video: Getting video", "ultitracker.game_metadata SET data = jsonb_set(data, '{{length}}', '\"{video_length}\"', true) WHERE game_id", "import tempfile import uuid from concurrent import futures from multiprocessing", "import futures from 
multiprocessing import Pool from ultitrackerapi import get_backend,", "img_types, metadatas, game_id, frame_numbers ) logger.debug(\"extract_and_upload_video: Received all lambda responses\")", "lambda frame extraction\") for result_future in futures.as_completed(result_futures): aws_lambda_response = json.loads(result_future.result()[\"Payload\"].read().decode(\"utf-8\"))", "video\") logger.debug(\"extract_and_upload_video: Uploading video chunks\") with futures.ThreadPoolExecutor(8) as ex: for", "get_frame_number(key, chunk_multiplier=60): frame_number = int(posixpath.splitext(posixpath.basename(key))[0].split(\"_\")[1]) chunk_number = int(posixpath.basename(posixpath.dirname(key)).split(\"_\")[1]) return chunk_number", "get_s3Client, video backend_instance = get_backend() logger = get_logger(__name__, level=\"DEBUG\") s3Client", "extract_and_upload_video( bucket, video_filename, thumbnail_filename, video_key, thumbnail_key, game_id ): logger.debug(\"extract_and_upload_video: Getting", "aws_lambda_responses.append(aws_lambda_response) raw_paths = [\"s3://\" + posixpath.join(frame[\"bucket\"], frame[\"key\"]) for frame in", "video_length = str(datetime.timedelta(seconds=video_length_seconds)) logger.debug(\"extract_and_upload_video: Finished getting video length\") logger.debug(\"extract_and_upload_video: Getting", "Finished extracting thumbnail\") logger.debug(\"extract_and_upload_video: Uploading thumbnail\") s3Client.upload_file( thumbnail_filename, bucket, thumbnail_key", "aws_lambda_response[\"frames\"]] img_types = [\"png\" for frame in aws_lambda_response[\"frames\"]] metadatas =", "true) WHERE game_id = '{game_id}' \"\"\".format( video_length=video_length, game_id=game_id ) backend_instance.client.execute(command)", "for frame in aws_lambda_response[\"frames\"]] insert_images( raw_paths, img_types, metadatas, game_id, frame_numbers", "json.loads(result_future.result()[\"Payload\"].read().decode(\"utf-8\")) aws_lambda_responses.append(aws_lambda_response) 
raw_paths = [\"s3://\" + posixpath.join(frame[\"bucket\"], frame[\"key\"]) for frame", "// 2) logger.debug(\"extract_and_upload_video: Finished extracting thumbnail\") logger.debug(\"extract_and_upload_video: Uploading thumbnail\") s3Client.upload_file(", "with futures.ThreadPoolExecutor(max_workers=16) as ex: result_futures = [] for payload in", "argparse import boto3 import datetime import json import os import", "frame_number) in enumerate(zip(img_raw_paths, img_types, img_metadatas, frame_numbers)): command += \"\"\"('{img_id}', '{img_raw_path}',", "main(): parser = argparse.ArgumentParser() parser.add_argument(\"bucket\") parser.add_argument(\"video_filename\") parser.add_argument(\"thumbnail_filename\") parser.add_argument(\"video_key\") parser.add_argument(\"thumbnail_key\") parser.add_argument(\"game_id\")", "if i < (len(img_raw_paths) - 1) else \"\" ) backend_instance.client.execute(command)", "Getting video length\") video_length_seconds = int(video.get_video_duration(video_filename)) video_length = str(datetime.timedelta(seconds=video_length_seconds)) logger.debug(\"extract_and_upload_video:", "'{{length}}', '\"{video_length}\"', true) WHERE game_id = '{game_id}' \"\"\".format( video_length=video_length, game_id=game_id", "INSERT INTO ultitracker.img_location (img_id, img_raw_path, img_type, img_metadata, game_id, frame_number) VALUES", "for frame in aws_lambda_response[\"frames\"] ] frame_numbers = [-1 for frame", "+= \"\"\"('{img_id}', '{img_raw_path}', '{img_type}', '{img_metadata}', '{game_id}', {frame_number}){include_comma} \"\"\".format( img_id=uuid.uuid4(), img_raw_path=img_raw_path,", "get_logger(__name__, level=\"DEBUG\") s3Client = get_s3Client() def update_game_video_length(game_id, video_length): command =", ") logger.debug(\"extract_and_upload_video: Finished uploading thumbnail\") logger.debug(\"extract_and_upload_video: Uploading video to S3\")", "1) else \"\" ) backend_instance.client.execute(command) def extract_and_upload_video( 
bucket, video_filename, thumbnail_filename,", "aws_lambda_response[\"frames\"]] insert_images( raw_paths, img_types, metadatas, game_id, frame_numbers ) logger.debug(\"extract_and_upload_video: Received", "def insert_images( img_raw_paths, img_types, img_metadatas, game_id, frame_numbers ): command =", "img_metadatas, frame_numbers)): command += \"\"\"('{img_id}', '{img_raw_path}', '{img_type}', '{img_metadata}', '{game_id}', {frame_number}){include_comma}", "\"chunks\", basename), \"s3_output_frames_path\": posixpath.join(posixpath.dirname(video_key), \"frames\", posixpath.splitext(basename)[0]), \"video_metadata\": video_height_width }).encode() for", "thumbnail_filename, video_key, thumbnail_key, game_id ): logger.debug(\"extract_and_upload_video: Getting video length\") video_length_seconds", "= int(posixpath.splitext(posixpath.basename(key))[0].split(\"_\")[1]) chunk_number = int(posixpath.basename(posixpath.dirname(key)).split(\"_\")[1]) return chunk_number * chunk_multiplier +", "in aws_lambda_response[\"frames\"] ] frame_numbers = [-1 for frame in aws_lambda_response[\"frames\"]]", "logger.debug(\"extract_and_upload_video: Updating length in db\") update_game_video_length(game_id, video_length) logger.debug(\"extract_and_upload_video: Finished updating", "import re import shutil import tempfile import uuid from concurrent", "] client = boto3.client('lambda') aws_lambda_responses = [] with futures.ThreadPoolExecutor(max_workers=16) as", "InvocationType=\"Event\", Payload=payload )) logger.debug(\"extract_and_upload_video: Submitted lambda frame extraction\") for result_future", "video_filename, thumbnail_filename, video_key, thumbnail_key, game_id ): logger.debug(\"extract_and_upload_video: Getting video length\")", "include_comma=\",\" if i < (len(img_raw_paths) - 1) else \"\" )", "as ex: for vid in os.listdir(chunked_video_dir): ex.submit( s3Client.upload_file, os.path.join(chunked_video_dir, vid),", "bucket, video_key ) 
logger.debug(\"extract_and_upload_video: Finished uploading video to S3\") logger.debug(\"extract_and_upload_video:", "posixpath.join( posixpath.dirname(video_key), \"chunks\", vid ) ) logger.debug(\"extract_and_upload_video: Finished uploading video", "= [\"png\" for frame in aws_lambda_response[\"frames\"]] metadatas = [ {\"bucket\":", "Finished uploading thumbnail\") logger.debug(\"extract_and_upload_video: Uploading video to S3\") s3Client.upload_file( video_filename,", "posixpath.join(posixpath.dirname(video_key), \"frames\", posixpath.splitext(basename)[0]), \"video_metadata\": video_height_width }).encode() for basename in os.listdir(chunked_video_dir)", "video_key, thumbnail_key, game_id ): logger.debug(\"extract_and_upload_video: Getting video length\") video_length_seconds =", "= [ {\"bucket\": bucket} for frame in aws_lambda_response[\"frames\"] ] frame_numbers", "os.remove(thumbnail_filename) shutil.rmtree(chunked_video_dir) def main(): parser = argparse.ArgumentParser() parser.add_argument(\"bucket\") parser.add_argument(\"video_filename\") parser.add_argument(\"thumbnail_filename\")", "import os import posixpath import re import shutil import tempfile", "parser.add_argument(\"bucket\") parser.add_argument(\"video_filename\") parser.add_argument(\"thumbnail_filename\") parser.add_argument(\"video_key\") parser.add_argument(\"thumbnail_key\") parser.add_argument(\"game_id\") args = parser.parse_args() extract_and_upload_video(", "import argparse import boto3 import datetime import json import os", "): logger.debug(\"extract_and_upload_video: Getting video length\") video_length_seconds = int(video.get_video_duration(video_filename)) video_length =", "frame in aws_lambda_response[\"frames\"]] insert_images( raw_paths, img_types, metadatas, game_id, frame_numbers )", "vid in os.listdir(chunked_video_dir): ex.submit( s3Client.upload_file, os.path.join(chunked_video_dir, vid), bucket, posixpath.join( posixpath.dirname(video_key),", "from concurrent import 
futures from multiprocessing import Pool from ultitrackerapi", "thumbnail\") video.get_thumbnail(video_filename, thumbnail_filename, time=video_length_seconds // 2) logger.debug(\"extract_and_upload_video: Finished extracting thumbnail\")", "chunked_video_dir, chunk_size=60) logger.debug(\"extract_and_upload_video: Finished chunking video\") logger.debug(\"extract_and_upload_video: Uploading video chunks\")", "parser.add_argument(\"video_filename\") parser.add_argument(\"thumbnail_filename\") parser.add_argument(\"video_key\") parser.add_argument(\"thumbnail_key\") parser.add_argument(\"game_id\") args = parser.parse_args() extract_and_upload_video( bucket=args.bucket,", "\"s3_video_path\": posixpath.join(posixpath.dirname(video_key), \"chunks\", basename), \"s3_output_frames_path\": posixpath.join(posixpath.dirname(video_key), \"frames\", posixpath.splitext(basename)[0]), \"video_metadata\": video_height_width", "img_raw_paths, img_types, img_metadatas, game_id, frame_numbers ): command = \"\"\" INSERT", "= argparse.ArgumentParser() parser.add_argument(\"bucket\") parser.add_argument(\"video_filename\") parser.add_argument(\"thumbnail_filename\") parser.add_argument(\"video_key\") parser.add_argument(\"thumbnail_key\") parser.add_argument(\"game_id\") args =", "logger.debug(\"extract_and_upload_video: Finished extracting thumbnail\") logger.debug(\"extract_and_upload_video: Uploading thumbnail\") s3Client.upload_file( thumbnail_filename, bucket,", "for vid in os.listdir(chunked_video_dir): ex.submit( s3Client.upload_file, os.path.join(chunked_video_dir, vid), bucket, posixpath.join(", "chunk_number * chunk_multiplier + frame_number def insert_images( img_raw_paths, img_types, img_metadatas,", "video_height_width = video.get_video_height_width(video_filename) logger.debug(\"extract_and_upload_video: Finished getting height and width\") logger.debug(\"extract_and_upload_video:", ") ) logger.debug(\"extract_and_upload_video: Finished uploading video chunks\") 
logger.debug(\"extract_and_upload_video: Submitting lambda", "command = \"\"\" UPDATE ultitracker.game_metadata SET data = jsonb_set(data, '{{length}}',", "video_height_width }).encode() for basename in os.listdir(chunked_video_dir) ] client = boto3.client('lambda')", "tempfile.mkdtemp() video.chunk_video(video_filename, chunked_video_dir, chunk_size=60) logger.debug(\"extract_and_upload_video: Finished chunking video\") logger.debug(\"extract_and_upload_video: Uploading", "= [ json.dumps({ \"s3_bucket_path\": bucket, \"s3_video_path\": posixpath.join(posixpath.dirname(video_key), \"chunks\", basename), \"s3_output_frames_path\":", "posixpath.join(frame[\"bucket\"], frame[\"key\"]) for frame in aws_lambda_response[\"frames\"]] img_types = [\"png\" for", "thumbnail_key ) logger.debug(\"extract_and_upload_video: Finished uploading thumbnail\") logger.debug(\"extract_and_upload_video: Uploading video to", "logger.debug(\"extract_and_upload_video: Getting video height and width\") video_height_width = video.get_video_height_width(video_filename) logger.debug(\"extract_and_upload_video:", "[\"png\" for frame in aws_lambda_response[\"frames\"]] metadatas = [ {\"bucket\": bucket}", "img_metadata=json.dumps(img_metadata), game_id=game_id, frame_number=frame_number, include_comma=\",\" if i < (len(img_raw_paths) - 1)", "update_game_video_length(game_id, video_length): command = \"\"\" UPDATE ultitracker.game_metadata SET data =", "= \"\"\" INSERT INTO ultitracker.img_location (img_id, img_raw_path, img_type, img_metadata, game_id,", "Chunking video\") chunked_video_dir = tempfile.mkdtemp() video.chunk_video(video_filename, chunked_video_dir, chunk_size=60) logger.debug(\"extract_and_upload_video: Finished", "\"\"\" for i, (img_raw_path, img_type, img_metadata, frame_number) in enumerate(zip(img_raw_paths, img_types,", "client = boto3.client('lambda') aws_lambda_responses = [] with futures.ThreadPoolExecutor(max_workers=16) as ex:", "shutil import tempfile import uuid from 
concurrent import futures from", "in db\") logger.debug(\"extract_and_upload_video: Extracting thumbnail\") video.get_thumbnail(video_filename, thumbnail_filename, time=video_length_seconds // 2)", "Uploading thumbnail\") s3Client.upload_file( thumbnail_filename, bucket, thumbnail_key ) logger.debug(\"extract_and_upload_video: Finished uploading", "metadatas = [ {\"bucket\": bucket} for frame in aws_lambda_response[\"frames\"] ]", "[] with futures.ThreadPoolExecutor(max_workers=16) as ex: result_futures = [] for payload", "img_id=uuid.uuid4(), img_raw_path=img_raw_path, img_type=img_type, img_metadata=json.dumps(img_metadata), game_id=game_id, frame_number=frame_number, include_comma=\",\" if i <", "multiprocessing import Pool from ultitrackerapi import get_backend, get_logger, get_s3Client, video", "chunk_number = int(posixpath.basename(posixpath.dirname(key)).split(\"_\")[1]) return chunk_number * chunk_multiplier + frame_number def", "posixpath import re import shutil import tempfile import uuid from", "Finished updating length in db\") logger.debug(\"extract_and_upload_video: Extracting thumbnail\") video.get_thumbnail(video_filename, thumbnail_filename,", "parser.add_argument(\"game_id\") args = parser.parse_args() extract_and_upload_video( bucket=args.bucket, video_filename=args.video_filename, thumbnail_filename=args.thumbnail_filename, video_key=args.video_key, thumbnail_key=args.thumbnail_key,", "thumbnail_filename, bucket, thumbnail_key ) logger.debug(\"extract_and_upload_video: Finished uploading thumbnail\") logger.debug(\"extract_and_upload_video: Uploading", "Received all lambda responses\") logger.debug(\"extract_and_upload_video: Finished inserting image metadata\") os.remove(video_filename)", "getting height and width\") logger.debug(\"extract_and_upload_video: Updating length in db\") update_game_video_length(game_id,", "uploading video to S3\") logger.debug(\"extract_and_upload_video: Chunking video\") chunked_video_dir = tempfile.mkdtemp()", 
"\"\"\".format( img_id=uuid.uuid4(), img_raw_path=img_raw_path, img_type=img_type, img_metadata=json.dumps(img_metadata), game_id=game_id, frame_number=frame_number, include_comma=\",\" if i", "logger.debug(\"extract_and_upload_video: Finished uploading thumbnail\") logger.debug(\"extract_and_upload_video: Uploading video to S3\") s3Client.upload_file(", "VALUES \"\"\" for i, (img_raw_path, img_type, img_metadata, frame_number) in enumerate(zip(img_raw_paths,", "ex: result_futures = [] for payload in aws_lambda_payloads: result_futures.append(ex.submit( client.invoke,", "frame in aws_lambda_response[\"frames\"] ] frame_numbers = [-1 for frame in", "= int(video.get_video_duration(video_filename)) video_length = str(datetime.timedelta(seconds=video_length_seconds)) logger.debug(\"extract_and_upload_video: Finished getting video length\")", ") logger.debug(\"extract_and_upload_video: Finished uploading video chunks\") logger.debug(\"extract_and_upload_video: Submitting lambda frame", "Submitted lambda frame extraction\") for result_future in futures.as_completed(result_futures): aws_lambda_response =", "parser.add_argument(\"thumbnail_filename\") parser.add_argument(\"video_key\") parser.add_argument(\"thumbnail_key\") parser.add_argument(\"game_id\") args = parser.parse_args() extract_and_upload_video( bucket=args.bucket, video_filename=args.video_filename,", "for frame in aws_lambda_response[\"frames\"]] img_types = [\"png\" for frame in", "video_length_seconds = int(video.get_video_duration(video_filename)) video_length = str(datetime.timedelta(seconds=video_length_seconds)) logger.debug(\"extract_and_upload_video: Finished getting video", "Finished getting video length\") logger.debug(\"extract_and_upload_video: Getting video height and width\")", "os.listdir(chunked_video_dir): ex.submit( s3Client.upload_file, os.path.join(chunked_video_dir, vid), bucket, posixpath.join( posixpath.dirname(video_key), \"chunks\", vid", "in db\") update_game_video_length(game_id, 
video_length) logger.debug(\"extract_and_upload_video: Finished updating length in db\")", "metadata\") os.remove(video_filename) os.remove(thumbnail_filename) shutil.rmtree(chunked_video_dir) def main(): parser = argparse.ArgumentParser() parser.add_argument(\"bucket\")", "INTO ultitracker.img_location (img_id, img_raw_path, img_type, img_metadata, game_id, frame_number) VALUES \"\"\"", "Uploading video chunks\") with futures.ThreadPoolExecutor(8) as ex: for vid in", "for frame in aws_lambda_response[\"frames\"]] metadatas = [ {\"bucket\": bucket} for", "Submitting lambda frame extraction\") aws_lambda_payloads = [ json.dumps({ \"s3_bucket_path\": bucket,", "argparse.ArgumentParser() parser.add_argument(\"bucket\") parser.add_argument(\"video_filename\") parser.add_argument(\"thumbnail_filename\") parser.add_argument(\"video_key\") parser.add_argument(\"thumbnail_key\") parser.add_argument(\"game_id\") args = parser.parse_args()", "def extract_and_upload_video( bucket, video_filename, thumbnail_filename, video_key, thumbnail_key, game_id ): logger.debug(\"extract_and_upload_video:", "video.chunk_video(video_filename, chunked_video_dir, chunk_size=60) logger.debug(\"extract_and_upload_video: Finished chunking video\") logger.debug(\"extract_and_upload_video: Uploading video", "= boto3.client('lambda') aws_lambda_responses = [] with futures.ThreadPoolExecutor(max_workers=16) as ex: result_futures", "for basename in os.listdir(chunked_video_dir) ] client = boto3.client('lambda') aws_lambda_responses =", "video_length) logger.debug(\"extract_and_upload_video: Finished updating length in db\") logger.debug(\"extract_and_upload_video: Extracting thumbnail\")", ") logger.debug(\"extract_and_upload_video: Received all lambda responses\") logger.debug(\"extract_and_upload_video: Finished inserting image", "extraction\") aws_lambda_payloads = [ json.dumps({ \"s3_bucket_path\": bucket, \"s3_video_path\": posixpath.join(posixpath.dirname(video_key), \"chunks\",", 
"thumbnail\") logger.debug(\"extract_and_upload_video: Uploading video to S3\") s3Client.upload_file( video_filename, bucket, video_key", "all lambda responses\") logger.debug(\"extract_and_upload_video: Finished inserting image metadata\") os.remove(video_filename) os.remove(thumbnail_filename)", "os.path.join(chunked_video_dir, vid), bucket, posixpath.join( posixpath.dirname(video_key), \"chunks\", vid ) ) logger.debug(\"extract_and_upload_video:", "import boto3 import datetime import json import os import posixpath", "video_length): command = \"\"\" UPDATE ultitracker.game_metadata SET data = jsonb_set(data,", "logger.debug(\"extract_and_upload_video: Finished inserting image metadata\") os.remove(video_filename) os.remove(thumbnail_filename) shutil.rmtree(chunked_video_dir) def main():", "s3Client.upload_file( video_filename, bucket, video_key ) logger.debug(\"extract_and_upload_video: Finished uploading video to", "ultitrackerapi import get_backend, get_logger, get_s3Client, video backend_instance = get_backend() logger", "ex: for vid in os.listdir(chunked_video_dir): ex.submit( s3Client.upload_file, os.path.join(chunked_video_dir, vid), bucket,", "'{img_raw_path}', '{img_type}', '{img_metadata}', '{game_id}', {frame_number}){include_comma} \"\"\".format( img_id=uuid.uuid4(), img_raw_path=img_raw_path, img_type=img_type, img_metadata=json.dumps(img_metadata),", "boto3 import datetime import json import os import posixpath import", "os.remove(video_filename) os.remove(thumbnail_filename) shutil.rmtree(chunked_video_dir) def main(): parser = argparse.ArgumentParser() parser.add_argument(\"bucket\") parser.add_argument(\"video_filename\")", "logger.debug(\"extract_and_upload_video: Finished updating length in db\") logger.debug(\"extract_and_upload_video: Extracting thumbnail\") video.get_thumbnail(video_filename,", "thumbnail\") logger.debug(\"extract_and_upload_video: Uploading thumbnail\") s3Client.upload_file( thumbnail_filename, bucket, thumbnail_key ) 
logger.debug(\"extract_and_upload_video:", "aws_lambda_responses = [] with futures.ThreadPoolExecutor(max_workers=16) as ex: result_futures = []", "frame_number) VALUES \"\"\" for i, (img_raw_path, img_type, img_metadata, frame_number) in", "image metadata\") os.remove(video_filename) os.remove(thumbnail_filename) shutil.rmtree(chunked_video_dir) def main(): parser = argparse.ArgumentParser()", "\"s3_output_frames_path\": posixpath.join(posixpath.dirname(video_key), \"frames\", posixpath.splitext(basename)[0]), \"video_metadata\": video_height_width }).encode() for basename in", "\"\"\" INSERT INTO ultitracker.img_location (img_id, img_raw_path, img_type, img_metadata, game_id, frame_number)", "concurrent import futures from multiprocessing import Pool from ultitrackerapi import", "posixpath.join(posixpath.dirname(video_key), \"chunks\", basename), \"s3_output_frames_path\": posixpath.join(posixpath.dirname(video_key), \"frames\", posixpath.splitext(basename)[0]), \"video_metadata\": video_height_width }).encode()", "os import posixpath import re import shutil import tempfile import", "[ {\"bucket\": bucket} for frame in aws_lambda_response[\"frames\"] ] frame_numbers =", "video_length=video_length, game_id=game_id ) backend_instance.client.execute(command) def get_frame_number(key, chunk_multiplier=60): frame_number = int(posixpath.splitext(posixpath.basename(key))[0].split(\"_\")[1])", "bucket} for frame in aws_lambda_response[\"frames\"] ] frame_numbers = [-1 for", "bucket, posixpath.join( posixpath.dirname(video_key), \"chunks\", vid ) ) logger.debug(\"extract_and_upload_video: Finished uploading", "logger.debug(\"extract_and_upload_video: Submitted lambda frame extraction\") for result_future in futures.as_completed(result_futures): aws_lambda_response", "thumbnail_key, game_id ): logger.debug(\"extract_and_upload_video: Getting video length\") video_length_seconds = int(video.get_video_duration(video_filename))", "game_id, frame_numbers ): command = \"\"\" 
INSERT INTO ultitracker.img_location (img_id,", "thumbnail\") s3Client.upload_file( thumbnail_filename, bucket, thumbnail_key ) logger.debug(\"extract_and_upload_video: Finished uploading thumbnail\")", "logger.debug(\"extract_and_upload_video: Finished getting video length\") logger.debug(\"extract_and_upload_video: Getting video height and", "result_futures = [] for payload in aws_lambda_payloads: result_futures.append(ex.submit( client.invoke, FunctionName=\"extractFrames\",", "parser.add_argument(\"video_key\") parser.add_argument(\"thumbnail_key\") parser.add_argument(\"game_id\") args = parser.parse_args() extract_and_upload_video( bucket=args.bucket, video_filename=args.video_filename, thumbnail_filename=args.thumbnail_filename,", "= [] for payload in aws_lambda_payloads: result_futures.append(ex.submit( client.invoke, FunctionName=\"extractFrames\", #", "backend_instance.client.execute(command) def extract_and_upload_video( bucket, video_filename, thumbnail_filename, video_key, thumbnail_key, game_id ):", "i, (img_raw_path, img_type, img_metadata, frame_number) in enumerate(zip(img_raw_paths, img_types, img_metadatas, frame_numbers)):", "frame extraction\") aws_lambda_payloads = [ json.dumps({ \"s3_bucket_path\": bucket, \"s3_video_path\": posixpath.join(posixpath.dirname(video_key),", "+ posixpath.join(frame[\"bucket\"], frame[\"key\"]) for frame in aws_lambda_response[\"frames\"]] img_types = [\"png\"", "(img_id, img_raw_path, img_type, img_metadata, game_id, frame_number) VALUES \"\"\" for i,", "else \"\" ) backend_instance.client.execute(command) def extract_and_upload_video( bucket, video_filename, thumbnail_filename, video_key,", "= jsonb_set(data, '{{length}}', '\"{video_length}\"', true) WHERE game_id = '{game_id}' \"\"\".format(", "metadatas, game_id, frame_numbers ) logger.debug(\"extract_and_upload_video: Received all lambda responses\") logger.debug(\"extract_and_upload_video:", "raw_paths = [\"s3://\" + posixpath.join(frame[\"bucket\"], 
frame[\"key\"]) for frame in aws_lambda_response[\"frames\"]]", "aws_lambda_response[\"frames\"]] metadatas = [ {\"bucket\": bucket} for frame in aws_lambda_response[\"frames\"]", "length\") logger.debug(\"extract_and_upload_video: Getting video height and width\") video_height_width = video.get_video_height_width(video_filename)", "frame in aws_lambda_response[\"frames\"]] img_types = [\"png\" for frame in aws_lambda_response[\"frames\"]]", "bucket=args.bucket, video_filename=args.video_filename, thumbnail_filename=args.thumbnail_filename, video_key=args.video_key, thumbnail_key=args.thumbnail_key, game_id=args.game_id ) if __name__ ==", "in futures.as_completed(result_futures): aws_lambda_response = json.loads(result_future.result()[\"Payload\"].read().decode(\"utf-8\")) aws_lambda_responses.append(aws_lambda_response) raw_paths = [\"s3://\" +", "get_s3Client() def update_game_video_length(game_id, video_length): command = \"\"\" UPDATE ultitracker.game_metadata SET", "logger.debug(\"extract_and_upload_video: Getting video length\") video_length_seconds = int(video.get_video_duration(video_filename)) video_length = str(datetime.timedelta(seconds=video_length_seconds))", "db\") logger.debug(\"extract_and_upload_video: Extracting thumbnail\") video.get_thumbnail(video_filename, thumbnail_filename, time=video_length_seconds // 2) logger.debug(\"extract_and_upload_video:", "in aws_lambda_response[\"frames\"]] metadatas = [ {\"bucket\": bucket} for frame in", "aws_lambda_response = json.loads(result_future.result()[\"Payload\"].read().decode(\"utf-8\")) aws_lambda_responses.append(aws_lambda_response) raw_paths = [\"s3://\" + posixpath.join(frame[\"bucket\"], frame[\"key\"])", "'{game_id}', {frame_number}){include_comma} \"\"\".format( img_id=uuid.uuid4(), img_raw_path=img_raw_path, img_type=img_type, img_metadata=json.dumps(img_metadata), game_id=game_id, frame_number=frame_number, include_comma=\",\"", "logger.debug(\"extract_and_upload_video: Uploading video to 
S3\") s3Client.upload_file( video_filename, bucket, video_key )", "length in db\") logger.debug(\"extract_and_upload_video: Extracting thumbnail\") video.get_thumbnail(video_filename, thumbnail_filename, time=video_length_seconds //", "UPDATE ultitracker.game_metadata SET data = jsonb_set(data, '{{length}}', '\"{video_length}\"', true) WHERE", "img_type=img_type, img_metadata=json.dumps(img_metadata), game_id=game_id, frame_number=frame_number, include_comma=\",\" if i < (len(img_raw_paths) -", "in aws_lambda_response[\"frames\"]] img_types = [\"png\" for frame in aws_lambda_response[\"frames\"]] metadatas", "S3\") logger.debug(\"extract_and_upload_video: Chunking video\") chunked_video_dir = tempfile.mkdtemp() video.chunk_video(video_filename, chunked_video_dir, chunk_size=60)", "frame in aws_lambda_response[\"frames\"]] metadatas = [ {\"bucket\": bucket} for frame", "s3Client.upload_file( thumbnail_filename, bucket, thumbnail_key ) logger.debug(\"extract_and_upload_video: Finished uploading thumbnail\") logger.debug(\"extract_and_upload_video:", "length in db\") update_game_video_length(game_id, video_length) logger.debug(\"extract_and_upload_video: Finished updating length in", "video.get_video_height_width(video_filename) logger.debug(\"extract_and_upload_video: Finished getting height and width\") logger.debug(\"extract_and_upload_video: Updating length", "[ json.dumps({ \"s3_bucket_path\": bucket, \"s3_video_path\": posixpath.join(posixpath.dirname(video_key), \"chunks\", basename), \"s3_output_frames_path\": posixpath.join(posixpath.dirname(video_key),", "game_id = '{game_id}' \"\"\".format( video_length=video_length, game_id=game_id ) backend_instance.client.execute(command) def get_frame_number(key,", "Finished getting height and width\") logger.debug(\"extract_and_upload_video: Updating length in db\")", "thumbnail_filename, time=video_length_seconds // 2) logger.debug(\"extract_and_upload_video: Finished extracting thumbnail\") 
logger.debug(\"extract_and_upload_video: Uploading", "futures from multiprocessing import Pool from ultitrackerapi import get_backend, get_logger,", "int(video.get_video_duration(video_filename)) video_length = str(datetime.timedelta(seconds=video_length_seconds)) logger.debug(\"extract_and_upload_video: Finished getting video length\") logger.debug(\"extract_and_upload_video:", "= get_backend() logger = get_logger(__name__, level=\"DEBUG\") s3Client = get_s3Client() def", "import json import os import posixpath import re import shutil", "aws_lambda_payloads: result_futures.append(ex.submit( client.invoke, FunctionName=\"extractFrames\", # InvocationType=\"Event\", Payload=payload )) logger.debug(\"extract_and_upload_video: Submitted", "os.listdir(chunked_video_dir) ] client = boto3.client('lambda') aws_lambda_responses = [] with futures.ThreadPoolExecutor(max_workers=16)", "ultitracker.img_location (img_id, img_raw_path, img_type, img_metadata, game_id, frame_number) VALUES \"\"\" for", "getting video length\") logger.debug(\"extract_and_upload_video: Getting video height and width\") video_height_width", "to S3\") logger.debug(\"extract_and_upload_video: Chunking video\") chunked_video_dir = tempfile.mkdtemp() video.chunk_video(video_filename, chunked_video_dir,", "bucket, \"s3_video_path\": posixpath.join(posixpath.dirname(video_key), \"chunks\", basename), \"s3_output_frames_path\": posixpath.join(posixpath.dirname(video_key), \"frames\", posixpath.splitext(basename)[0]), \"video_metadata\":", "and width\") video_height_width = video.get_video_height_width(video_filename) logger.debug(\"extract_and_upload_video: Finished getting height and", "video chunks\") logger.debug(\"extract_and_upload_video: Submitting lambda frame extraction\") aws_lambda_payloads = [", "'{game_id}' \"\"\".format( video_length=video_length, game_id=game_id ) backend_instance.client.execute(command) def get_frame_number(key, chunk_multiplier=60): frame_number", 
"logger.debug(\"extract_and_upload_video: Uploading video chunks\") with futures.ThreadPoolExecutor(8) as ex: for vid", "{\"bucket\": bucket} for frame in aws_lambda_response[\"frames\"] ] frame_numbers = [-1", "'{img_type}', '{img_metadata}', '{game_id}', {frame_number}){include_comma} \"\"\".format( img_id=uuid.uuid4(), img_raw_path=img_raw_path, img_type=img_type, img_metadata=json.dumps(img_metadata), game_id=game_id,", "img_metadata, frame_number) in enumerate(zip(img_raw_paths, img_types, img_metadatas, frame_numbers)): command += \"\"\"('{img_id}',", "chunks\") logger.debug(\"extract_and_upload_video: Submitting lambda frame extraction\") aws_lambda_payloads = [ json.dumps({", "game_id, frame_numbers ) logger.debug(\"extract_and_upload_video: Received all lambda responses\") logger.debug(\"extract_and_upload_video: Finished", "insert_images( img_raw_paths, img_types, img_metadatas, game_id, frame_numbers ): command = \"\"\"", "posixpath.dirname(video_key), \"chunks\", vid ) ) logger.debug(\"extract_and_upload_video: Finished uploading video chunks\")", "import shutil import tempfile import uuid from concurrent import futures", "logger.debug(\"extract_and_upload_video: Finished uploading video chunks\") logger.debug(\"extract_and_upload_video: Submitting lambda frame extraction\")", "for i, (img_raw_path, img_type, img_metadata, frame_number) in enumerate(zip(img_raw_paths, img_types, img_metadatas,", "insert_images( raw_paths, img_types, metadatas, game_id, frame_numbers ) logger.debug(\"extract_and_upload_video: Received all", "img_metadatas, game_id, frame_numbers ): command = \"\"\" INSERT INTO ultitracker.img_location", "int(posixpath.splitext(posixpath.basename(key))[0].split(\"_\")[1]) chunk_number = int(posixpath.basename(posixpath.dirname(key)).split(\"_\")[1]) return chunk_number * chunk_multiplier + frame_number", "chunk_multiplier=60): frame_number = int(posixpath.splitext(posixpath.basename(key))[0].split(\"_\")[1]) chunk_number = 
int(posixpath.basename(posixpath.dirname(key)).split(\"_\")[1]) return chunk_number *", "with futures.ThreadPoolExecutor(8) as ex: for vid in os.listdir(chunked_video_dir): ex.submit( s3Client.upload_file,", "img_types, img_metadatas, game_id, frame_numbers ): command = \"\"\" INSERT INTO", "futures.ThreadPoolExecutor(8) as ex: for vid in os.listdir(chunked_video_dir): ex.submit( s3Client.upload_file, os.path.join(chunked_video_dir,", "Updating length in db\") update_game_video_length(game_id, video_length) logger.debug(\"extract_and_upload_video: Finished updating length", "in os.listdir(chunked_video_dir) ] client = boto3.client('lambda') aws_lambda_responses = [] with", "Finished chunking video\") logger.debug(\"extract_and_upload_video: Uploading video chunks\") with futures.ThreadPoolExecutor(8) as", "s3Client.upload_file, os.path.join(chunked_video_dir, vid), bucket, posixpath.join( posixpath.dirname(video_key), \"chunks\", vid ) )", "chunks\") with futures.ThreadPoolExecutor(8) as ex: for vid in os.listdir(chunked_video_dir): ex.submit(", "video length\") logger.debug(\"extract_and_upload_video: Getting video height and width\") video_height_width =", "* chunk_multiplier + frame_number def insert_images( img_raw_paths, img_types, img_metadatas, game_id,", "futures.ThreadPoolExecutor(max_workers=16) as ex: result_futures = [] for payload in aws_lambda_payloads:", "in aws_lambda_response[\"frames\"]] insert_images( raw_paths, img_types, metadatas, game_id, frame_numbers ) logger.debug(\"extract_and_upload_video:", "frame_numbers ) logger.debug(\"extract_and_upload_video: Received all lambda responses\") logger.debug(\"extract_and_upload_video: Finished inserting", "basename in os.listdir(chunked_video_dir) ] client = boto3.client('lambda') aws_lambda_responses = []", "< (len(img_raw_paths) - 1) else \"\" ) backend_instance.client.execute(command) def extract_and_upload_video(", "import uuid from concurrent import futures from multiprocessing import Pool", 
"Pool from ultitrackerapi import get_backend, get_logger, get_s3Client, video backend_instance =", "command += \"\"\"('{img_id}', '{img_raw_path}', '{img_type}', '{img_metadata}', '{game_id}', {frame_number}){include_comma} \"\"\".format( img_id=uuid.uuid4(),", "video length\") video_length_seconds = int(video.get_video_duration(video_filename)) video_length = str(datetime.timedelta(seconds=video_length_seconds)) logger.debug(\"extract_and_upload_video: Finished", "import Pool from ultitrackerapi import get_backend, get_logger, get_s3Client, video backend_instance", "aws_lambda_response[\"frames\"] ] frame_numbers = [-1 for frame in aws_lambda_response[\"frames\"]] insert_images(", "logger.debug(\"extract_and_upload_video: Chunking video\") chunked_video_dir = tempfile.mkdtemp() video.chunk_video(video_filename, chunked_video_dir, chunk_size=60) logger.debug(\"extract_and_upload_video:", "video to S3\") logger.debug(\"extract_and_upload_video: Chunking video\") chunked_video_dir = tempfile.mkdtemp() video.chunk_video(video_filename,", "\"\"\".format( video_length=video_length, game_id=game_id ) backend_instance.client.execute(command) def get_frame_number(key, chunk_multiplier=60): frame_number =", "from multiprocessing import Pool from ultitrackerapi import get_backend, get_logger, get_s3Client,", "str(datetime.timedelta(seconds=video_length_seconds)) logger.debug(\"extract_and_upload_video: Finished getting video length\") logger.debug(\"extract_and_upload_video: Getting video height", "def get_frame_number(key, chunk_multiplier=60): frame_number = int(posixpath.splitext(posixpath.basename(key))[0].split(\"_\")[1]) chunk_number = int(posixpath.basename(posixpath.dirname(key)).split(\"_\")[1]) return", "= int(posixpath.basename(posixpath.dirname(key)).split(\"_\")[1]) return chunk_number * chunk_multiplier + frame_number def insert_images(", "backend_instance = get_backend() logger = get_logger(__name__, level=\"DEBUG\") s3Client = get_s3Client()", 
"parser.add_argument(\"thumbnail_key\") parser.add_argument(\"game_id\") args = parser.parse_args() extract_and_upload_video( bucket=args.bucket, video_filename=args.video_filename, thumbnail_filename=args.thumbnail_filename, video_key=args.video_key,", "Getting video height and width\") video_height_width = video.get_video_height_width(video_filename) logger.debug(\"extract_and_upload_video: Finished", "frame_number def insert_images( img_raw_paths, img_types, img_metadatas, game_id, frame_numbers ): command", "game_id ): logger.debug(\"extract_and_upload_video: Getting video length\") video_length_seconds = int(video.get_video_duration(video_filename)) video_length", "= parser.parse_args() extract_and_upload_video( bucket=args.bucket, video_filename=args.video_filename, thumbnail_filename=args.thumbnail_filename, video_key=args.video_key, thumbnail_key=args.thumbnail_key, game_id=args.game_id )", "width\") logger.debug(\"extract_and_upload_video: Updating length in db\") update_game_video_length(game_id, video_length) logger.debug(\"extract_and_upload_video: Finished", "] frame_numbers = [-1 for frame in aws_lambda_response[\"frames\"]] insert_images( raw_paths,", "get_backend, get_logger, get_s3Client, video backend_instance = get_backend() logger = get_logger(__name__,", "game_id, frame_number) VALUES \"\"\" for i, (img_raw_path, img_type, img_metadata, frame_number)", "game_id=game_id, frame_number=frame_number, include_comma=\",\" if i < (len(img_raw_paths) - 1) else", "= [\"s3://\" + posixpath.join(frame[\"bucket\"], frame[\"key\"]) for frame in aws_lambda_response[\"frames\"]] img_types", "client.invoke, FunctionName=\"extractFrames\", # InvocationType=\"Event\", Payload=payload )) logger.debug(\"extract_and_upload_video: Submitted lambda frame", "Finished inserting image metadata\") os.remove(video_filename) os.remove(thumbnail_filename) shutil.rmtree(chunked_video_dir) def main(): parser", "extracting thumbnail\") logger.debug(\"extract_and_upload_video: 
Uploading thumbnail\") s3Client.upload_file( thumbnail_filename, bucket, thumbnail_key )", ") backend_instance.client.execute(command) def extract_and_upload_video( bucket, video_filename, thumbnail_filename, video_key, thumbnail_key, game_id", "logger.debug(\"extract_and_upload_video: Submitting lambda frame extraction\") aws_lambda_payloads = [ json.dumps({ \"s3_bucket_path\":", "shutil.rmtree(chunked_video_dir) def main(): parser = argparse.ArgumentParser() parser.add_argument(\"bucket\") parser.add_argument(\"video_filename\") parser.add_argument(\"thumbnail_filename\") parser.add_argument(\"video_key\")", "(len(img_raw_paths) - 1) else \"\" ) backend_instance.client.execute(command) def extract_and_upload_video( bucket,", "\"video_metadata\": video_height_width }).encode() for basename in os.listdir(chunked_video_dir) ] client =", "vid ) ) logger.debug(\"extract_and_upload_video: Finished uploading video chunks\") logger.debug(\"extract_and_upload_video: Submitting", "game_id=game_id ) backend_instance.client.execute(command) def get_frame_number(key, chunk_multiplier=60): frame_number = int(posixpath.splitext(posixpath.basename(key))[0].split(\"_\")[1]) chunk_number", "inserting image metadata\") os.remove(video_filename) os.remove(thumbnail_filename) shutil.rmtree(chunked_video_dir) def main(): parser =", "uploading thumbnail\") logger.debug(\"extract_and_upload_video: Uploading video to S3\") s3Client.upload_file( video_filename, bucket,", "boto3.client('lambda') aws_lambda_responses = [] with futures.ThreadPoolExecutor(max_workers=16) as ex: result_futures =", "video\") chunked_video_dir = tempfile.mkdtemp() video.chunk_video(video_filename, chunked_video_dir, chunk_size=60) logger.debug(\"extract_and_upload_video: Finished chunking", "ex.submit( s3Client.upload_file, os.path.join(chunked_video_dir, vid), bucket, posixpath.join( posixpath.dirname(video_key), \"chunks\", vid )", "height and width\") video_height_width = 
video.get_video_height_width(video_filename) logger.debug(\"extract_and_upload_video: Finished getting height", "Extracting thumbnail\") video.get_thumbnail(video_filename, thumbnail_filename, time=video_length_seconds // 2) logger.debug(\"extract_and_upload_video: Finished extracting", "= tempfile.mkdtemp() video.chunk_video(video_filename, chunked_video_dir, chunk_size=60) logger.debug(\"extract_and_upload_video: Finished chunking video\") logger.debug(\"extract_and_upload_video:", ") logger.debug(\"extract_and_upload_video: Finished uploading video to S3\") logger.debug(\"extract_and_upload_video: Chunking video\")", "chunked_video_dir = tempfile.mkdtemp() video.chunk_video(video_filename, chunked_video_dir, chunk_size=60) logger.debug(\"extract_and_upload_video: Finished chunking video\")", "'\"{video_length}\"', true) WHERE game_id = '{game_id}' \"\"\".format( video_length=video_length, game_id=game_id )", "db\") update_game_video_length(game_id, video_length) logger.debug(\"extract_and_upload_video: Finished updating length in db\") logger.debug(\"extract_and_upload_video:", "length\") video_length_seconds = int(video.get_video_duration(video_filename)) video_length = str(datetime.timedelta(seconds=video_length_seconds)) logger.debug(\"extract_and_upload_video: Finished getting", "backend_instance.client.execute(command) def get_frame_number(key, chunk_multiplier=60): frame_number = int(posixpath.splitext(posixpath.basename(key))[0].split(\"_\")[1]) chunk_number = int(posixpath.basename(posixpath.dirname(key)).split(\"_\")[1])", "frame_number=frame_number, include_comma=\",\" if i < (len(img_raw_paths) - 1) else \"\"", "= '{game_id}' \"\"\".format( video_length=video_length, game_id=game_id ) backend_instance.client.execute(command) def get_frame_number(key, chunk_multiplier=60):", "level=\"DEBUG\") s3Client = get_s3Client() def update_game_video_length(game_id, video_length): command = \"\"\"", "logger.debug(\"extract_and_upload_video: Finished uploading 
video to S3\") logger.debug(\"extract_and_upload_video: Chunking video\") chunked_video_dir", "chunk_size=60) logger.debug(\"extract_and_upload_video: Finished chunking video\") logger.debug(\"extract_and_upload_video: Uploading video chunks\") with" ]
[ "predictions_3 = learner_3.predict(x_test) # --- SECTION 7 --- # Accuracies", "= breast_cancer.data, breast_cancer.target # Split the train and test samples", "mpl.style.use('seaborn-paper') # --- SECTION 2 --- # Get the wrongly", "y_3, 1, label='50NN') plt.scatter(x, y_avg, marker='x', c='k', s=150, label='Average Positive',", "--- SECTION 3 --- # Instantiate the voting classifier voting", "# --- SECTION 2 --- # Instantiate the learners (classifiers)", "Plot the predicted probaiblity of each base learner as #", "samples test_samples = 100 x_train, y_train = x[:-test_samples], y[:-test_samples] x_test,", "classifier with the training data voting.fit(x_train, y_train) learner_1.fit(x_train, y_train) learner_2.fit(x_train,", "base learner as # a bar and the average probability", "voting = VotingClassifier([('5NN', learner_1), ('NB', learner_2), ('50NN', learner_3)], voting='soft') #", "the average predicted probability # x=[] y_1=[] y_2=[] y_3=[] y_avg=[]", "--- SECTION 1 --- # Import the required libraries from", "# Predict the most probable class hard_predictions = voting.predict(x_test) #", "# Instantiate the learners (classifiers) learner_1 = neighbors.KNeighborsClassifier(n_neighbors=5) learner_2 =", "zorder=10) y = [0.5 for x in range(len(errors))] plt.plot(y, c='k',", "hard_predictions)) # --- SECTION 1 --- # Import the required", "# --- SECTION 2 --- # Store the predicted probability", "svm, neighbors from sklearn.ensemble import VotingClassifier from sklearn.metrics import accuracy_score", "predicted probability # x=[] y_1=[] y_2=[] y_3=[] y_avg=[] for i", "libraries import matplotlib as mpl import matplotlib.pyplot as plt mpl.style.use('seaborn-paper')", "3 --- # Instantiate the voting classifier voting = VotingClassifier([('5NN',", "average probability as an X plt.bar(x, y_1, 3, label='5NN') plt.bar(x,", "--- # Import the required libraries import matplotlib as mpl", "and test samples test_samples = 100 x_train, y_train = x[:-test_samples],", "('NB', 
learner_2), ('50NN', learner_3)], voting='soft') # --- SECTION 4 ---", "Fit classifier with the training data voting.fit(x_train, y_train) learner_1.fit(x_train, y_train)", "most probable class hard_predictions = voting.predict(x_test) # --- SECTION 6", "as # a bar and the average probability as an", "SECTION 2 --- # Instantiate the learners (classifiers) learner_1 =", "predicted instance, for each base learner # as well as", "and the predicted probabilities for the whole test set errors", "# --- SECTION 1 --- # Import the required libraries", "as well as the average predicted probability # x=[] y_1=[]", "class hard_predictions = voting.predict(x_test) # --- SECTION 6 --- #", "--- SECTION 4 --- # Fit classifier with the training", "# Store the predicted probability for # each wrongly predicted", "learner_2.predict(x_test) predictions_3 = learner_3.predict(x_test) # --- SECTION 7 --- #", "probability # x=[] y_1=[] y_2=[] y_3=[] y_avg=[] for i in", "learner_3)], voting='soft') # --- SECTION 4 --- # Fit classifier", "learner_2.predict_proba(x_test) probabilities_3 = learner_3.predict_proba(x_test) # --- SECTION 2 --- #", "of each base learner as # a bar and the", "voting.fit(x_train, y_train) learner_1.fit(x_train, y_train) learner_2.fit(x_train, y_train) learner_3.fit(x_train, y_train) # ---", "--- # Import the required libraries from sklearn import datasets,", "# Fit classifier with the training data voting.fit(x_train, y_train) learner_1.fit(x_train,", "y_2, 2, label='NB') plt.bar(x, y_3, 1, label='50NN') plt.scatter(x, y_avg, marker='x',", "--- SECTION 2 --- # Instantiate the learners (classifiers) learner_1", "--- # Get the base learner predictions predictions_1 = learner_1.predict(x_test)", "= learner_3.predict_proba(x_test) # --- SECTION 2 --- # Store the", "SECTION 1 --- # Import the required libraries import matplotlib", "i in range(len(errors)): if not errors[i] == 0: x.append(i) y_1.append(probabilities_1[i][0])", "print('Hard Voting:', 
accuracy_score(y_test, hard_predictions)) # --- SECTION 1 --- #", "y_1, 3, label='5NN') plt.bar(x, y_2, 2, label='NB') plt.bar(x, y_3, 1,", "probability as an X plt.bar(x, y_1, 3, label='5NN') plt.bar(x, y_2,", "# --- SECTION 5 --- # Predict the most probable", "matplotlib.pyplot as plt mpl.style.use('seaborn-paper') # --- SECTION 2 --- #", "# --- SECTION 6 --- # Get the base learner", "1 --- # Import the required libraries import matplotlib as", "the required libraries from sklearn import datasets, naive_bayes, svm, neighbors", "s=150, label='Average Positive', zorder=10) y = [0.5 for x in", "y_3=[] y_avg=[] for i in range(len(errors)): if not errors[i] ==", "[0.5 for x in range(len(errors))] plt.plot(y, c='k', linestyle='--') plt.title('Positive Probability')", "x in range(len(errors))] plt.plot(y, c='k', linestyle='--') plt.title('Positive Probability') plt.xlabel('Test sample')", "--- # Plot the predicted probaiblity of each base learner", "SECTION 5 --- # Predict the most probable class hard_predictions", "predictions predictions_1 = learner_1.predict(x_test) predictions_2 = learner_2.predict(x_test) predictions_3 = learner_3.predict(x_test)", "= x[:-test_samples], y[:-test_samples] x_test, y_test = x[-test_samples:], y[-test_samples:] # ---", "SECTION 2 --- # Get the wrongly predicted instances #", "# --- SECTION 2 --- # Get the wrongly predicted", "= learner_2.predict_proba(x_test) probabilities_3 = learner_3.predict_proba(x_test) # --- SECTION 2 ---", "predicted instances # and the predicted probabilities for the whole", "hard_predictions = voting.predict(x_test) # --- SECTION 6 --- # Get", "x.append(i) y_1.append(probabilities_1[i][0]) y_2.append(probabilities_2[i][0]) y_3.append(probabilities_3[i][0]) y_avg.append((probabilities_1[i][0]+probabilities_2[i][0]+probabilities_3[i][0])/3) # --- SECTION 3 ---", "bar and the average probability as an X plt.bar(x, y_1,", "classifier voting = VotingClassifier([('5NN', learner_1), ('NB', learner_2), ('50NN', 
learner_3)], voting='soft')", "breast_cancer = datasets.load_breast_cancer() x, y = breast_cancer.data, breast_cancer.target # Split", "the average probability as an X plt.bar(x, y_1, 3, label='5NN')", "datasets, naive_bayes, svm, neighbors from sklearn.ensemble import VotingClassifier from sklearn.metrics", "probability for # each wrongly predicted instance, for each base", "neighbors.KNeighborsClassifier(n_neighbors=5) learner_2 = naive_bayes.GaussianNB() learner_3 = neighbors.KNeighborsClassifier(n_neighbors=50) # --- SECTION", "label='NB') plt.bar(x, y_3, 1, label='50NN') plt.scatter(x, y_avg, marker='x', c='k', s=150,", "--- # Fit classifier with the training data voting.fit(x_train, y_train)", "dataset breast_cancer = datasets.load_breast_cancer() x, y = breast_cancer.data, breast_cancer.target #", "y_avg, marker='x', c='k', s=150, label='Average Positive', zorder=10) y = [0.5", "matplotlib as mpl import matplotlib.pyplot as plt mpl.style.use('seaborn-paper') # ---", "y[:-test_samples] x_test, y_test = x[-test_samples:], y[-test_samples:] # --- SECTION 2", "# as well as the average predicted probability # x=[]", "learners print('L1:', accuracy_score(y_test, predictions_1)) print('L2:', accuracy_score(y_test, predictions_2)) print('L3:', accuracy_score(y_test, predictions_3))", "neighbors.KNeighborsClassifier(n_neighbors=50) # --- SECTION 3 --- # Instantiate the voting", "Store the predicted probability for # each wrongly predicted instance,", "= neighbors.KNeighborsClassifier(n_neighbors=5) learner_2 = naive_bayes.GaussianNB() learner_3 = neighbors.KNeighborsClassifier(n_neighbors=50) # ---", "3, label='5NN') plt.bar(x, y_2, 2, label='NB') plt.bar(x, y_3, 1, label='50NN')", "accuracy_score(y_test, hard_predictions)) # --- SECTION 1 --- # Import the", "probabilities for the whole test set errors = y_test-hard_predictions probabilities_1", "predicted probability for # each wrongly predicted instance, for each", "print('L1:', accuracy_score(y_test, 
predictions_1)) print('L2:', accuracy_score(y_test, predictions_2)) print('L3:', accuracy_score(y_test, predictions_3)) #", "learner_3.predict(x_test) # --- SECTION 7 --- # Accuracies of base", "with the training data voting.fit(x_train, y_train) learner_1.fit(x_train, y_train) learner_2.fit(x_train, y_train)", "# Get the base learner predictions predictions_1 = learner_1.predict(x_test) predictions_2", "the wrongly predicted instances # and the predicted probabilities for", "Instantiate the voting classifier voting = VotingClassifier([('5NN', learner_1), ('NB', learner_2),", "--- # Get the wrongly predicted instances # and the", "errors[i] == 0: x.append(i) y_1.append(probabilities_1[i][0]) y_2.append(probabilities_2[i][0]) y_3.append(probabilities_3[i][0]) y_avg.append((probabilities_1[i][0]+probabilities_2[i][0]+probabilities_3[i][0])/3) # ---", "well as the average predicted probability # x=[] y_1=[] y_2=[]", "y_avg=[] for i in range(len(errors)): if not errors[i] == 0:", "y_test-hard_predictions probabilities_1 = learner_1.predict_proba(x_test) probabilities_2 = learner_2.predict_proba(x_test) probabilities_3 = learner_3.predict_proba(x_test)", "Predict the most probable class hard_predictions = voting.predict(x_test) # ---", "= 100 x_train, y_train = x[:-test_samples], y[:-test_samples] x_test, y_test =", "label='50NN') plt.scatter(x, y_avg, marker='x', c='k', s=150, label='Average Positive', zorder=10) y", "x_test, y_test = x[-test_samples:], y[-test_samples:] # --- SECTION 2 ---", "learners (classifiers) learner_1 = neighbors.KNeighborsClassifier(n_neighbors=5) learner_2 = naive_bayes.GaussianNB() learner_3 =", "of hard voting print('-'*30) print('Hard Voting:', accuracy_score(y_test, hard_predictions)) # ---", "= VotingClassifier([('5NN', learner_1), ('NB', learner_2), ('50NN', learner_3)], voting='soft') # ---", "each wrongly predicted instance, for each base learner # as", "plt.bar(x, y_3, 1, label='50NN') plt.scatter(x, y_avg, marker='x', c='k', s=150, 
label='Average", "--- # Store the predicted probability for # each wrongly", "test samples test_samples = 100 x_train, y_train = x[:-test_samples], y[:-test_samples]", "errors = y_test-hard_predictions probabilities_1 = learner_1.predict_proba(x_test) probabilities_2 = learner_2.predict_proba(x_test) probabilities_3", "SECTION 3 --- # Instantiate the voting classifier voting =", "--- SECTION 7 --- # Accuracies of base learners print('L1:',", "import matplotlib as mpl import matplotlib.pyplot as plt mpl.style.use('seaborn-paper') #", "c='k', s=150, label='Average Positive', zorder=10) y = [0.5 for x", "voting.predict(x_test) # --- SECTION 6 --- # Get the base", "in range(len(errors))] plt.plot(y, c='k', linestyle='--') plt.title('Positive Probability') plt.xlabel('Test sample') plt.ylabel('probability')", "predictions_2 = learner_2.predict(x_test) predictions_3 = learner_3.predict(x_test) # --- SECTION 7", "Get the wrongly predicted instances # and the predicted probabilities", "== 0: x.append(i) y_1.append(probabilities_1[i][0]) y_2.append(probabilities_2[i][0]) y_3.append(probabilities_3[i][0]) y_avg.append((probabilities_1[i][0]+probabilities_2[i][0]+probabilities_3[i][0])/3) # --- SECTION", "naive_bayes.GaussianNB() learner_3 = neighbors.KNeighborsClassifier(n_neighbors=50) # --- SECTION 3 --- #", "= learner_2.predict(x_test) predictions_3 = learner_3.predict(x_test) # --- SECTION 7 ---", "3 --- # Plot the predicted probaiblity of each base", "= learner_3.predict(x_test) # --- SECTION 7 --- # Accuracies of", "x=[] y_1=[] y_2=[] y_3=[] y_avg=[] for i in range(len(errors)): if", "2, label='NB') plt.bar(x, y_3, 1, label='50NN') plt.scatter(x, y_avg, marker='x', c='k',", "# Accuracies of base learners print('L1:', accuracy_score(y_test, predictions_1)) print('L2:', accuracy_score(y_test,", "y = breast_cancer.data, breast_cancer.target # Split the train and test", "predicted probabilities for the whole test set errors = y_test-hard_predictions", "# --- SECTION 3 --- 
# Plot the predicted probaiblity", "print('-'*30) print('Hard Voting:', accuracy_score(y_test, hard_predictions)) # --- SECTION 1 ---", "learner predictions predictions_1 = learner_1.predict(x_test) predictions_2 = learner_2.predict(x_test) predictions_3 =", "learner_1.fit(x_train, y_train) learner_2.fit(x_train, y_train) learner_3.fit(x_train, y_train) # --- SECTION 5", "if not errors[i] == 0: x.append(i) y_1.append(probabilities_1[i][0]) y_2.append(probabilities_2[i][0]) y_3.append(probabilities_3[i][0]) y_avg.append((probabilities_1[i][0]+probabilities_2[i][0]+probabilities_3[i][0])/3)", "the predicted probabilities for the whole test set errors =", "--- SECTION 2 --- # Get the wrongly predicted instances", "# Instantiate the voting classifier voting = VotingClassifier([('5NN', learner_1), ('NB',", "import accuracy_score # Load the dataset breast_cancer = datasets.load_breast_cancer() x,", "base learner predictions predictions_1 = learner_1.predict(x_test) predictions_2 = learner_2.predict(x_test) predictions_3", "= neighbors.KNeighborsClassifier(n_neighbors=50) # --- SECTION 3 --- # Instantiate the", "--- # Instantiate the voting classifier voting = VotingClassifier([('5NN', learner_1),", "probabilities_3 = learner_3.predict_proba(x_test) # --- SECTION 2 --- # Store", "learner_1), ('NB', learner_2), ('50NN', learner_3)], voting='soft') # --- SECTION 4", "= y_test-hard_predictions probabilities_1 = learner_1.predict_proba(x_test) probabilities_2 = learner_2.predict_proba(x_test) probabilities_3 =", "base learner # as well as the average predicted probability", "import matplotlib.pyplot as plt mpl.style.use('seaborn-paper') # --- SECTION 2 ---", "learner_1 = neighbors.KNeighborsClassifier(n_neighbors=5) learner_2 = naive_bayes.GaussianNB() learner_3 = neighbors.KNeighborsClassifier(n_neighbors=50) #", "# Split the train and test samples test_samples = 100", "# --- SECTION 7 --- # Accuracies of base learners", "marker='x', c='k', s=150, label='Average Positive', 
zorder=10) y = [0.5 for", "SECTION 2 --- # Store the predicted probability for #", "learner_3.fit(x_train, y_train) # --- SECTION 5 --- # Predict the", "breast_cancer.data, breast_cancer.target # Split the train and test samples test_samples", "y_test = x[-test_samples:], y[-test_samples:] # --- SECTION 2 --- #", "whole test set errors = y_test-hard_predictions probabilities_1 = learner_1.predict_proba(x_test) probabilities_2", "X plt.bar(x, y_1, 3, label='5NN') plt.bar(x, y_2, 2, label='NB') plt.bar(x,", "test_samples = 100 x_train, y_train = x[:-test_samples], y[:-test_samples] x_test, y_test", "# Load the dataset breast_cancer = datasets.load_breast_cancer() x, y =", "--- SECTION 6 --- # Get the base learner predictions", "# Plot the predicted probaiblity of each base learner as", "--- SECTION 5 --- # Predict the most probable class", "probaiblity of each base learner as # a bar and", "learner_3 = neighbors.KNeighborsClassifier(n_neighbors=50) # --- SECTION 3 --- # Instantiate", "probabilities_1 = learner_1.predict_proba(x_test) probabilities_2 = learner_2.predict_proba(x_test) probabilities_3 = learner_3.predict_proba(x_test) #", "accuracy_score(y_test, predictions_3)) # Accuracy of hard voting print('-'*30) print('Hard Voting:',", "x, y = breast_cancer.data, breast_cancer.target # Split the train and", "learner as # a bar and the average probability as", "predicted probaiblity of each base learner as # a bar", "y_train = x[:-test_samples], y[:-test_samples] x_test, y_test = x[-test_samples:], y[-test_samples:] #", "= learner_1.predict_proba(x_test) probabilities_2 = learner_2.predict_proba(x_test) probabilities_3 = learner_3.predict_proba(x_test) # ---", "Accuracies of base learners print('L1:', accuracy_score(y_test, predictions_1)) print('L2:', accuracy_score(y_test, predictions_2))", "# x=[] y_1=[] y_2=[] y_3=[] y_avg=[] for i in range(len(errors)):", "learner_1.predict_proba(x_test) probabilities_2 = learner_2.predict_proba(x_test) probabilities_3 = 
learner_3.predict_proba(x_test) # --- SECTION", "from sklearn.metrics import accuracy_score # Load the dataset breast_cancer =", "average predicted probability # x=[] y_1=[] y_2=[] y_3=[] y_avg=[] for", "learner # as well as the average predicted probability #", "for # each wrongly predicted instance, for each base learner", "from sklearn import datasets, naive_bayes, svm, neighbors from sklearn.ensemble import", "(classifiers) learner_1 = neighbors.KNeighborsClassifier(n_neighbors=5) learner_2 = naive_bayes.GaussianNB() learner_3 = neighbors.KNeighborsClassifier(n_neighbors=50)", "2 --- # Instantiate the learners (classifiers) learner_1 = neighbors.KNeighborsClassifier(n_neighbors=5)", "y_2=[] y_3=[] y_avg=[] for i in range(len(errors)): if not errors[i]", "label='Average Positive', zorder=10) y = [0.5 for x in range(len(errors))]", "y_train) learner_1.fit(x_train, y_train) learner_2.fit(x_train, y_train) learner_3.fit(x_train, y_train) # --- SECTION", "for each base learner # as well as the average", "the train and test samples test_samples = 100 x_train, y_train", "Get the base learner predictions predictions_1 = learner_1.predict(x_test) predictions_2 =", "SECTION 6 --- # Get the base learner predictions predictions_1", "naive_bayes, svm, neighbors from sklearn.ensemble import VotingClassifier from sklearn.metrics import", "('50NN', learner_3)], voting='soft') # --- SECTION 4 --- # Fit", "--- SECTION 3 --- # Plot the predicted probaiblity of", "plt mpl.style.use('seaborn-paper') # --- SECTION 2 --- # Get the", "the predicted probability for # each wrongly predicted instance, for", "sklearn import datasets, naive_bayes, svm, neighbors from sklearn.ensemble import VotingClassifier", "range(len(errors))] plt.plot(y, c='k', linestyle='--') plt.title('Positive Probability') plt.xlabel('Test sample') plt.ylabel('probability') plt.legend()", "data voting.fit(x_train, y_train) learner_1.fit(x_train, y_train) learner_2.fit(x_train, y_train) learner_3.fit(x_train, 
y_train) #", "y_3.append(probabilities_3[i][0]) y_avg.append((probabilities_1[i][0]+probabilities_2[i][0]+probabilities_3[i][0])/3) # --- SECTION 3 --- # Plot the", "datasets.load_breast_cancer() x, y = breast_cancer.data, breast_cancer.target # Split the train", "SECTION 3 --- # Plot the predicted probaiblity of each", "= voting.predict(x_test) # --- SECTION 6 --- # Get the", "y_1=[] y_2=[] y_3=[] y_avg=[] for i in range(len(errors)): if not", "required libraries import matplotlib as mpl import matplotlib.pyplot as plt", "7 --- # Accuracies of base learners print('L1:', accuracy_score(y_test, predictions_1))", "for i in range(len(errors)): if not errors[i] == 0: x.append(i)", "y[-test_samples:] # --- SECTION 2 --- # Instantiate the learners", "# and the predicted probabilities for the whole test set", "accuracy_score # Load the dataset breast_cancer = datasets.load_breast_cancer() x, y", "4 --- # Fit classifier with the training data voting.fit(x_train,", "train and test samples test_samples = 100 x_train, y_train =", "set errors = y_test-hard_predictions probabilities_1 = learner_1.predict_proba(x_test) probabilities_2 = learner_2.predict_proba(x_test)", "# each wrongly predicted instance, for each base learner #", "Voting:', accuracy_score(y_test, hard_predictions)) # --- SECTION 1 --- # Import", "the predicted probaiblity of each base learner as # a", "accuracy_score(y_test, predictions_1)) print('L2:', accuracy_score(y_test, predictions_2)) print('L3:', accuracy_score(y_test, predictions_3)) # Accuracy", "y_train) learner_3.fit(x_train, y_train) # --- SECTION 5 --- # Predict", "in range(len(errors)): if not errors[i] == 0: x.append(i) y_1.append(probabilities_1[i][0]) y_2.append(probabilities_2[i][0])", "y_2.append(probabilities_2[i][0]) y_3.append(probabilities_3[i][0]) y_avg.append((probabilities_1[i][0]+probabilities_2[i][0]+probabilities_3[i][0])/3) # --- SECTION 3 --- # Plot", "each base learner as # a bar and the average", "wrongly predicted instances # 
and the predicted probabilities for the", "learner_3.predict_proba(x_test) # --- SECTION 2 --- # Store the predicted", "sklearn.metrics import accuracy_score # Load the dataset breast_cancer = datasets.load_breast_cancer()", "the base learner predictions predictions_1 = learner_1.predict(x_test) predictions_2 = learner_2.predict(x_test)", "2 --- # Get the wrongly predicted instances # and", "neighbors from sklearn.ensemble import VotingClassifier from sklearn.metrics import accuracy_score #", "1 --- # Import the required libraries from sklearn import", "Split the train and test samples test_samples = 100 x_train,", "libraries from sklearn import datasets, naive_bayes, svm, neighbors from sklearn.ensemble", "= naive_bayes.GaussianNB() learner_3 = neighbors.KNeighborsClassifier(n_neighbors=50) # --- SECTION 3 ---", "for the whole test set errors = y_test-hard_predictions probabilities_1 =", "the most probable class hard_predictions = voting.predict(x_test) # --- SECTION", "Import the required libraries from sklearn import datasets, naive_bayes, svm,", "= [0.5 for x in range(len(errors))] plt.plot(y, c='k', linestyle='--') plt.title('Positive", "voting='soft') # --- SECTION 4 --- # Fit classifier with", "of base learners print('L1:', accuracy_score(y_test, predictions_1)) print('L2:', accuracy_score(y_test, predictions_2)) print('L3:',", "y_train) learner_2.fit(x_train, y_train) learner_3.fit(x_train, y_train) # --- SECTION 5 ---", "learner_2), ('50NN', learner_3)], voting='soft') # --- SECTION 4 --- #", "test set errors = y_test-hard_predictions probabilities_1 = learner_1.predict_proba(x_test) probabilities_2 =", "# --- SECTION 4 --- # Fit classifier with the", "plt.bar(x, y_2, 2, label='NB') plt.bar(x, y_3, 1, label='50NN') plt.scatter(x, y_avg,", "the voting classifier voting = VotingClassifier([('5NN', learner_1), ('NB', learner_2), ('50NN',", "voting classifier voting = VotingClassifier([('5NN', learner_1), ('NB', learner_2), ('50NN', learner_3)],", 
"x[:-test_samples], y[:-test_samples] x_test, y_test = x[-test_samples:], y[-test_samples:] # --- SECTION", "Load the dataset breast_cancer = datasets.load_breast_cancer() x, y = breast_cancer.data,", "= x[-test_samples:], y[-test_samples:] # --- SECTION 2 --- # Instantiate", "the dataset breast_cancer = datasets.load_breast_cancer() x, y = breast_cancer.data, breast_cancer.target", "the learners (classifiers) learner_1 = neighbors.KNeighborsClassifier(n_neighbors=5) learner_2 = naive_bayes.GaussianNB() learner_3", "= learner_1.predict(x_test) predictions_2 = learner_2.predict(x_test) predictions_3 = learner_3.predict(x_test) # ---", "y = [0.5 for x in range(len(errors))] plt.plot(y, c='k', linestyle='--')", "2 --- # Store the predicted probability for # each", "wrongly predicted instance, for each base learner # as well", "0: x.append(i) y_1.append(probabilities_1[i][0]) y_2.append(probabilities_2[i][0]) y_3.append(probabilities_3[i][0]) y_avg.append((probabilities_1[i][0]+probabilities_2[i][0]+probabilities_3[i][0])/3) # --- SECTION 3", "1, label='50NN') plt.scatter(x, y_avg, marker='x', c='k', s=150, label='Average Positive', zorder=10)", "# a bar and the average probability as an X", "required libraries from sklearn import datasets, naive_bayes, svm, neighbors from", "for x in range(len(errors))] plt.plot(y, c='k', linestyle='--') plt.title('Positive Probability') plt.xlabel('Test", "instances # and the predicted probabilities for the whole test", "--- SECTION 2 --- # Store the predicted probability for", "6 --- # Get the base learner predictions predictions_1 =", "print('L2:', accuracy_score(y_test, predictions_2)) print('L3:', accuracy_score(y_test, predictions_3)) # Accuracy of hard", "as the average predicted probability # x=[] y_1=[] y_2=[] y_3=[]", "predictions_2)) print('L3:', accuracy_score(y_test, predictions_3)) # Accuracy of hard voting print('-'*30)", "as mpl import matplotlib.pyplot as plt mpl.style.use('seaborn-paper') # --- SECTION", "instance, 
for each base learner # as well as the", "plt.bar(x, y_1, 3, label='5NN') plt.bar(x, y_2, 2, label='NB') plt.bar(x, y_3,", "y_avg.append((probabilities_1[i][0]+probabilities_2[i][0]+probabilities_3[i][0])/3) # --- SECTION 3 --- # Plot the predicted", "each base learner # as well as the average predicted", "training data voting.fit(x_train, y_train) learner_1.fit(x_train, y_train) learner_2.fit(x_train, y_train) learner_3.fit(x_train, y_train)", "learner_2.fit(x_train, y_train) learner_3.fit(x_train, y_train) # --- SECTION 5 --- #", "predictions_1 = learner_1.predict(x_test) predictions_2 = learner_2.predict(x_test) predictions_3 = learner_3.predict(x_test) #", "the required libraries import matplotlib as mpl import matplotlib.pyplot as", "the whole test set errors = y_test-hard_predictions probabilities_1 = learner_1.predict_proba(x_test)", "--- # Predict the most probable class hard_predictions = voting.predict(x_test)", "= datasets.load_breast_cancer() x, y = breast_cancer.data, breast_cancer.target # Split the", "x_train, y_train = x[:-test_samples], y[:-test_samples] x_test, y_test = x[-test_samples:], y[-test_samples:]", "y_train) # --- SECTION 5 --- # Predict the most", "hard voting print('-'*30) print('Hard Voting:', accuracy_score(y_test, hard_predictions)) # --- SECTION", "base learners print('L1:', accuracy_score(y_test, predictions_1)) print('L2:', accuracy_score(y_test, predictions_2)) print('L3:', accuracy_score(y_test,", "# Import the required libraries import matplotlib as mpl import", "label='5NN') plt.bar(x, y_2, 2, label='NB') plt.bar(x, y_3, 1, label='50NN') plt.scatter(x,", "mpl import matplotlib.pyplot as plt mpl.style.use('seaborn-paper') # --- SECTION 2", "Accuracy of hard voting print('-'*30) print('Hard Voting:', accuracy_score(y_test, hard_predictions)) #", "an X plt.bar(x, y_1, 3, label='5NN') plt.bar(x, y_2, 2, label='NB')", "as an X plt.bar(x, y_1, 3, label='5NN') plt.bar(x, y_2, 2,", "# Import the required libraries from sklearn import 
datasets, naive_bayes,", "--- SECTION 1 --- # Import the required libraries import", "SECTION 7 --- # Accuracies of base learners print('L1:', accuracy_score(y_test,", "breast_cancer.target # Split the train and test samples test_samples =", "import datasets, naive_bayes, svm, neighbors from sklearn.ensemble import VotingClassifier from", "predictions_3)) # Accuracy of hard voting print('-'*30) print('Hard Voting:', accuracy_score(y_test,", "Positive', zorder=10) y = [0.5 for x in range(len(errors))] plt.plot(y,", "# Accuracy of hard voting print('-'*30) print('Hard Voting:', accuracy_score(y_test, hard_predictions))", "plt.scatter(x, y_avg, marker='x', c='k', s=150, label='Average Positive', zorder=10) y =", "5 --- # Predict the most probable class hard_predictions =", "x[-test_samples:], y[-test_samples:] # --- SECTION 2 --- # Instantiate the", "import VotingClassifier from sklearn.metrics import accuracy_score # Load the dataset", "# --- SECTION 3 --- # Instantiate the voting classifier", "--- # Instantiate the learners (classifiers) learner_1 = neighbors.KNeighborsClassifier(n_neighbors=5) learner_2", "voting print('-'*30) print('Hard Voting:', accuracy_score(y_test, hard_predictions)) # --- SECTION 1", "Instantiate the learners (classifiers) learner_1 = neighbors.KNeighborsClassifier(n_neighbors=5) learner_2 = naive_bayes.GaussianNB()", "# Get the wrongly predicted instances # and the predicted", "accuracy_score(y_test, predictions_2)) print('L3:', accuracy_score(y_test, predictions_3)) # Accuracy of hard voting", "VotingClassifier([('5NN', learner_1), ('NB', learner_2), ('50NN', learner_3)], voting='soft') # --- SECTION", "and the average probability as an X plt.bar(x, y_1, 3,", "as plt mpl.style.use('seaborn-paper') # --- SECTION 2 --- # Get", "100 x_train, y_train = x[:-test_samples], y[:-test_samples] x_test, y_test = x[-test_samples:],", "SECTION 4 --- # Fit classifier with the training data", "range(len(errors)): if not errors[i] == 0: x.append(i) 
y_1.append(probabilities_1[i][0]) y_2.append(probabilities_2[i][0]) y_3.append(probabilities_3[i][0])", "the training data voting.fit(x_train, y_train) learner_1.fit(x_train, y_train) learner_2.fit(x_train, y_train) learner_3.fit(x_train,", "probable class hard_predictions = voting.predict(x_test) # --- SECTION 6 ---", "predictions_1)) print('L2:', accuracy_score(y_test, predictions_2)) print('L3:', accuracy_score(y_test, predictions_3)) # Accuracy of", "not errors[i] == 0: x.append(i) y_1.append(probabilities_1[i][0]) y_2.append(probabilities_2[i][0]) y_3.append(probabilities_3[i][0]) y_avg.append((probabilities_1[i][0]+probabilities_2[i][0]+probabilities_3[i][0])/3) #", "from sklearn.ensemble import VotingClassifier from sklearn.metrics import accuracy_score # Load", "probabilities_2 = learner_2.predict_proba(x_test) probabilities_3 = learner_3.predict_proba(x_test) # --- SECTION 2", "sklearn.ensemble import VotingClassifier from sklearn.metrics import accuracy_score # Load the", "VotingClassifier from sklearn.metrics import accuracy_score # Load the dataset breast_cancer", "y_1.append(probabilities_1[i][0]) y_2.append(probabilities_2[i][0]) y_3.append(probabilities_3[i][0]) y_avg.append((probabilities_1[i][0]+probabilities_2[i][0]+probabilities_3[i][0])/3) # --- SECTION 3 --- #", "learner_2 = naive_bayes.GaussianNB() learner_3 = neighbors.KNeighborsClassifier(n_neighbors=50) # --- SECTION 3", "Import the required libraries import matplotlib as mpl import matplotlib.pyplot", "learner_1.predict(x_test) predictions_2 = learner_2.predict(x_test) predictions_3 = learner_3.predict(x_test) # --- SECTION", "SECTION 1 --- # Import the required libraries from sklearn", "print('L3:', accuracy_score(y_test, predictions_3)) # Accuracy of hard voting print('-'*30) print('Hard", "a bar and the average probability as an X plt.bar(x,", "--- # Accuracies of base learners print('L1:', accuracy_score(y_test, predictions_1)) print('L2:'," ]
[ "by Django 4.0.3 on 2022-03-23 14:31 import datetime from django.db", "migrations, models class Migration(migrations.Migration): dependencies = [ ('API', '0004_alter_news_date_time_alter_news_headline'), ]", "23, 17, 31, 17, 27766)), ), migrations.AlterField( model_name='news', name='headline', field=models.CharField(max_length=100),", "4.0.3 on 2022-03-23 14:31 import datetime from django.db import migrations,", "dependencies = [ ('API', '0004_alter_news_date_time_alter_news_headline'), ] operations = [ migrations.AlterField(", "17, 31, 17, 27766)), ), migrations.AlterField( model_name='news', name='headline', field=models.CharField(max_length=100), ),", "on 2022-03-23 14:31 import datetime from django.db import migrations, models", "'0004_alter_news_date_time_alter_news_headline'), ] operations = [ migrations.AlterField( model_name='news', name='date_time', field=models.DateTimeField(default=datetime.datetime(2022, 3,", "model_name='news', name='date_time', field=models.DateTimeField(default=datetime.datetime(2022, 3, 23, 17, 31, 17, 27766)), ),", "django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('API',", "import datetime from django.db import migrations, models class Migration(migrations.Migration): dependencies", "operations = [ migrations.AlterField( model_name='news', name='date_time', field=models.DateTimeField(default=datetime.datetime(2022, 3, 23, 17,", "[ migrations.AlterField( model_name='news', name='date_time', field=models.DateTimeField(default=datetime.datetime(2022, 3, 23, 17, 31, 17,", "name='date_time', field=models.DateTimeField(default=datetime.datetime(2022, 3, 23, 17, 31, 17, 27766)), ), migrations.AlterField(", "class Migration(migrations.Migration): dependencies = [ ('API', '0004_alter_news_date_time_alter_news_headline'), ] operations =", "] operations = [ migrations.AlterField( model_name='news', name='date_time', field=models.DateTimeField(default=datetime.datetime(2022, 3, 23,", "from 
django.db import migrations, models class Migration(migrations.Migration): dependencies = [", "[ ('API', '0004_alter_news_date_time_alter_news_headline'), ] operations = [ migrations.AlterField( model_name='news', name='date_time',", "field=models.DateTimeField(default=datetime.datetime(2022, 3, 23, 17, 31, 17, 27766)), ), migrations.AlterField( model_name='news',", "# Generated by Django 4.0.3 on 2022-03-23 14:31 import datetime", "Django 4.0.3 on 2022-03-23 14:31 import datetime from django.db import", "2022-03-23 14:31 import datetime from django.db import migrations, models class", "3, 23, 17, 31, 17, 27766)), ), migrations.AlterField( model_name='news', name='headline',", "Generated by Django 4.0.3 on 2022-03-23 14:31 import datetime from", "models class Migration(migrations.Migration): dependencies = [ ('API', '0004_alter_news_date_time_alter_news_headline'), ] operations", "Migration(migrations.Migration): dependencies = [ ('API', '0004_alter_news_date_time_alter_news_headline'), ] operations = [", "14:31 import datetime from django.db import migrations, models class Migration(migrations.Migration):", "('API', '0004_alter_news_date_time_alter_news_headline'), ] operations = [ migrations.AlterField( model_name='news', name='date_time', field=models.DateTimeField(default=datetime.datetime(2022,", "import migrations, models class Migration(migrations.Migration): dependencies = [ ('API', '0004_alter_news_date_time_alter_news_headline'),", "= [ ('API', '0004_alter_news_date_time_alter_news_headline'), ] operations = [ migrations.AlterField( model_name='news',", "= [ migrations.AlterField( model_name='news', name='date_time', field=models.DateTimeField(default=datetime.datetime(2022, 3, 23, 17, 31,", "31, 17, 27766)), ), migrations.AlterField( model_name='news', name='headline', field=models.CharField(max_length=100), ), ]", "<gh_stars>0 # Generated by Django 4.0.3 on 2022-03-23 14:31 import", "migrations.AlterField( model_name='news', name='date_time', 
field=models.DateTimeField(default=datetime.datetime(2022, 3, 23, 17, 31, 17, 27766)),", "datetime from django.db import migrations, models class Migration(migrations.Migration): dependencies =" ]
[ "# Scale to the range (-1,+1) samples = np.append(sample_train, sample_test,", "features to number of qubits pca = PCA(n_components=n).fit(sample_train) sample_train =", "pca = PCA(n_components=n).fit(sample_train) sample_train = pca.transform(sample_train) sample_test = pca.transform(sample_test) #", "Pick training size number of samples from each distro training_input", "# # (C) Copyright IBM 2018, 2020. # # This", "root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.", "numpy as np from sklearn import datasets from sklearn.model_selection import", "of Qiskit. # # (C) Copyright IBM 2018, 2020. #", "in the root directory # of this source tree or", "as np from sklearn import datasets from sklearn.model_selection import train_test_split", "from qiskit.aqua import MissingOptionalLibraryError def iris(training_size, test_size, n, plot_data=False): \"\"\"", "number of features to number of qubits pca = PCA(n_components=n).fit(sample_train)", "samples from each distro training_input = {key: (sample_train[label_train == k,", ":])[:test_size] for k, key in enumerate(class_labels)} if plot_data: try: import", "0][:training_size], sample_train[label_train == k, 1][:training_size]) plt.title(\"Iris dataset\") plt.show() return sample_train,", "== k, :])[:test_size] for k, key in enumerate(class_labels)} if plot_data:", "ex for k in range(0, 3): plt.scatter(sample_train[label_train == k, 0][:training_size],", "iris dataset \"\"\" import numpy as np from sklearn import", "PCA(n_components=n).fit(sample_train) sample_train = pca.transform(sample_train) sample_test = pca.transform(sample_test) # Scale to", "except ImportError as ex: raise MissingOptionalLibraryError( libname='Matplotlib', name='iris', pip_install='pip install", "std_scale.transform(sample_test) # Now reduce number of features to number of", "works of this code must retain this # copyright notice,", "std_scale = StandardScaler().fit(sample_train) sample_train = 
std_scale.transform(sample_train) sample_test = std_scale.transform(sample_test) #", "# that they have been altered from the originals. \"\"\"", "r'B', r'C'] data, target = datasets.load_iris(return_X_y=True) sample_train, sample_test, label_train, label_test", "target = datasets.load_iris(return_X_y=True) sample_train, sample_test, label_train, label_test = \\ train_test_split(data,", "\"\"\" iris dataset \"\"\" import numpy as np from sklearn", "= MinMaxScaler((-1, 1)).fit(samples) sample_train = minmax_scale.transform(sample_train) sample_test = minmax_scale.transform(sample_test) #", "== k, 0][:training_size], sample_train[label_train == k, 1][:training_size]) plt.title(\"Iris dataset\") plt.show()", "np from sklearn import datasets from sklearn.model_selection import train_test_split from", "gaussian around 0 with unit variance std_scale = StandardScaler().fit(sample_train) sample_train", "at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of", "must retain this # copyright notice, and modified files need", "range(0, 3): plt.scatter(sample_train[label_train == k, 0][:training_size], sample_train[label_train == k, 1][:training_size])", "number of qubits pca = PCA(n_components=n).fit(sample_train) sample_train = pca.transform(sample_train) sample_test", "minmax_scale.transform(sample_train) sample_test = minmax_scale.transform(sample_test) # Pick training size number of", "libname='Matplotlib', name='iris', pip_install='pip install matplotlib') from ex for k in", "a copy of this license in the LICENSE.txt file in", "# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 
# #", "if plot_data: try: import matplotlib.pyplot as plt except ImportError as", "def iris(training_size, test_size, n, plot_data=False): \"\"\" returns iris dataset \"\"\"", "r'C'] data, target = datasets.load_iris(return_X_y=True) sample_train, sample_test, label_train, label_test =", "= pca.transform(sample_test) # Scale to the range (-1,+1) samples =", "from sklearn.preprocessing import StandardScaler, MinMaxScaler from sklearn.decomposition import PCA from", "axis=0) minmax_scale = MinMaxScaler((-1, 1)).fit(samples) sample_train = minmax_scale.transform(sample_train) sample_test =", "sample_train = minmax_scale.transform(sample_train) sample_test = minmax_scale.transform(sample_test) # Pick training size", "k, :])[:test_size] for k, key in enumerate(class_labels)} if plot_data: try:", "\"\"\" import numpy as np from sklearn import datasets from", "directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. #", "in enumerate(class_labels)} test_input = {key: (sample_test[label_test == k, :])[:test_size] for", "to number of qubits pca = PCA(n_components=n).fit(sample_train) sample_train = pca.transform(sample_train)", "train_test_split(data, target, test_size=1, random_state=42) # Now we standardize for gaussian", "ImportError as ex: raise MissingOptionalLibraryError( libname='Matplotlib', name='iris', pip_install='pip install matplotlib')", "unit variance std_scale = StandardScaler().fit(sample_train) sample_train = std_scale.transform(sample_train) sample_test =", "# This code is part of Qiskit. 
# # (C)", "3): plt.scatter(sample_train[label_train == k, 0][:training_size], sample_train[label_train == k, 1][:training_size]) plt.title(\"Iris", "\\ train_test_split(data, target, test_size=1, random_state=42) # Now we standardize for", "plot_data=False): \"\"\" returns iris dataset \"\"\" class_labels = [r'A', r'B',", "datasets from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler, MinMaxScaler", "import MissingOptionalLibraryError def iris(training_size, test_size, n, plot_data=False): \"\"\" returns iris", "target, test_size=1, random_state=42) # Now we standardize for gaussian around", "range (-1,+1) samples = np.append(sample_train, sample_test, axis=0) minmax_scale = MinMaxScaler((-1,", "minmax_scale = MinMaxScaler((-1, 1)).fit(samples) sample_train = minmax_scale.transform(sample_train) sample_test = minmax_scale.transform(sample_test)", "sample_test, label_train, label_test = \\ train_test_split(data, target, test_size=1, random_state=42) #", "class_labels = [r'A', r'B', r'C'] data, target = datasets.load_iris(return_X_y=True) sample_train,", "# # This code is licensed under the Apache License,", "the originals. \"\"\" iris dataset \"\"\" import numpy as np", "to the range (-1,+1) samples = np.append(sample_train, sample_test, axis=0) minmax_scale", "Apache License, Version 2.0. You may # obtain a copy", "that they have been altered from the originals. 
\"\"\" iris", "label_train, label_test = \\ train_test_split(data, target, test_size=1, random_state=42) # Now", "for k, key in enumerate(class_labels)} test_input = {key: (sample_test[label_test ==", "name='iris', pip_install='pip install matplotlib') from ex for k in range(0,", "variance std_scale = StandardScaler().fit(sample_train) sample_train = std_scale.transform(sample_train) sample_test = std_scale.transform(sample_test)", "in range(0, 3): plt.scatter(sample_train[label_train == k, 0][:training_size], sample_train[label_train == k,", "from the originals. \"\"\" iris dataset \"\"\" import numpy as", "[r'A', r'B', r'C'] data, target = datasets.load_iris(return_X_y=True) sample_train, sample_test, label_train,", "pca.transform(sample_train) sample_test = pca.transform(sample_test) # Scale to the range (-1,+1)", "MissingOptionalLibraryError def iris(training_size, test_size, n, plot_data=False): \"\"\" returns iris dataset", "\"\"\" class_labels = [r'A', r'B', r'C'] data, target = datasets.load_iris(return_X_y=True)", "sample_train = std_scale.transform(sample_train) sample_test = std_scale.transform(sample_test) # Now reduce number", "LICENSE.txt file in the root directory # of this source", "have been altered from the originals. \"\"\" iris dataset \"\"\"", "You may # obtain a copy of this license in", "indicating # that they have been altered from the originals.", "may # obtain a copy of this license in the", "= pca.transform(sample_train) sample_test = pca.transform(sample_test) # Scale to the range", "# # Any modifications or derivative works of this code", "ex: raise MissingOptionalLibraryError( libname='Matplotlib', name='iris', pip_install='pip install matplotlib') from ex", "part of Qiskit. 
# # (C) Copyright IBM 2018, 2020.", "matplotlib') from ex for k in range(0, 3): plt.scatter(sample_train[label_train ==", "for k in range(0, 3): plt.scatter(sample_train[label_train == k, 0][:training_size], sample_train[label_train", "of qubits pca = PCA(n_components=n).fit(sample_train) sample_train = pca.transform(sample_train) sample_test =", "import train_test_split from sklearn.preprocessing import StandardScaler, MinMaxScaler from sklearn.decomposition import", "raise MissingOptionalLibraryError( libname='Matplotlib', name='iris', pip_install='pip install matplotlib') from ex for", "dataset \"\"\" import numpy as np from sklearn import datasets", "qubits pca = PCA(n_components=n).fit(sample_train) sample_train = pca.transform(sample_train) sample_test = pca.transform(sample_test)", "StandardScaler().fit(sample_train) sample_train = std_scale.transform(sample_train) sample_test = std_scale.transform(sample_test) # Now reduce", "carry a notice indicating # that they have been altered", "sklearn.preprocessing import StandardScaler, MinMaxScaler from sklearn.decomposition import PCA from qiskit.aqua", "= datasets.load_iris(return_X_y=True) sample_train, sample_test, label_train, label_test = \\ train_test_split(data, target,", "# obtain a copy of this license in the LICENSE.txt", "each distro training_input = {key: (sample_train[label_train == k, :])[:training_size] for", "{key: (sample_train[label_train == k, :])[:training_size] for k, key in enumerate(class_labels)}", "This code is part of Qiskit. # # (C) Copyright", "licensed under the Apache License, Version 2.0. You may #", "source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or", "# (C) Copyright IBM 2018, 2020. 
# # This code", "enumerate(class_labels)} if plot_data: try: import matplotlib.pyplot as plt except ImportError", "notice indicating # that they have been altered from the", "code must retain this # copyright notice, and modified files", "# Pick training size number of samples from each distro", "plot_data: try: import matplotlib.pyplot as plt except ImportError as ex:", "sample_train[label_train == k, 1][:training_size]) plt.title(\"Iris dataset\") plt.show() return sample_train, training_input,", "this license in the LICENSE.txt file in the root directory", "the root directory # of this source tree or at", "n, plot_data=False): \"\"\" returns iris dataset \"\"\" class_labels = [r'A',", "try: import matplotlib.pyplot as plt except ImportError as ex: raise", "for k, key in enumerate(class_labels)} if plot_data: try: import matplotlib.pyplot", "test_size, n, plot_data=False): \"\"\" returns iris dataset \"\"\" class_labels =", "with unit variance std_scale = StandardScaler().fit(sample_train) sample_train = std_scale.transform(sample_train) sample_test", "Qiskit. # # (C) Copyright IBM 2018, 2020. # #", "the LICENSE.txt file in the root directory # of this", "derivative works of this code must retain this # copyright", "1)).fit(samples) sample_train = minmax_scale.transform(sample_train) sample_test = minmax_scale.transform(sample_test) # Pick training", "import numpy as np from sklearn import datasets from sklearn.model_selection", "of samples from each distro training_input = {key: (sample_train[label_train ==", "we standardize for gaussian around 0 with unit variance std_scale", "as plt except ImportError as ex: raise MissingOptionalLibraryError( libname='Matplotlib', name='iris',", "k in range(0, 3): plt.scatter(sample_train[label_train == k, 0][:training_size], sample_train[label_train ==", "the Apache License, Version 2.0. 
You may # obtain a", "= \\ train_test_split(data, target, test_size=1, random_state=42) # Now we standardize", "sample_test = minmax_scale.transform(sample_test) # Pick training size number of samples", "under the Apache License, Version 2.0. You may # obtain", ":])[:training_size] for k, key in enumerate(class_labels)} test_input = {key: (sample_test[label_test", "Version 2.0. You may # obtain a copy of this", "PCA from qiskit.aqua import MissingOptionalLibraryError def iris(training_size, test_size, n, plot_data=False):", "or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works", "MissingOptionalLibraryError( libname='Matplotlib', name='iris', pip_install='pip install matplotlib') from ex for k", "IBM 2018, 2020. # # This code is licensed under", "as ex: raise MissingOptionalLibraryError( libname='Matplotlib', name='iris', pip_install='pip install matplotlib') from", "iris dataset \"\"\" class_labels = [r'A', r'B', r'C'] data, target", "0 with unit variance std_scale = StandardScaler().fit(sample_train) sample_train = std_scale.transform(sample_train)", "(sample_test[label_test == k, :])[:test_size] for k, key in enumerate(class_labels)} if", "plt except ImportError as ex: raise MissingOptionalLibraryError( libname='Matplotlib', name='iris', pip_install='pip", "# This code is licensed under the Apache License, Version", "Now we standardize for gaussian around 0 with unit variance", "= np.append(sample_train, sample_test, axis=0) minmax_scale = MinMaxScaler((-1, 1)).fit(samples) sample_train =", "pip_install='pip install matplotlib') from ex for k in range(0, 3):", "test_size=1, random_state=42) # Now we standardize for gaussian around 0", "of this license in the LICENSE.txt file in the root", "retain this # copyright notice, and modified files need to", "number of samples from each distro training_input = {key: (sample_train[label_train", "= {key: (sample_train[label_train == k, :])[:training_size] for k, key in", "import PCA 
from qiskit.aqua import MissingOptionalLibraryError def iris(training_size, test_size, n,", "== k, :])[:training_size] for k, key in enumerate(class_labels)} test_input =", "this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications", "qiskit.aqua import MissingOptionalLibraryError def iris(training_size, test_size, n, plot_data=False): \"\"\" returns", "k, key in enumerate(class_labels)} test_input = {key: (sample_test[label_test == k,", "test_input = {key: (sample_test[label_test == k, :])[:test_size] for k, key", "(-1,+1) samples = np.append(sample_train, sample_test, axis=0) minmax_scale = MinMaxScaler((-1, 1)).fit(samples)", "altered from the originals. \"\"\" iris dataset \"\"\" import numpy", "the range (-1,+1) samples = np.append(sample_train, sample_test, axis=0) minmax_scale =", "training size number of samples from each distro training_input =", "in enumerate(class_labels)} if plot_data: try: import matplotlib.pyplot as plt except", "label_test = \\ train_test_split(data, target, test_size=1, random_state=42) # Now we", "std_scale.transform(sample_train) sample_test = std_scale.transform(sample_test) # Now reduce number of features", "they have been altered from the originals. 
\"\"\" iris dataset", "samples = np.append(sample_train, sample_test, axis=0) minmax_scale = MinMaxScaler((-1, 1)).fit(samples) sample_train", "from ex for k in range(0, 3): plt.scatter(sample_train[label_train == k,", "file in the root directory # of this source tree", "Now reduce number of features to number of qubits pca", "to carry a notice indicating # that they have been", "= minmax_scale.transform(sample_test) # Pick training size number of samples from", "k, :])[:training_size] for k, key in enumerate(class_labels)} test_input = {key:", "import matplotlib.pyplot as plt except ImportError as ex: raise MissingOptionalLibraryError(", "import StandardScaler, MinMaxScaler from sklearn.decomposition import PCA from qiskit.aqua import", "= StandardScaler().fit(sample_train) sample_train = std_scale.transform(sample_train) sample_test = std_scale.transform(sample_test) # Now", "tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative", "iris(training_size, test_size, n, plot_data=False): \"\"\" returns iris dataset \"\"\" class_labels", "minmax_scale.transform(sample_test) # Pick training size number of samples from each", "distro training_input = {key: (sample_train[label_train == k, :])[:training_size] for k,", "sample_test, axis=0) minmax_scale = MinMaxScaler((-1, 1)).fit(samples) sample_train = minmax_scale.transform(sample_train) sample_test", "sklearn import datasets from sklearn.model_selection import train_test_split from sklearn.preprocessing import", "2020. # # This code is licensed under the Apache", "around 0 with unit variance std_scale = StandardScaler().fit(sample_train) sample_train =", "size number of samples from each distro training_input = {key:", "key in enumerate(class_labels)} test_input = {key: (sample_test[label_test == k, :])[:test_size]", "dataset \"\"\" class_labels = [r'A', r'B', r'C'] data, target =", "of this code must retain this # copyright notice, and", "code is part of Qiskit. 
# # (C) Copyright IBM", "originals. \"\"\" iris dataset \"\"\" import numpy as np from", "= [r'A', r'B', r'C'] data, target = datasets.load_iris(return_X_y=True) sample_train, sample_test,", "a notice indicating # that they have been altered from", "notice, and modified files need to carry a notice indicating", "of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any", "from sklearn import datasets from sklearn.model_selection import train_test_split from sklearn.preprocessing", "k, key in enumerate(class_labels)} if plot_data: try: import matplotlib.pyplot as", "copyright notice, and modified files need to carry a notice", "standardize for gaussian around 0 with unit variance std_scale =", "License, Version 2.0. You may # obtain a copy of", "2018, 2020. # # This code is licensed under the", "code is licensed under the Apache License, Version 2.0. You", "== k, 1][:training_size]) plt.title(\"Iris dataset\") plt.show() return sample_train, training_input, test_input,", "license in the LICENSE.txt file in the root directory #", "np.append(sample_train, sample_test, axis=0) minmax_scale = MinMaxScaler((-1, 1)).fit(samples) sample_train = minmax_scale.transform(sample_train)", "Copyright IBM 2018, 2020. 
# # This code is licensed", "(sample_train[label_train == k, :])[:training_size] for k, key in enumerate(class_labels)} test_input", "of features to number of qubits pca = PCA(n_components=n).fit(sample_train) sample_train", "# Now we standardize for gaussian around 0 with unit", "datasets.load_iris(return_X_y=True) sample_train, sample_test, label_train, label_test = \\ train_test_split(data, target, test_size=1,", "or derivative works of this code must retain this #", "= minmax_scale.transform(sample_train) sample_test = minmax_scale.transform(sample_test) # Pick training size number", "obtain a copy of this license in the LICENSE.txt file", "random_state=42) # Now we standardize for gaussian around 0 with", "install matplotlib') from ex for k in range(0, 3): plt.scatter(sample_train[label_train", "http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this", "sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler, MinMaxScaler from sklearn.decomposition", "from sklearn.decomposition import PCA from qiskit.aqua import MissingOptionalLibraryError def iris(training_size,", "{key: (sample_test[label_test == k, :])[:test_size] for k, key in enumerate(class_labels)}", "sample_train, sample_test, label_train, label_test = \\ train_test_split(data, target, test_size=1, random_state=42)", "is licensed under the Apache License, Version 2.0. You may", "pca.transform(sample_test) # Scale to the range (-1,+1) samples = np.append(sample_train,", "2.0. 
You may # obtain a copy of this license", "\"\"\" returns iris dataset \"\"\" class_labels = [r'A', r'B', r'C']", "copy of this license in the LICENSE.txt file in the", "key in enumerate(class_labels)} if plot_data: try: import matplotlib.pyplot as plt", "# Now reduce number of features to number of qubits", "from each distro training_input = {key: (sample_train[label_train == k, :])[:training_size]", "modified files need to carry a notice indicating # that", "StandardScaler, MinMaxScaler from sklearn.decomposition import PCA from qiskit.aqua import MissingOptionalLibraryError", "MinMaxScaler((-1, 1)).fit(samples) sample_train = minmax_scale.transform(sample_train) sample_test = minmax_scale.transform(sample_test) # Pick", "= std_scale.transform(sample_test) # Now reduce number of features to number", "train_test_split from sklearn.preprocessing import StandardScaler, MinMaxScaler from sklearn.decomposition import PCA", "need to carry a notice indicating # that they have", "returns iris dataset \"\"\" class_labels = [r'A', r'B', r'C'] data,", "training_input = {key: (sample_train[label_train == k, :])[:training_size] for k, key", "been altered from the originals. 
\"\"\" iris dataset \"\"\" import", "in the LICENSE.txt file in the root directory # of", "sample_test = std_scale.transform(sample_test) # Now reduce number of features to", "MinMaxScaler from sklearn.decomposition import PCA from qiskit.aqua import MissingOptionalLibraryError def", "Scale to the range (-1,+1) samples = np.append(sample_train, sample_test, axis=0)", "this code must retain this # copyright notice, and modified", "# Any modifications or derivative works of this code must", "for gaussian around 0 with unit variance std_scale = StandardScaler().fit(sample_train)", "this # copyright notice, and modified files need to carry", "import datasets from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler,", "= {key: (sample_test[label_test == k, :])[:test_size] for k, key in", "Any modifications or derivative works of this code must retain", "modifications or derivative works of this code must retain this", "# copyright notice, and modified files need to carry a", "enumerate(class_labels)} test_input = {key: (sample_test[label_test == k, :])[:test_size] for k,", "k, 1][:training_size]) plt.title(\"Iris dataset\") plt.show() return sample_train, training_input, test_input, class_labels", "files need to carry a notice indicating # that they", "reduce number of features to number of qubits pca =", "is part of Qiskit. 
# # (C) Copyright IBM 2018,", "matplotlib.pyplot as plt except ImportError as ex: raise MissingOptionalLibraryError( libname='Matplotlib',", "This code is licensed under the Apache License, Version 2.0.", "and modified files need to carry a notice indicating #", "sklearn.decomposition import PCA from qiskit.aqua import MissingOptionalLibraryError def iris(training_size, test_size,", "data, target = datasets.load_iris(return_X_y=True) sample_train, sample_test, label_train, label_test = \\", "sample_train = pca.transform(sample_train) sample_test = pca.transform(sample_test) # Scale to the", "from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler, MinMaxScaler from", "(C) Copyright IBM 2018, 2020. # # This code is", "= std_scale.transform(sample_train) sample_test = std_scale.transform(sample_test) # Now reduce number of", "= PCA(n_components=n).fit(sample_train) sample_train = pca.transform(sample_train) sample_test = pca.transform(sample_test) # Scale", "k, 0][:training_size], sample_train[label_train == k, 1][:training_size]) plt.title(\"Iris dataset\") plt.show() return", "sample_test = pca.transform(sample_test) # Scale to the range (-1,+1) samples", "plt.scatter(sample_train[label_train == k, 0][:training_size], sample_train[label_train == k, 1][:training_size]) plt.title(\"Iris dataset\")" ]
[ "@pytest.mark.parametrize('view_name', ['get', 'get_web_message']) def test_get_creates_authorization_response_for_trusted_clients(self, controller, auth_client, authenticated_user, pyramid_request, view_name):", "test_it_creates_revocation_response(self, pyramid_request, controller, oauth_provider): controller.post() oauth_provider.create_revocation_response.assert_called_once_with( pyramid_request.url, pyramid_request.method, pyramid_request.POST, pyramid_request.headers)", "test_get_web_message_renders_template_for_trusted_clients(self, controller, auth_client): auth_client.trusted = True assert controller.request.override_renderer is None", "'get_web_message'), ]) def test_get_returns_expected_context(self, controller, auth_client, authenticated_user, oauth_request, response_mode, view_name):", "controller.request.override_renderer is None controller.get_web_message() assert controller.request.override_renderer == 'h:templates/oauth/authorize_web_message.html.jinja2' @pytest.mark.usefixtures('authenticated_user') def", "test_get_web_message_allows_empty_state_in_context_for_trusted_clients(self, controller, auth_client, oauth_provider): auth_client.trusted = True headers = {'Location':", "= response_mode view = getattr(controller, view_name) assert view() == {", "@pytest.mark.usefixtures('authenticated_user') @pytest.mark.parametrize('view_name', ['post', 'post_web_message']) def test_post_raises_for_invalid_request(self, controller, view_name): controller.oauth.create_authorization_response.side_effect =", "True assert controller.request.override_renderer is None controller.get_web_message() assert controller.request.override_renderer == 'h:templates/oauth/authorize_web_message.html.jinja2'", "json.dumps({'error': 'invalid_request'}) oauth_provider.create_token_response.return_value = ({}, body, 400) with pytest.raises(httpexceptions.HTTPBadRequest) as", "assert controller.request.override_renderer is None controller.get_web_message() assert 
controller.request.override_renderer == 'h:templates/oauth/authorize_web_message.html.jinja2' @pytest.mark.usefixtures('authenticated_user')", "svc @pytest.fixture def pyramid_request(self, pyramid_request): pyramid_request.url = 'http://example.com/auth?client_id=the-client-id&response_type=code&state=foobar' return pyramid_request", "oauth_provider.create_token_response.assert_called_once_with( pyramid_request.url, pyramid_request.method, pyramid_request.POST, pyramid_request.headers) def test_it_returns_correct_response_on_success(self, controller, oauth_provider): body", "OAuthTokenError('the error message', 'error_type', status_code=403) views.api_token_error(context, pyramid_request) assert pyramid_request.response.status_code ==", "'oauth_provider', 'user_svc') class TestOAuthAuthorizeController(object): @pytest.mark.usefixtures('authenticated_user') @pytest.mark.parametrize('view_name', ['get', 'get_web_message']) def test_get_validates_request(self,", "assert result == {'userid': oauth_token.userid, 'client': {'id': oauth_token.authclient.id, 'name': oauth_token.authclient.name},", "view() assert exc.value.description == 'boom!' 
def test_post_redirects_to_client(self, controller, auth_client): response", "oauth_provider): body = json.dumps({'error': 'invalid_request'}) oauth_provider.create_token_response.return_value = ({}, body, 400)", "pyramid_config, user_svc): user = factories.User.build() pyramid_config.testing_securitypolicy(user.userid) def fake_fetch(userid): if userid", "result == {'userid': developer_token.userid, 'issued_at': utc_iso8601(developer_token.created), 'expires_at': None, 'expired': False}", "pyramid_request): context = OAuthTokenError('error description', 'error_type') result = views.api_token_error(context, pyramid_request)", "view_name): controller.oauth.create_authorization_response.side_effect = InvalidRequestFatalError('boom!') with pytest.raises(InvalidRequestFatalError) as exc: view =", "return factories.AuthClient(name='Test Client', redirect_uri='http://client.com/auth/callback', response_type=ResponseType.code) @pytest.fixture def user_svc(self, pyramid_config, pyramid_request):", "auth_client.trusted = True view = getattr(controller, view_name) view() controller.oauth.create_authorization_response.assert_called_once_with( pyramid_request.url,", "parsed_url.path == '/login' assert urlparse.parse_qs(parsed_url.query) == {'next': [pyramid_request.url], 'for_oauth': ['True']}", "= None return views.OAuthAuthorizeController(None, pyramid_request) @pytest.fixture def oauth_request(self): return OAuthRequest('/')", "svc = mock.create_autospec(OAuthProviderService, instance=True) scopes = ['annotation:read', 'annotation:write'] credentials =", "oauth_provider): body = json.dumps({'access_token': 'the-access-token'}) oauth_provider.create_token_response.return_value = ({}, body, 200)", "from pyramid import httpexceptions from h._compat import urlparse from h.exceptions", "@pytest.mark.usefixtures('routes', 'oauth_provider', 'user_svc') class TestOAuthAuthorizeController(object): @pytest.mark.usefixtures('authenticated_user') @pytest.mark.parametrize('view_name', 
['get', 'get_web_message']) def", "pyramid_config, pyramid_request): svc = mock.Mock(spec_set=user_service_factory(None, pyramid_request)) pyramid_config.register_service(svc, name='user') return svc", "def test_get_web_message_returns_context_for_trusted_clients(self, controller, auth_client): auth_client.trusted = True response = controller.get_web_message()", "@pytest.mark.usefixtures('oauth_provider') class TestOAuthRevocationController(object): def test_it_creates_revocation_response(self, pyramid_request, controller, oauth_provider): controller.post() oauth_provider.create_revocation_response.assert_called_once_with(", "token is missing' in exc.value.message def test_it_raises_error_when_token_is_empty(self, pyramid_request): pyramid_request.auth_token =", "utc_iso8601(oauth_token.expires), 'expired': oauth_token.expired} def test_returns_debug_data_for_developer_token(self, pyramid_request, token_service, developer_token): pyramid_request.auth_token =", "= controller.post_web_message() assert response['state'] is None @pytest.fixture def controller(self, pyramid_request):", "'the-access-token'} def test_it_raises_when_error(self, controller, oauth_provider): body = json.dumps({'error': 'invalid_request'}) oauth_provider.create_token_response.return_value", "assert exc.value.body == body @pytest.fixture def controller(self, pyramid_request): pyramid_request.method =", "test_get_creates_authorization_response_for_trusted_clients(self, controller, auth_client, authenticated_user, pyramid_request, view_name): auth_client.trusted = True view", "user.userid: return user user_svc.fetch.side_effect = fake_fetch return user @pytest.fixture def", "controller.post_web_message() assert response['state'] is None @pytest.fixture def controller(self, pyramid_request): pyramid_request.override_renderer", "redirect_uri='http://client.com/auth/callback', response_type=ResponseType.code) @pytest.fixture def user_svc(self, pyramid_config, pyramid_request): svc = 
mock.Mock(spec_set=user_service_factory(None,", "'&scope=exploit' view = getattr(controller, view_name) view() controller.oauth.create_authorization_response.assert_called_once_with( pyramid_request.url, credentials={'user': authenticated_user},", "400) with pytest.raises(httpexceptions.HTTPBadRequest) as exc: controller.post() assert exc.value.body == body", "assert pyramid_request.response.status_code == 403 def test_it_returns_the_error(self, pyramid_request): context = OAuthTokenError('',", "== body @pytest.fixture def controller(self, pyramid_request): pyramid_request.method = 'POST' pyramid_request.POST['grant_type']", "api_auth as views @pytest.mark.usefixtures('routes', 'oauth_provider', 'user_svc') class TestOAuthAuthorizeController(object): @pytest.mark.usefixtures('authenticated_user') @pytest.mark.parametrize('view_name',", "oauth_request, response_mode, view_name): oauth_request.response_mode = response_mode view = getattr(controller, view_name)", "svc.create_revocation_response.return_value = ({}, '{}', 200) pyramid_config.register_service(svc, name='oauth_provider') return svc class", "'origin': 'http://client.com', 'state': 'foobar', } @pytest.mark.usefixtures('authenticated_user') def test_get_web_message_allows_empty_state_in_context_for_trusted_clients(self, controller, auth_client,", "== '/login' assert urlparse.parse_qs(parsed_url.query) == {'next': [pyramid_request.url], 'for_oauth': ['True']} @pytest.mark.parametrize('response_mode,view_name',", "'missing_token' assert 'Bearer token is missing' in exc.value.message def test_it_validates_token(self,", "pyramid_request, view_name): view = getattr(controller, view_name) view() controller.oauth.validate_authorization_request.assert_called_once_with( pyramid_request.url) @pytest.mark.parametrize('view_name',", "= mock.Mock(spec_set=user_service_factory(None, pyramid_request)) pyramid_config.register_service(svc, name='user') return svc @pytest.fixture def pyramid_request(self,", 
"pyramid_config.register_service(svc, name='oauth_provider') return svc @pytest.fixture def auth_client(self, factories): return factories.AuthClient(name='Test", "view = getattr(controller, view_name) view() controller.oauth.validate_authorization_request.assert_called_once_with( pyramid_request.url) @pytest.mark.parametrize('view_name', ['get', 'get_web_message'])", "def test_it_returns_the_error(self, pyramid_request): context = OAuthTokenError('', 'error_type') result = views.api_token_error(context,", "= OAuthTokenError('', 'error_type') result = views.api_token_error(context, pyramid_request) assert result['error'] ==", "= InvalidRequestFatalError('boom!') with pytest.raises(InvalidRequestFatalError) as exc: view = getattr(controller, view_name)", "pyramid_request.auth_token = None with pytest.raises(OAuthTokenError) as exc: views.debug_token(pyramid_request) assert exc.value.type", "developer_token): pyramid_request.auth_token = developer_token.value token_service.fetch.return_value = developer_token result = views.debug_token(pyramid_request)", "developer_token result = views.debug_token(pyramid_request) assert result == {'userid': developer_token.userid, 'issued_at':", "pyramid_request(self, pyramid_request): pyramid_request.url = 'http://example.com/auth?client_id=the-client-id&response_type=code&state=foobar' return pyramid_request @pytest.fixture def authenticated_user(self,", "{'next': [pyramid_request.url], 'for_oauth': ['True']} @pytest.mark.parametrize('response_mode,view_name', [ (None, 'get'), ('web_message', 'get_web_message'),", "pyramid_request.url, pyramid_request.method, pyramid_request.POST, pyramid_request.headers) def test_it_returns_correct_response_on_success(self, controller, oauth_provider): body =", "factories.User.build() pyramid_config.testing_securitypolicy(user.userid) def fake_fetch(userid): if userid == user.userid: return user", "view = getattr(controller, view_name) view() parsed_url = urlparse.urlparse(exc.value.location) 
assert parsed_url.path", "'get'), ('web_message', 'get_web_message'), ]) def test_get_returns_expected_context(self, controller, auth_client, authenticated_user, oauth_request,", "== { 'client_id': auth_client.id, 'client_name': auth_client.name, 'response_mode': response_mode, 'response_type': auth_client.response_type.value,", "missing' in exc.value.message def test_it_raises_error_when_token_is_empty(self, pyramid_request): pyramid_request.auth_token = '' with", "exist or is expired' in exc.value.message def test_returns_debug_data_for_oauth_token(self, pyramid_request, token_service,", "= views.api_token_error(context, pyramid_request) assert result['error'] == 'error_type' def test_it_returns_error_description(self, pyramid_request):", "body @pytest.fixture def controller(self, pyramid_request): pyramid_request.method = 'POST' pyramid_request.POST['grant_type'] =", "as exc: controller.post() assert exc.value.body == body @pytest.fixture def controller(self,", "= OAuthTokenError('', 'invalid_request') result = views.api_token_error(context, pyramid_request) assert 'error_description' not", "test_post_redirects_to_client(self, controller, auth_client): response = controller.post() expected = '{}?code=abcdef123456&state=foobar'.format(auth_client.redirect_uri) assert", "token_service.fetch.return_value = oauth_token result = views.debug_token(pyramid_request) assert result == {'userid':", "response = controller.post_web_message() assert response == { 'code': 'abcdef123456', 'origin':", "{'client_id': auth_client.id, 'state': 'foobar', 'request': oauth_request} svc.validate_authorization_request.return_value = (scopes, credentials)", "as views @pytest.mark.usefixtures('routes', 'oauth_provider', 'user_svc') class TestOAuthAuthorizeController(object): @pytest.mark.usefixtures('authenticated_user') @pytest.mark.parametrize('view_name', ['get',", "def test_it_raises_when_error(self, controller, oauth_provider): body = json.dumps({'error': 'invalid_request'}) 
oauth_provider.create_token_response.return_value =", "def test_post_web_message_returns_expected_context(self, controller, auth_client): response = controller.post_web_message() assert response ==", "'authorization_code' pyramid_request.POST['code'] = 'the-authz-code' pyramid_request.headers = {'X-Test-ID': '1234'} return views.OAuthAccessTokenController(pyramid_request)", "True view = getattr(controller, view_name) view() controller.oauth.create_authorization_response.assert_called_once_with( pyramid_request.url, credentials={'user': authenticated_user},", "= {'client_id': auth_client.id, 'state': 'foobar', 'request': oauth_request} svc.validate_authorization_request.return_value = (scopes,", "controller, oauth_provider): body = json.dumps({'error': 'invalid_request'}) oauth_provider.create_revocation_response.return_value = ({}, body,", "view = getattr(controller, view_name) view() controller.oauth.create_authorization_response.assert_called_once_with( pyramid_request.url, credentials={'user': authenticated_user}, scopes=DEFAULT_SCOPES)", "'error_type') result = views.api_token_error(context, pyramid_request) assert result['error_description'] == 'error description'", "auth_client.id, 'client_name': auth_client.name, 'response_mode': response_mode, 'response_type': auth_client.response_type.value, 'state': 'foobar', 'username':", "-*- from __future__ import unicode_literals import datetime import json import", "pyramid_request.POST['grant_type'] = 'authorization_code' pyramid_request.POST['code'] = 'the-authz-code' pyramid_request.headers = {'X-Test-ID': '1234'}", "pyramid_request, controller, oauth_provider): controller.post() oauth_provider.create_token_response.assert_called_once_with( pyramid_request.url, pyramid_request.method, pyramid_request.POST, pyramid_request.headers) def", "not exist or is expired' in exc.value.message def test_returns_debug_data_for_oauth_token(self, pyramid_request,", "= 
'http://example.com/auth?client_id=the-client-id&response_type=code&state=foobar' return pyramid_request @pytest.fixture def authenticated_user(self, factories, pyramid_config, user_svc):", "coding: utf-8 -*- from __future__ import unicode_literals import datetime import", "response = controller.get() expected = '{}?code=abcdef123456&state=foobar'.format(auth_client.redirect_uri) assert response.location == expected", "'get_web_message']) def test_get_redirects_to_login_when_not_authenticated(self, controller, pyramid_request, view_name): with pytest.raises(httpexceptions.HTTPFound) as exc:", "controller.get_web_message() assert response == { 'code': 'abcdef123456', 'origin': 'http://client.com', 'state':", "= 'POST' pyramid_request.POST['grant_type'] = 'authorization_code' pyramid_request.POST['code'] = 'the-authz-code' pyramid_request.headers =", "None, 'expired': False} @pytest.fixture def token_service(self, pyramid_config, pyramid_request): svc =", "OAuthProviderService from h.services.oauth_validator import DEFAULT_SCOPES from h.services.user import user_service_factory from", "@pytest.mark.usefixtures('authenticated_user') def test_get_web_message_allows_empty_state_in_context_for_trusted_clients(self, controller, auth_client, oauth_provider): auth_client.trusted = True headers", "getattr(controller, view_name) view() assert exc.value.description == 'boom!' 
def test_post_redirects_to_client(self, controller,", "assert urlparse.parse_qs(parsed_url.query) == {'next': [pyramid_request.url], 'for_oauth': ['True']} @pytest.mark.parametrize('response_mode,view_name', [ (None,", "pyramid_config.register_service(svc, name='oauth_provider') return svc @pytest.mark.usefixtures('oauth_provider') class TestOAuthRevocationController(object): def test_it_creates_revocation_response(self, pyramid_request,", "return pyramid_request @pytest.fixture def authenticated_user(self, factories, pyramid_config, user_svc): user =", "from h.views import api_auth as views @pytest.mark.usefixtures('routes', 'oauth_provider', 'user_svc') class", "controller.get_web_message() assert controller.request.override_renderer == 'h:templates/oauth/authorize_web_message.html.jinja2' @pytest.mark.usefixtures('authenticated_user') def test_get_web_message_returns_context_for_trusted_clients(self, controller, auth_client):", "return factories.DeveloperToken() class TestAPITokenError(object): def test_it_sets_the_response_status_code(self, pyramid_request): context = OAuthTokenError('the", "mock.create_autospec(OAuthProviderService, instance=True) scopes = ['annotation:read', 'annotation:write'] credentials = {'client_id': auth_client.id,", "= datetime.datetime.utcnow() + datetime.timedelta(minutes=10) return factories.DeveloperToken(authclient=authclient, expires=expires) @pytest.fixture def developer_token(self,", "auth_client.name, 'response_mode': response_mode, 'response_type': auth_client.response_type.value, 'state': 'foobar', 'username': authenticated_user.username, }", "} @pytest.mark.parametrize('view_name', ['get', 'get_web_message']) def test_get_creates_authorization_response_for_trusted_clients(self, controller, auth_client, authenticated_user, pyramid_request,", "'get_web_message']) def test_get_validates_request(self, controller, pyramid_request, view_name): view = getattr(controller, view_name)", "'state': 'foobar', } 
@pytest.mark.usefixtures('authenticated_user') def test_get_web_message_allows_empty_state_in_context_for_trusted_clients(self, controller, auth_client, oauth_provider): auth_client.trusted", "token is missing' in exc.value.message def test_it_validates_token(self, pyramid_request, token_service): pyramid_request.auth_token", "factories.DeveloperToken() class TestAPITokenError(object): def test_it_sets_the_response_status_code(self, pyramid_request): context = OAuthTokenError('the error", "'expired': oauth_token.expired} def test_returns_debug_data_for_developer_token(self, pyramid_request, token_service, developer_token): pyramid_request.auth_token = developer_token.value", "class TestOAuthRevocationController(object): def test_it_creates_revocation_response(self, pyramid_request, controller, oauth_provider): controller.post() oauth_provider.create_revocation_response.assert_called_once_with( pyramid_request.url,", "body, status) pyramid_config.register_service(svc, name='oauth_provider') return svc @pytest.fixture def auth_client(self, factories):", "oauth_token): pyramid_request.auth_token = oauth_token.value token_service.fetch.return_value = oauth_token result = views.debug_token(pyramid_request)", "pyramid_request, token_service, oauth_token): pyramid_request.auth_token = oauth_token.value token_service.fetch.return_value = oauth_token result", "description' def test_it_skips_description_when_missing(self, pyramid_request): context = OAuthTokenError(None, 'invalid_request') result =", "pyramid_request): pyramid_request.method = 'POST' pyramid_request.POST['grant_type'] = 'authorization_code' pyramid_request.POST['code'] = 'the-authz-code'", "def pyramid_request(self, pyramid_request): pyramid_request.url = 'http://example.com/auth?client_id=the-client-id&response_type=code&state=foobar' return pyramid_request @pytest.fixture def", "exc: view = getattr(controller, view_name) view() parsed_url = urlparse.urlparse(exc.value.location) assert", "def 
test_it_skips_description_when_empty(self, pyramid_request): context = OAuthTokenError('', 'invalid_request') result = views.api_token_error(context,", "pytest.raises(httpexceptions.HTTPFound) as exc: view = getattr(controller, view_name) view() parsed_url =", "'{}?code=abcdef123456&state=foobar'.format(auth_client.redirect_uri) assert response.location == expected def test_post_web_message_returns_expected_context(self, controller, auth_client): response", "assert result == {'userid': developer_token.userid, 'issued_at': utc_iso8601(developer_token.created), 'expires_at': None, 'expired':", "pyramid_request.headers = {'X-Test-ID': '1234'} return views.OAuthRevocationController(pyramid_request) @pytest.fixture def oauth_provider(self, pyramid_config):", "views.OAuthAuthorizeController(None, pyramid_request) @pytest.fixture def oauth_request(self): return OAuthRequest('/') @pytest.fixture def oauth_provider(self,", "body = json.dumps({'access_token': 'the-access-token'}) oauth_provider.create_token_response.return_value = ({}, body, 200) assert", "status) pyramid_config.register_service(svc, name='oauth_provider') return svc @pytest.fixture def auth_client(self, factories): return", "pyramid_request)) pyramid_config.register_service(svc, name='user') return svc @pytest.fixture def pyramid_request(self, pyramid_request): pyramid_request.url", "views.debug_token(pyramid_request) assert exc.value.type == 'missing_token' assert 'Bearer token is missing'", "def test_returns_debug_data_for_developer_token(self, pyramid_request, token_service, developer_token): pyramid_request.auth_token = developer_token.value token_service.fetch.return_value =", "'expired': False} @pytest.fixture def token_service(self, pyramid_config, pyramid_request): svc = mock.Mock(spec_set=auth_token_service_factory(None,", "as exc: view = getattr(controller, view_name) view() parsed_url = urlparse.urlparse(exc.value.location)", "pyramid_config): svc = 
mock.Mock(spec_set=['create_revocation_response']) svc.create_revocation_response.return_value = ({}, '{}', 200) pyramid_config.register_service(svc,", "instance=True) scopes = ['annotation:read', 'annotation:write'] credentials = {'client_id': auth_client.id, 'state':", "oauth_provider): controller.post() oauth_provider.create_token_response.assert_called_once_with( pyramid_request.url, pyramid_request.method, pyramid_request.POST, pyramid_request.headers) def test_it_returns_correct_response_on_success(self, controller,", "with pytest.raises(OAuthTokenError) as exc: views.debug_token(pyramid_request) assert exc.value.type == 'missing_token' assert", "view_name) assert view() == { 'client_id': auth_client.id, 'client_name': auth_client.name, 'response_mode':", "oauth_provider.create_token_response.return_value = ({}, body, 400) with pytest.raises(httpexceptions.HTTPBadRequest) as exc: controller.post()", "TestOAuthAuthorizeController(object): @pytest.mark.usefixtures('authenticated_user') @pytest.mark.parametrize('view_name', ['get', 'get_web_message']) def test_get_validates_request(self, controller, pyramid_request, view_name):", "302) response = controller.post_web_message() assert response['state'] is None @pytest.fixture def", "]) def test_get_returns_expected_context(self, controller, auth_client, authenticated_user, oauth_request, response_mode, view_name): oauth_request.response_mode", "{'userid': developer_token.userid, 'issued_at': utc_iso8601(developer_token.created), 'expires_at': None, 'expired': False} @pytest.fixture def", "result def test_it_skips_description_when_empty(self, pyramid_request): context = OAuthTokenError('', 'invalid_request') result =", "['get', 'get_web_message']) def test_get_redirects_to_login_when_not_authenticated(self, controller, pyramid_request, view_name): with pytest.raises(httpexceptions.HTTPFound) as", "does not exist or is expired' in exc.value.message def test_returns_debug_data_for_oauth_token(self,", "'boom!' 
@pytest.mark.parametrize('view_name', ['get', 'get_web_message']) def test_get_redirects_to_login_when_not_authenticated(self, controller, pyramid_request, view_name): with", "is expired' in exc.value.message def test_returns_debug_data_for_oauth_token(self, pyramid_request, token_service, oauth_token): pyramid_request.auth_token", "== 'h:templates/oauth/authorize_web_message.html.jinja2' @pytest.mark.usefixtures('authenticated_user') def test_get_web_message_returns_context_for_trusted_clients(self, controller, auth_client): auth_client.trusted = True", "import api_auth as views @pytest.mark.usefixtures('routes', 'oauth_provider', 'user_svc') class TestOAuthAuthorizeController(object): @pytest.mark.usefixtures('authenticated_user')", "@pytest.mark.parametrize('view_name', ['get', 'get_web_message']) def test_get_redirects_to_login_when_not_authenticated(self, controller, pyramid_request, view_name): with pytest.raises(httpexceptions.HTTPFound)", "'post_web_message']) def test_post_raises_for_invalid_request(self, controller, view_name): controller.oauth.create_authorization_response.side_effect = InvalidRequestFatalError('boom!') with pytest.raises(InvalidRequestFatalError)", "None with pytest.raises(OAuthTokenError) as exc: views.debug_token(pyramid_request) assert exc.value.type == 'missing_token'", "authenticated_user(self, factories, pyramid_config, user_svc): user = factories.User.build() pyramid_config.testing_securitypolicy(user.userid) def fake_fetch(userid):", "test_get_validates_request(self, controller, pyramid_request, view_name): view = getattr(controller, view_name) view() controller.oauth.validate_authorization_request.assert_called_once_with(", "in result def test_it_skips_description_when_empty(self, pyramid_request): context = OAuthTokenError('', 'invalid_request') result", "= ({}, '{}', 200) pyramid_config.register_service(svc, name='oauth_provider') return svc class TestDebugToken(object):", "'&state=foobar' + \\ '&scope=exploit' view = 
getattr(controller, view_name) view() controller.oauth.create_authorization_response.assert_called_once_with(", "== 'error description' def test_it_skips_description_when_missing(self, pyramid_request): context = OAuthTokenError(None, 'invalid_request')", "oauth_token.expired} def test_returns_debug_data_for_developer_token(self, pyramid_request, token_service, developer_token): pyramid_request.auth_token = developer_token.value token_service.fetch.return_value", "credentials={'user': authenticated_user}, scopes=DEFAULT_SCOPES) def test_get_returns_redirect_immediately_for_trusted_clients(self, controller, auth_client, authenticated_user, pyramid_request): auth_client.trusted", "pyramid_request @pytest.fixture def authenticated_user(self, factories, pyramid_config, user_svc): user = factories.User.build()", "pyramid_request): auth_client.trusted = True response = controller.get() expected = '{}?code=abcdef123456&state=foobar'.format(auth_client.redirect_uri)", "controller, auth_client): auth_client.trusted = True assert controller.request.override_renderer is None controller.get_web_message()", "controller.post() == {'access_token': 'the-access-token'} def test_it_raises_when_error(self, controller, oauth_provider): body =", "\\ '&scope=exploit' view = getattr(controller, view_name) view() controller.oauth.create_authorization_response.assert_called_once_with( pyramid_request.url, credentials={'user':", "pyramid_config.testing_securitypolicy(user.userid) def fake_fetch(userid): if userid == user.userid: return user user_svc.fetch.side_effect", "'POST' pyramid_request.POST['grant_type'] = 'authorization_code' pyramid_request.POST['code'] = 'the-authz-code' pyramid_request.headers = {'X-Test-ID':", "test_it_returns_the_error(self, pyramid_request): context = OAuthTokenError('', 'error_type') result = views.api_token_error(context, pyramid_request)", "403 def test_it_returns_the_error(self, pyramid_request): context = OAuthTokenError('', 'error_type') result =", "def 
test_it_raises_error_when_token_is_empty(self, pyramid_request): pyramid_request.auth_token = '' with pytest.raises(OAuthTokenError) as exc:", "== 'boom!' def test_post_redirects_to_client(self, controller, auth_client): response = controller.post() expected", "pyramid_request, view_name): with pytest.raises(httpexceptions.HTTPFound) as exc: view = getattr(controller, view_name)", "'username': authenticated_user.username, } @pytest.mark.parametrize('view_name', ['get', 'get_web_message']) def test_get_creates_authorization_response_for_trusted_clients(self, controller, auth_client,", "body = None status = 302 svc.create_authorization_response.return_value = (headers, body,", "controller.get() expected = '{}?code=abcdef123456&state=foobar'.format(auth_client.redirect_uri) assert response.location == expected @pytest.mark.usefixtures('authenticated_user') def", "test_get_returns_expected_context(self, controller, auth_client, authenticated_user, oauth_request, response_mode, view_name): oauth_request.response_mode = response_mode", "'response_type': auth_client.response_type.value, 'state': 'foobar', 'username': authenticated_user.username, } @pytest.mark.parametrize('view_name', ['get', 'get_web_message'])", "{'Location': '{}?code=abcdef123456&state=foobar'.format(auth_client.redirect_uri)} body = None status = 302 svc.create_authorization_response.return_value =", "response_type=ResponseType.code) @pytest.fixture def user_svc(self, pyramid_config, pyramid_request): svc = mock.Mock(spec_set=user_service_factory(None, pyramid_request))", "== {'access_token': 'the-access-token'} def test_it_raises_when_error(self, controller, oauth_provider): body = json.dumps({'error':", "+ datetime.timedelta(minutes=10) return factories.DeveloperToken(authclient=authclient, expires=expires) @pytest.fixture def developer_token(self, factories): return", "result = views.debug_token(pyramid_request) assert result == {'userid': oauth_token.userid, 'client': {'id':", "= (scopes, 
credentials) headers = {'Location': '{}?code=abcdef123456&state=foobar'.format(auth_client.redirect_uri)} body = None", "scopes=DEFAULT_SCOPES) def test_get_returns_redirect_immediately_for_trusted_clients(self, controller, auth_client, authenticated_user, pyramid_request): auth_client.trusted = True", "from h.util.datetime import utc_iso8601 from h.views import api_auth as views", "fake_fetch return user @pytest.fixture def routes(self, pyramid_config): pyramid_config.add_route('login', '/login') @pytest.mark.usefixtures('oauth_provider')", "h.services.auth_token import auth_token_service_factory from h.services.oauth_provider import OAuthProviderService from h.services.oauth_validator import", "{'access_token': 'the-access-token'} def test_it_raises_when_error(self, controller, oauth_provider): body = json.dumps({'error': 'invalid_request'})", "pyramid_request, token_service): pyramid_request.auth_token = 'the-access-token' views.debug_token(pyramid_request) token_service.validate.assert_called_once_with('the-access-token') def test_it_raises_error_when_token_is_invalid(self, pyramid_request,", "False} @pytest.fixture def token_service(self, pyramid_config, pyramid_request): svc = mock.Mock(spec_set=auth_token_service_factory(None, pyramid_request))", "pyramid_config.add_route('login', '/login') @pytest.mark.usefixtures('oauth_provider') class TestOAuthAccessTokenController(object): def test_it_creates_token_response(self, pyramid_request, controller, oauth_provider):", "== {'userid': oauth_token.userid, 'client': {'id': oauth_token.authclient.id, 'name': oauth_token.authclient.name}, 'issued_at': utc_iso8601(oauth_token.created),", "context = OAuthTokenError('the error message', 'error_type', status_code=403) views.api_token_error(context, pyramid_request) assert", "exc.value.type == 'missing_token' assert 'Bearer token is missing' in exc.value.message", "utc_iso8601(oauth_token.created), 'expires_at': utc_iso8601(oauth_token.expires), 'expired': 
oauth_token.expired} def test_returns_debug_data_for_developer_token(self, pyramid_request, token_service, developer_token):", "assert response.location == expected @pytest.mark.usefixtures('authenticated_user') def test_get_web_message_renders_template_for_trusted_clients(self, controller, auth_client): auth_client.trusted", "'annotation:write'] credentials = {'client_id': auth_client.id, 'state': 'foobar', 'request': oauth_request} svc.validate_authorization_request.return_value", "h.util.datetime import utc_iso8601 from h.views import api_auth as views @pytest.mark.usefixtures('routes',", "response = controller.get_web_message() assert response['state'] is None @pytest.mark.parametrize('view_name', ['post', 'post_web_message'])", "pyramid_config): pyramid_config.add_route('login', '/login') @pytest.mark.usefixtures('oauth_provider') class TestOAuthAccessTokenController(object): def test_it_creates_token_response(self, pyramid_request, controller,", "@pytest.mark.usefixtures('oauth_provider') class TestOAuthAccessTokenController(object): def test_it_creates_token_response(self, pyramid_request, controller, oauth_provider): controller.post() oauth_provider.create_token_response.assert_called_once_with(", "def test_get_web_message_allows_empty_state_in_context_for_trusted_clients(self, controller, auth_client, oauth_provider): auth_client.trusted = True headers =", "test_it_raises_error_when_token_is_missing(self, pyramid_request): pyramid_request.auth_token = None with pytest.raises(OAuthTokenError) as exc: views.debug_token(pyramid_request)", "__future__ import unicode_literals import datetime import json import mock import", "= getattr(controller, view_name) assert view() == { 'client_id': auth_client.id, 'client_name':", "= json.dumps({'error': 'invalid_request'}) oauth_provider.create_token_response.return_value = ({}, body, 400) with pytest.raises(httpexceptions.HTTPBadRequest)", "@pytest.fixture def oauth_request(self): return OAuthRequest('/') 
@pytest.fixture def oauth_provider(self, pyramid_config, auth_client,", "test_get_redirects_to_login_when_not_authenticated(self, controller, pyramid_request, view_name): with pytest.raises(httpexceptions.HTTPFound) as exc: view =", "@pytest.fixture def controller(self, pyramid_request): pyramid_request.method = 'POST' pyramid_request.POST['token'] = 'the-token'", "'' with pytest.raises(OAuthTokenError) as exc: views.debug_token(pyramid_request) assert exc.value.type == 'missing_token'", "@pytest.fixture def pyramid_request(self, pyramid_request): pyramid_request.url = 'http://example.com/auth?client_id=the-client-id&response_type=code&state=foobar' return pyramid_request @pytest.fixture", "token_service, oauth_token): pyramid_request.auth_token = oauth_token.value token_service.fetch.return_value = oauth_token result =", "return factories.DeveloperToken(authclient=authclient, expires=expires) @pytest.fixture def developer_token(self, factories): return factories.DeveloperToken() class", "'abcdef123456', 'origin': 'http://client.com', 'state': 'foobar', } def test_post_web_message_allows_empty_state_in_context(self, controller, auth_client,", "return user @pytest.fixture def routes(self, pyramid_config): pyramid_config.add_route('login', '/login') @pytest.mark.usefixtures('oauth_provider') class", "urlparse.parse_qs(parsed_url.query) == {'next': [pyramid_request.url], 'for_oauth': ['True']} @pytest.mark.parametrize('response_mode,view_name', [ (None, 'get'),", "import httpexceptions from h._compat import urlparse from h.exceptions import OAuthTokenError", "pyramid_request.auth_token = 'the-access-token' views.debug_token(pyramid_request) token_service.validate.assert_called_once_with('the-access-token') def test_it_raises_error_when_token_is_invalid(self, pyramid_request, token_service): pyramid_request.auth_token", "True response = controller.get() expected = '{}?code=abcdef123456&state=foobar'.format(auth_client.redirect_uri) assert response.location ==", 
"controller, oauth_provider): body = json.dumps({'error': 'invalid_request'}) oauth_provider.create_token_response.return_value = ({}, body,", "= mock.Mock(spec_set=['create_token_response']) svc.create_token_response.return_value = ({}, '{}', 200) pyramid_config.register_service(svc, name='oauth_provider') return", "{'userid': oauth_token.userid, 'client': {'id': oauth_token.authclient.id, 'name': oauth_token.authclient.name}, 'issued_at': utc_iso8601(oauth_token.created), 'expires_at':", "test_returns_debug_data_for_oauth_token(self, pyramid_request, token_service, oauth_token): pyramid_request.auth_token = oauth_token.value token_service.fetch.return_value = oauth_token", "@pytest.mark.parametrize('view_name', ['post', 'post_web_message']) def test_post_creates_authorization_response(self, controller, pyramid_request, authenticated_user, view_name): pyramid_request.url", "test_get_web_message_returns_context_for_trusted_clients(self, controller, auth_client): auth_client.trusted = True response = controller.get_web_message() assert", "view_name): view = getattr(controller, view_name) view() controller.oauth.validate_authorization_request.assert_called_once_with( pyramid_request.url) @pytest.mark.parametrize('view_name', ['get',", "[ (None, 'get'), ('web_message', 'get_web_message'), ]) def test_get_returns_expected_context(self, controller, auth_client,", "({}, body, 400) with pytest.raises(httpexceptions.HTTPBadRequest) as exc: controller.post() assert exc.value.body", "'for_oauth': ['True']} @pytest.mark.parametrize('response_mode,view_name', [ (None, 'get'), ('web_message', 'get_web_message'), ]) def", "@pytest.fixture def routes(self, pyramid_config): pyramid_config.add_route('login', '/login') @pytest.mark.usefixtures('oauth_provider') class TestOAuthAccessTokenController(object): def", "ResponseType from h.services.auth_token import auth_token_service_factory from h.services.oauth_provider import OAuthProviderService from", "= ({}, body, 400) with 
pytest.raises(httpexceptions.HTTPBadRequest) as exc: controller.post() assert", "credentials={'user': authenticated_user}, scopes=DEFAULT_SCOPES) @pytest.mark.usefixtures('authenticated_user') @pytest.mark.parametrize('view_name', ['post', 'post_web_message']) def test_post_raises_for_invalid_request(self, controller,", "token_service(self, pyramid_config, pyramid_request): svc = mock.Mock(spec_set=auth_token_service_factory(None, pyramid_request)) pyramid_config.register_service(svc, name='auth_token') return", "controller.post() assert exc.value.body == body @pytest.fixture def controller(self, pyramid_request): pyramid_request.method", "test_it_raises_error_when_token_is_empty(self, pyramid_request): pyramid_request.auth_token = '' with pytest.raises(OAuthTokenError) as exc: views.debug_token(pyramid_request)", "= 302 svc.create_authorization_response.return_value = (headers, body, status) pyramid_config.register_service(svc, name='oauth_provider') return", "'{}?code=abcdef123456&state=foobar'.format(auth_client.redirect_uri)} body = None status = 302 svc.create_authorization_response.return_value = (headers,", "pyramid_config, pyramid_request): svc = mock.Mock(spec_set=auth_token_service_factory(None, pyramid_request)) pyramid_config.register_service(svc, name='auth_token') return svc", "assert controller.request.override_renderer == 'h:templates/oauth/authorize_web_message.html.jinja2' @pytest.mark.usefixtures('authenticated_user') def test_get_web_message_returns_context_for_trusted_clients(self, controller, auth_client): auth_client.trusted", "controller.post() oauth_provider.create_token_response.assert_called_once_with( pyramid_request.url, pyramid_request.method, pyramid_request.POST, pyramid_request.headers) def test_it_returns_correct_response_on_success(self, controller, oauth_provider):", "name='auth_token') return svc @pytest.fixture def oauth_token(self, factories): authclient = factories.AuthClient(name='Example", "response_mode, 'response_type': 
auth_client.response_type.value, 'state': 'foobar', 'username': authenticated_user.username, } @pytest.mark.parametrize('view_name', ['get',", "pyramid_request.POST, pyramid_request.headers) def test_it_returns_empty_response_on_success(self, controller): response = controller.post() assert response", "({}, '{}', 200) pyramid_config.register_service(svc, name='oauth_provider') return svc class TestDebugToken(object): def", "def oauth_provider(self, pyramid_config): svc = mock.Mock(spec_set=['create_revocation_response']) svc.create_revocation_response.return_value = ({}, '{}',", "getattr(controller, view_name) view() controller.oauth.create_authorization_response.assert_called_once_with( pyramid_request.url, credentials={'user': authenticated_user}, scopes=DEFAULT_SCOPES) def test_get_returns_redirect_immediately_for_trusted_clients(self,", "controller(self, pyramid_request): pyramid_request.override_renderer = None return views.OAuthAuthorizeController(None, pyramid_request) @pytest.fixture def", "['get', 'get_web_message']) def test_get_validates_request(self, controller, pyramid_request, view_name): view = getattr(controller,", "pyramid_request): context = OAuthTokenError('', 'invalid_request') result = views.api_token_error(context, pyramid_request) assert", "= True response = controller.get() expected = '{}?code=abcdef123456&state=foobar'.format(auth_client.redirect_uri) assert response.location", "factories, pyramid_config, user_svc): user = factories.User.build() pyramid_config.testing_securitypolicy(user.userid) def fake_fetch(userid): if", "pytest.raises(InvalidRequestFatalError) as exc: view = getattr(controller, view_name) view() assert exc.value.description", "'error_type' def test_it_returns_error_description(self, pyramid_request): context = OAuthTokenError('error description', 'error_type') result", "with pytest.raises(InvalidRequestFatalError) as exc: view = getattr(controller, view_name) view() assert", "'invalid_request'}) 
oauth_provider.create_token_response.return_value = ({}, body, 400) with pytest.raises(httpexceptions.HTTPBadRequest) as exc:", "exc.value.message def test_returns_debug_data_for_oauth_token(self, pyramid_request, token_service, oauth_token): pyramid_request.auth_token = oauth_token.value token_service.fetch.return_value", "return svc class TestDebugToken(object): def test_it_raises_error_when_token_is_missing(self, pyramid_request): pyramid_request.auth_token = None", "test_it_skips_description_when_empty(self, pyramid_request): context = OAuthTokenError('', 'invalid_request') result = views.api_token_error(context, pyramid_request)", "'/login' assert urlparse.parse_qs(parsed_url.query) == {'next': [pyramid_request.url], 'for_oauth': ['True']} @pytest.mark.parametrize('response_mode,view_name', [", "response = controller.get_web_message() assert response == { 'code': 'abcdef123456', 'origin':", "user user_svc.fetch.side_effect = fake_fetch return user @pytest.fixture def routes(self, pyramid_config):", "@pytest.mark.parametrize('view_name', ['get', 'get_web_message']) def test_get_validates_request(self, controller, pyramid_request, view_name): view =", "with pytest.raises(httpexceptions.HTTPFound) as exc: view = getattr(controller, view_name) view() parsed_url", "{'Location': '{}?code=abcdef123456'.format(auth_client.redirect_uri)} oauth_provider.create_authorization_response.return_value = (headers, None, 302) response = controller.get_web_message()", "pyramid_request, token_service): pyramid_request.auth_token = 'the-token' token_service.validate.return_value = None with pytest.raises(OAuthTokenError)", "auth_client): response = controller.post() expected = '{}?code=abcdef123456&state=foobar'.format(auth_client.redirect_uri) assert response.location ==", "view = getattr(controller, view_name) view() assert exc.value.description == 'boom!' 
@pytest.mark.parametrize('view_name',", "= ['annotation:read', 'annotation:write'] credentials = {'client_id': auth_client.id, 'state': 'foobar', 'request':", "== 403 def test_it_returns_the_error(self, pyramid_request): context = OAuthTokenError('', 'error_type') result", "import unicode_literals import datetime import json import mock import pytest", "'Bearer token does not exist or is expired' in exc.value.message", "controller(self, pyramid_request): pyramid_request.method = 'POST' pyramid_request.POST['grant_type'] = 'authorization_code' pyramid_request.POST['code'] =", "svc @pytest.mark.usefixtures('oauth_provider') class TestOAuthRevocationController(object): def test_it_creates_revocation_response(self, pyramid_request, controller, oauth_provider): controller.post()", "assert response == { 'code': 'abcdef123456', 'origin': 'http://client.com', 'state': 'foobar',", "+ \\ '&scope=exploit' view = getattr(controller, view_name) view() controller.oauth.create_authorization_response.assert_called_once_with( pyramid_request.url,", "user_svc): user = factories.User.build() pyramid_config.testing_securitypolicy(user.userid) def fake_fetch(userid): if userid ==", "'state': 'foobar', 'username': authenticated_user.username, } @pytest.mark.parametrize('view_name', ['get', 'get_web_message']) def test_get_creates_authorization_response_for_trusted_clients(self,", "import urlparse from h.exceptions import OAuthTokenError from h.models.auth_client import ResponseType", "authenticated_user.username, } @pytest.mark.parametrize('view_name', ['get', 'get_web_message']) def test_get_creates_authorization_response_for_trusted_clients(self, controller, auth_client, authenticated_user,", "def developer_token(self, factories): return factories.DeveloperToken() class TestAPITokenError(object): def test_it_sets_the_response_status_code(self, pyramid_request):", "'the-token' pyramid_request.headers = {'X-Test-ID': '1234'} return views.OAuthRevocationController(pyramid_request) 
@pytest.fixture def oauth_provider(self,", "['get', 'get_web_message']) def test_get_raises_for_invalid_request(self, controller, view_name): controller.oauth.validate_authorization_request.side_effect = InvalidRequestFatalError('boom!') with", "'{}?code=abcdef123456&state=foobar'.format(auth_client.redirect_uri) assert response.location == expected @pytest.mark.usefixtures('authenticated_user') def test_get_web_message_renders_template_for_trusted_clients(self, controller, auth_client):", "} @pytest.mark.usefixtures('authenticated_user') def test_get_web_message_allows_empty_state_in_context_for_trusted_clients(self, controller, auth_client, oauth_provider): auth_client.trusted = True", "factories.AuthClient(name='Test Client', redirect_uri='http://client.com/auth/callback', response_type=ResponseType.code) @pytest.fixture def user_svc(self, pyramid_config, pyramid_request): svc", "('web_message', 'get_web_message'), ]) def test_get_returns_expected_context(self, controller, auth_client, authenticated_user, oauth_request, response_mode,", "def test_post_redirects_to_client(self, controller, auth_client): response = controller.post() expected = '{}?code=abcdef123456&state=foobar'.format(auth_client.redirect_uri)", "json import mock import pytest from oauthlib.oauth2 import InvalidRequestFatalError from", "= mock.create_autospec(OAuthProviderService, instance=True) scopes = ['annotation:read', 'annotation:write'] credentials = {'client_id':", "def oauth_request(self): return OAuthRequest('/') @pytest.fixture def oauth_provider(self, pyramid_config, auth_client, oauth_request):", "authenticated_user}, scopes=DEFAULT_SCOPES) def test_get_returns_redirect_immediately_for_trusted_clients(self, controller, auth_client, authenticated_user, pyramid_request): auth_client.trusted =", "'error_description' not in result def test_it_skips_description_when_empty(self, pyramid_request): context = OAuthTokenError('',", "'expires_at': None, 'expired': False} @pytest.fixture def 
token_service(self, pyramid_config, pyramid_request): svc", "def test_get_returns_expected_context(self, controller, auth_client, authenticated_user, oauth_request, response_mode, view_name): oauth_request.response_mode =", "'POST' pyramid_request.POST['token'] = 'the-token' pyramid_request.headers = {'X-Test-ID': '1234'} return views.OAuthRevocationController(pyramid_request)", "httpexceptions from h._compat import urlparse from h.exceptions import OAuthTokenError from", "def controller(self, pyramid_request): pyramid_request.method = 'POST' pyramid_request.POST['grant_type'] = 'authorization_code' pyramid_request.POST['code']", "pyramid_request.method, pyramid_request.POST, pyramid_request.headers) def test_it_returns_correct_response_on_success(self, controller, oauth_provider): body = json.dumps({'access_token':", "mock.Mock(spec_set=['create_token_response']) svc.create_token_response.return_value = ({}, '{}', 200) pyramid_config.register_service(svc, name='oauth_provider') return svc", "test_it_validates_token(self, pyramid_request, token_service): pyramid_request.auth_token = 'the-access-token' views.debug_token(pyramid_request) token_service.validate.assert_called_once_with('the-access-token') def test_it_raises_error_when_token_is_invalid(self,", "= oauth_token.value token_service.fetch.return_value = oauth_token result = views.debug_token(pyramid_request) assert result", "urlparse from h.exceptions import OAuthTokenError from h.models.auth_client import ResponseType from", "= views.api_token_error(context, pyramid_request) assert 'error_description' not in result def test_it_skips_description_when_empty(self,", "pyramid_request): pyramid_request.auth_token = '' with pytest.raises(OAuthTokenError) as exc: views.debug_token(pyramid_request) assert", "= (headers, None, 302) response = controller.get_web_message() assert response['state'] is", "= 'POST' pyramid_request.POST['token'] = 'the-token' pyramid_request.headers = {'X-Test-ID': '1234'} return", "= 
getattr(controller, view_name) view() controller.oauth.create_authorization_response.assert_called_once_with( pyramid_request.url, credentials={'user': authenticated_user}, scopes=DEFAULT_SCOPES) @pytest.mark.usefixtures('authenticated_user')", "== {'userid': developer_token.userid, 'issued_at': utc_iso8601(developer_token.created), 'expires_at': None, 'expired': False} @pytest.fixture", "controller, pyramid_request, authenticated_user, view_name): pyramid_request.url = 'http://example.com/auth?client_id=the-client-id' + \\ '&response_type=code'", "name='oauth_provider') return svc @pytest.fixture def auth_client(self, factories): return factories.AuthClient(name='Test Client',", "{'X-Test-ID': '1234'} return views.OAuthRevocationController(pyramid_request) @pytest.fixture def oauth_provider(self, pyramid_config): svc =", "= {'X-Test-ID': '1234'} return views.OAuthRevocationController(pyramid_request) @pytest.fixture def oauth_provider(self, pyramid_config): svc", "== { 'code': 'abcdef123456', 'origin': 'http://client.com', 'state': 'foobar', } def", "view_name) view() controller.oauth.validate_authorization_request.assert_called_once_with( pyramid_request.url) @pytest.mark.parametrize('view_name', ['get', 'get_web_message']) def test_get_raises_for_invalid_request(self, controller,", "expires = datetime.datetime.utcnow() + datetime.timedelta(minutes=10) return factories.DeveloperToken(authclient=authclient, expires=expires) @pytest.fixture def", "test_it_raises_error_when_token_is_invalid(self, pyramid_request, token_service): pyramid_request.auth_token = 'the-token' token_service.validate.return_value = None with", "TestAPITokenError(object): def test_it_sets_the_response_status_code(self, pyramid_request): context = OAuthTokenError('the error message', 'error_type',", "oauth_token(self, factories): authclient = factories.AuthClient(name='Example Client') expires = datetime.datetime.utcnow() +", "import json import mock import pytest from oauthlib.oauth2 import 
InvalidRequestFatalError", "getattr(controller, view_name) view() assert exc.value.description == 'boom!' @pytest.mark.parametrize('view_name', ['get', 'get_web_message'])", "InvalidRequestFatalError from oauthlib.common import Request as OAuthRequest from pyramid import", "is missing' in exc.value.message def test_it_validates_token(self, pyramid_request, token_service): pyramid_request.auth_token =", "def oauth_token(self, factories): authclient = factories.AuthClient(name='Example Client') expires = datetime.datetime.utcnow()", "auth_token_service_factory from h.services.oauth_provider import OAuthProviderService from h.services.oauth_validator import DEFAULT_SCOPES from", "= (headers, None, 302) response = controller.post_web_message() assert response['state'] is", "'http://client.com', 'state': 'foobar', } @pytest.mark.usefixtures('authenticated_user') def test_get_web_message_allows_empty_state_in_context_for_trusted_clients(self, controller, auth_client, oauth_provider):", "'Bearer token is missing' in exc.value.message def test_it_validates_token(self, pyramid_request, token_service):", "from __future__ import unicode_literals import datetime import json import mock", "import InvalidRequestFatalError from oauthlib.common import Request as OAuthRequest from pyramid", "return views.OAuthAccessTokenController(pyramid_request) @pytest.fixture def oauth_provider(self, pyramid_config): svc = mock.Mock(spec_set=['create_token_response']) svc.create_token_response.return_value", "body @pytest.fixture def controller(self, pyramid_request): pyramid_request.method = 'POST' pyramid_request.POST['token'] =", "developer_token.userid, 'issued_at': utc_iso8601(developer_token.created), 'expires_at': None, 'expired': False} @pytest.fixture def token_service(self,", "'state': 'foobar', } def test_post_web_message_allows_empty_state_in_context(self, controller, auth_client, oauth_provider): auth_client.trusted =", "as OAuthRequest from pyramid import httpexceptions from 
h._compat import urlparse", "'&response_type=code' + \\ '&state=foobar' + \\ '&scope=exploit' view = getattr(controller,", "'the-access-token'}) oauth_provider.create_token_response.return_value = ({}, body, 200) assert controller.post() == {'access_token':", "auth_client): auth_client.trusted = True response = controller.get_web_message() assert response ==", "svc.create_token_response.return_value = ({}, '{}', 200) pyramid_config.register_service(svc, name='oauth_provider') return svc @pytest.mark.usefixtures('oauth_provider')", "return views.OAuthAuthorizeController(None, pyramid_request) @pytest.fixture def oauth_request(self): return OAuthRequest('/') @pytest.fixture def", "test_returns_debug_data_for_developer_token(self, pyramid_request, token_service, developer_token): pyramid_request.auth_token = developer_token.value token_service.fetch.return_value = developer_token", "in exc.value.message def test_it_validates_token(self, pyramid_request, token_service): pyramid_request.auth_token = 'the-access-token' views.debug_token(pyramid_request)", "token_service.validate.assert_called_once_with('the-access-token') def test_it_raises_error_when_token_is_invalid(self, pyramid_request, token_service): pyramid_request.auth_token = 'the-token' token_service.validate.return_value =", "views.OAuthAccessTokenController(pyramid_request) @pytest.fixture def oauth_provider(self, pyramid_config): svc = mock.Mock(spec_set=['create_token_response']) svc.create_token_response.return_value =", "result = views.debug_token(pyramid_request) assert result == {'userid': developer_token.userid, 'issued_at': utc_iso8601(developer_token.created),", "svc class TestDebugToken(object): def test_it_raises_error_when_token_is_missing(self, pyramid_request): pyramid_request.auth_token = None with", "authclient = factories.AuthClient(name='Example Client') expires = datetime.datetime.utcnow() + datetime.timedelta(minutes=10) return", "controller, auth_client): response = 
controller.post_web_message() assert response == { 'code':", "(None, 'get'), ('web_message', 'get_web_message'), ]) def test_get_returns_expected_context(self, controller, auth_client, authenticated_user,", "pyramid_request.headers = {'X-Test-ID': '1234'} return views.OAuthAccessTokenController(pyramid_request) @pytest.fixture def oauth_provider(self, pyramid_config):", "'/login') @pytest.mark.usefixtures('oauth_provider') class TestOAuthAccessTokenController(object): def test_it_creates_token_response(self, pyramid_request, controller, oauth_provider): controller.post()", "== {'next': [pyramid_request.url], 'for_oauth': ['True']} @pytest.mark.parametrize('response_mode,view_name', [ (None, 'get'), ('web_message',", "exc.value.message def test_it_raises_error_when_token_is_empty(self, pyramid_request): pyramid_request.auth_token = '' with pytest.raises(OAuthTokenError) as", "== expected def test_post_web_message_returns_expected_context(self, controller, auth_client): response = controller.post_web_message() assert", "mock.Mock(spec_set=['create_revocation_response']) svc.create_revocation_response.return_value = ({}, '{}', 200) pyramid_config.register_service(svc, name='oauth_provider') return svc", "view_name) view() assert exc.value.description == 'boom!' 
def test_post_redirects_to_client(self, controller, auth_client):", "test_post_raises_for_invalid_request(self, controller, view_name): controller.oauth.create_authorization_response.side_effect = InvalidRequestFatalError('boom!') with pytest.raises(InvalidRequestFatalError) as exc:", "Request as OAuthRequest from pyramid import httpexceptions from h._compat import", "@pytest.mark.parametrize('response_mode,view_name', [ (None, 'get'), ('web_message', 'get_web_message'), ]) def test_get_returns_expected_context(self, controller,", "oauth_provider): body = json.dumps({'error': 'invalid_request'}) oauth_provider.create_revocation_response.return_value = ({}, body, 400)", "auth_client.trusted = True response = controller.get() expected = '{}?code=abcdef123456&state=foobar'.format(auth_client.redirect_uri) assert", "user @pytest.fixture def routes(self, pyramid_config): pyramid_config.add_route('login', '/login') @pytest.mark.usefixtures('oauth_provider') class TestOAuthAccessTokenController(object):", "not in result def test_it_skips_description_when_empty(self, pyramid_request): context = OAuthTokenError('', 'invalid_request')", "pyramid_request, view_name): auth_client.trusted = True view = getattr(controller, view_name) view()", "None @pytest.fixture def controller(self, pyramid_request): pyramid_request.override_renderer = None return views.OAuthAuthorizeController(None,", "import Request as OAuthRequest from pyramid import httpexceptions from h._compat", "pyramid_config, auth_client, oauth_request): svc = mock.create_autospec(OAuthProviderService, instance=True) scopes = ['annotation:read',", "['get', 'get_web_message']) def test_get_creates_authorization_response_for_trusted_clients(self, controller, auth_client, authenticated_user, pyramid_request, view_name): auth_client.trusted", "response = controller.post() expected = '{}?code=abcdef123456&state=foobar'.format(auth_client.redirect_uri) assert response.location == expected", "with 
pytest.raises(httpexceptions.HTTPBadRequest) as exc: controller.post() assert exc.value.body == body @pytest.fixture", "exc.value.type == 'missing_token' assert 'Bearer token does not exist or", "def test_returns_debug_data_for_oauth_token(self, pyramid_request, token_service, oauth_token): pyramid_request.auth_token = oauth_token.value token_service.fetch.return_value =", "h.views import api_auth as views @pytest.mark.usefixtures('routes', 'oauth_provider', 'user_svc') class TestOAuthAuthorizeController(object):", "test_it_returns_error_description(self, pyramid_request): context = OAuthTokenError('error description', 'error_type') result = views.api_token_error(context,", "'response_mode': response_mode, 'response_type': auth_client.response_type.value, 'state': 'foobar', 'username': authenticated_user.username, } @pytest.mark.parametrize('view_name',", "+ \\ '&response_type=code' + \\ '&state=foobar' + \\ '&scope=exploit' view", "'1234'} return views.OAuthAccessTokenController(pyramid_request) @pytest.fixture def oauth_provider(self, pyramid_config): svc = mock.Mock(spec_set=['create_token_response'])", "authenticated_user, pyramid_request): auth_client.trusted = True response = controller.get() expected =", "factories.AuthClient(name='Example Client') expires = datetime.datetime.utcnow() + datetime.timedelta(minutes=10) return factories.DeveloperToken(authclient=authclient, expires=expires)", "mock.Mock(spec_set=user_service_factory(None, pyramid_request)) pyramid_config.register_service(svc, name='user') return svc @pytest.fixture def pyramid_request(self, pyramid_request):", "pyramid_request): context = OAuthTokenError(None, 'invalid_request') result = views.api_token_error(context, pyramid_request) assert", "'foobar', 'username': authenticated_user.username, } @pytest.mark.parametrize('view_name', ['get', 'get_web_message']) def test_get_creates_authorization_response_for_trusted_clients(self, controller,", "response = controller.post_web_message() assert 
response['state'] is None @pytest.fixture def controller(self,", "views.debug_token(pyramid_request) assert result == {'userid': developer_token.userid, 'issued_at': utc_iso8601(developer_token.created), 'expires_at': None,", "assert 'Bearer token does not exist or is expired' in", "pyramid_request.override_renderer = None return views.OAuthAuthorizeController(None, pyramid_request) @pytest.fixture def oauth_request(self): return", "controller, pyramid_request, view_name): view = getattr(controller, view_name) view() controller.oauth.validate_authorization_request.assert_called_once_with( pyramid_request.url)", "pyramid_request.auth_token = 'the-token' token_service.validate.return_value = None with pytest.raises(OAuthTokenError) as exc:", "200) assert controller.post() == {'access_token': 'the-access-token'} def test_it_raises_when_error(self, controller, oauth_provider):", "True response = controller.get_web_message() assert response == { 'code': 'abcdef123456',", "mock import pytest from oauthlib.oauth2 import InvalidRequestFatalError from oauthlib.common import", "= views.debug_token(pyramid_request) assert result == {'userid': oauth_token.userid, 'client': {'id': oauth_token.authclient.id,", "== 'missing_token' assert 'Bearer token is missing' in exc.value.message def", "oauth_token.authclient.name}, 'issued_at': utc_iso8601(oauth_token.created), 'expires_at': utc_iso8601(oauth_token.expires), 'expired': oauth_token.expired} def test_returns_debug_data_for_developer_token(self, pyramid_request,", "response['state'] is None @pytest.fixture def controller(self, pyramid_request): pyramid_request.override_renderer = None", "context = OAuthTokenError(None, 'invalid_request') result = views.api_token_error(context, pyramid_request) assert 'error_description'", "result['error_description'] == 'error description' def test_it_skips_description_when_missing(self, pyramid_request): context = OAuthTokenError(None,", "controller, auth_client): response = controller.post() 
expected = '{}?code=abcdef123456&state=foobar'.format(auth_client.redirect_uri) assert response.location", "oauth_token.userid, 'client': {'id': oauth_token.authclient.id, 'name': oauth_token.authclient.name}, 'issued_at': utc_iso8601(oauth_token.created), 'expires_at': utc_iso8601(oauth_token.expires),", "OAuthTokenError('error description', 'error_type') result = views.api_token_error(context, pyramid_request) assert result['error_description'] ==", "'abcdef123456', 'origin': 'http://client.com', 'state': 'foobar', } @pytest.mark.usefixtures('authenticated_user') def test_get_web_message_allows_empty_state_in_context_for_trusted_clients(self, controller,", "test_it_skips_description_when_missing(self, pyramid_request): context = OAuthTokenError(None, 'invalid_request') result = views.api_token_error(context, pyramid_request)", "OAuthTokenError from h.models.auth_client import ResponseType from h.services.auth_token import auth_token_service_factory from", "pyramid_request) assert result['error'] == 'error_type' def test_it_returns_error_description(self, pyramid_request): context =", "user_service_factory from h.util.datetime import utc_iso8601 from h.views import api_auth as", "datetime import json import mock import pytest from oauthlib.oauth2 import", "token_service, developer_token): pyramid_request.auth_token = developer_token.value token_service.fetch.return_value = developer_token result =", "import utc_iso8601 from h.views import api_auth as views @pytest.mark.usefixtures('routes', 'oauth_provider',", "response['state'] is None @pytest.mark.parametrize('view_name', ['post', 'post_web_message']) def test_post_creates_authorization_response(self, controller, pyramid_request,", "pyramid_request): pyramid_request.auth_token = None with pytest.raises(OAuthTokenError) as exc: views.debug_token(pyramid_request) assert", "expected = '{}?code=abcdef123456&state=foobar'.format(auth_client.redirect_uri) assert response.location == expected 
@pytest.mark.usefixtures('authenticated_user') def test_get_web_message_renders_template_for_trusted_clients(self,", "auth_client.id, 'state': 'foobar', 'request': oauth_request} svc.validate_authorization_request.return_value = (scopes, credentials) headers", "\\ '&response_type=code' + \\ '&state=foobar' + \\ '&scope=exploit' view =", "auth_client.trusted = True assert controller.request.override_renderer is None controller.get_web_message() assert controller.request.override_renderer", "context = OAuthTokenError('error description', 'error_type') result = views.api_token_error(context, pyramid_request) assert", "'client_name': auth_client.name, 'response_mode': response_mode, 'response_type': auth_client.response_type.value, 'state': 'foobar', 'username': authenticated_user.username,", "expected def test_post_web_message_returns_expected_context(self, controller, auth_client): response = controller.post_web_message() assert response", "view() controller.oauth.validate_authorization_request.assert_called_once_with( pyramid_request.url) @pytest.mark.parametrize('view_name', ['get', 'get_web_message']) def test_get_raises_for_invalid_request(self, controller, view_name):", "OAuthTokenError('', 'error_type') result = views.api_token_error(context, pyramid_request) assert result['error'] == 'error_type'", "from h.services.oauth_validator import DEFAULT_SCOPES from h.services.user import user_service_factory from h.util.datetime", "fake_fetch(userid): if userid == user.userid: return user user_svc.fetch.side_effect = fake_fetch", "pyramid_request): context = OAuthTokenError('the error message', 'error_type', status_code=403) views.api_token_error(context, pyramid_request)", "'the-authz-code' pyramid_request.headers = {'X-Test-ID': '1234'} return views.OAuthAccessTokenController(pyramid_request) @pytest.fixture def oauth_provider(self,", "status = 302 svc.create_authorization_response.return_value = (headers, body, status) pyramid_config.register_service(svc, 
name='oauth_provider')", "= True response = controller.get_web_message() assert response == { 'code':", "oauth_request} svc.validate_authorization_request.return_value = (scopes, credentials) headers = {'Location': '{}?code=abcdef123456&state=foobar'.format(auth_client.redirect_uri)} body", "def auth_client(self, factories): return factories.AuthClient(name='Test Client', redirect_uri='http://client.com/auth/callback', response_type=ResponseType.code) @pytest.fixture def", "== {} def test_it_raises_when_error(self, controller, oauth_provider): body = json.dumps({'error': 'invalid_request'})", "oauthlib.oauth2 import InvalidRequestFatalError from oauthlib.common import Request as OAuthRequest from", "'client_id': auth_client.id, 'client_name': auth_client.name, 'response_mode': response_mode, 'response_type': auth_client.response_type.value, 'state': 'foobar',", "test_it_returns_empty_response_on_success(self, controller): response = controller.post() assert response == {} def", "json.dumps({'error': 'invalid_request'}) oauth_provider.create_revocation_response.return_value = ({}, body, 400) with pytest.raises(httpexceptions.HTTPBadRequest) as", "pyramid_request, token_service, developer_token): pyramid_request.auth_token = developer_token.value token_service.fetch.return_value = developer_token result", "as exc: view = getattr(controller, view_name) view() assert exc.value.description ==", "pyramid_request.url) @pytest.mark.parametrize('view_name', ['get', 'get_web_message']) def test_get_raises_for_invalid_request(self, controller, view_name): controller.oauth.validate_authorization_request.side_effect =", "json.dumps({'access_token': 'the-access-token'}) oauth_provider.create_token_response.return_value = ({}, body, 200) assert controller.post() ==", "context = OAuthTokenError('', 'error_type') result = views.api_token_error(context, pyramid_request) assert result['error']", "controller, view_name): controller.oauth.validate_authorization_request.side_effect = 
InvalidRequestFatalError('boom!') with pytest.raises(InvalidRequestFatalError) as exc: view", "auth_client, authenticated_user, pyramid_request, view_name): auth_client.trusted = True view = getattr(controller,", "controller, auth_client, authenticated_user, pyramid_request): auth_client.trusted = True response = controller.get()", "svc.create_authorization_response.return_value = (headers, body, status) pyramid_config.register_service(svc, name='oauth_provider') return svc @pytest.fixture", "= controller.post() assert response == {} def test_it_raises_when_error(self, controller, oauth_provider):", "from h.models.auth_client import ResponseType from h.services.auth_token import auth_token_service_factory from h.services.oauth_provider", "'{}?code=abcdef123456'.format(auth_client.redirect_uri)} oauth_provider.create_authorization_response.return_value = (headers, None, 302) response = controller.post_web_message() assert", "= factories.AuthClient(name='Example Client') expires = datetime.datetime.utcnow() + datetime.timedelta(minutes=10) return factories.DeveloperToken(authclient=authclient,", "context = OAuthTokenError('', 'invalid_request') result = views.api_token_error(context, pyramid_request) assert 'error_description'", "expected @pytest.mark.usefixtures('authenticated_user') def test_get_web_message_renders_template_for_trusted_clients(self, controller, auth_client): auth_client.trusted = True assert", "def test_it_raises_error_when_token_is_invalid(self, pyramid_request, token_service): pyramid_request.auth_token = 'the-token' token_service.validate.return_value = None", "view() controller.oauth.create_authorization_response.assert_called_once_with( pyramid_request.url, credentials={'user': authenticated_user}, scopes=DEFAULT_SCOPES) @pytest.mark.usefixtures('authenticated_user') @pytest.mark.parametrize('view_name', ['post', 'post_web_message'])", "return views.OAuthRevocationController(pyramid_request) @pytest.fixture def oauth_provider(self, 
pyramid_config): svc = mock.Mock(spec_set=['create_revocation_response']) svc.create_revocation_response.return_value", "views.api_token_error(context, pyramid_request) assert 'error_description' not in result def test_it_skips_description_when_empty(self, pyramid_request):", "'user_svc') class TestOAuthAuthorizeController(object): @pytest.mark.usefixtures('authenticated_user') @pytest.mark.parametrize('view_name', ['get', 'get_web_message']) def test_get_validates_request(self, controller,", "class TestDebugToken(object): def test_it_raises_error_when_token_is_missing(self, pyramid_request): pyramid_request.auth_token = None with pytest.raises(OAuthTokenError)", "oauth_provider.create_revocation_response.return_value = ({}, body, 400) with pytest.raises(httpexceptions.HTTPBadRequest) as exc: controller.post()", "import datetime import json import mock import pytest from oauthlib.oauth2", "datetime.timedelta(minutes=10) return factories.DeveloperToken(authclient=authclient, expires=expires) @pytest.fixture def developer_token(self, factories): return factories.DeveloperToken()", "= getattr(controller, view_name) view() controller.oauth.validate_authorization_request.assert_called_once_with( pyramid_request.url) @pytest.mark.parametrize('view_name', ['get', 'get_web_message']) def", "def authenticated_user(self, factories, pyramid_config, user_svc): user = factories.User.build() pyramid_config.testing_securitypolicy(user.userid) def", "getattr(controller, view_name) view() controller.oauth.create_authorization_response.assert_called_once_with( pyramid_request.url, credentials={'user': authenticated_user}, scopes=DEFAULT_SCOPES) @pytest.mark.usefixtures('authenticated_user') @pytest.mark.parametrize('view_name',", "controller.post_web_message() assert response == { 'code': 'abcdef123456', 'origin': 'http://client.com', 'state':", "test_it_returns_correct_response_on_success(self, controller, oauth_provider): body = json.dumps({'access_token': 'the-access-token'}) 
oauth_provider.create_token_response.return_value = ({},", "def test_it_skips_description_when_missing(self, pyramid_request): context = OAuthTokenError(None, 'invalid_request') result = views.api_token_error(context,", "(headers, None, 302) response = controller.post_web_message() assert response['state'] is None", "@pytest.fixture def controller(self, pyramid_request): pyramid_request.override_renderer = None return views.OAuthAuthorizeController(None, pyramid_request)", "pyramid_request.headers) def test_it_returns_correct_response_on_success(self, controller, oauth_provider): body = json.dumps({'access_token': 'the-access-token'}) oauth_provider.create_token_response.return_value", "class TestAPITokenError(object): def test_it_sets_the_response_status_code(self, pyramid_request): context = OAuthTokenError('the error message',", "view_name): controller.oauth.validate_authorization_request.side_effect = InvalidRequestFatalError('boom!') with pytest.raises(InvalidRequestFatalError) as exc: view =", "None, 302) response = controller.post_web_message() assert response['state'] is None @pytest.fixture", "= getattr(controller, view_name) view() controller.oauth.create_authorization_response.assert_called_once_with( pyramid_request.url, credentials={'user': authenticated_user}, scopes=DEFAULT_SCOPES) def", "TestDebugToken(object): def test_it_raises_error_when_token_is_missing(self, pyramid_request): pyramid_request.auth_token = None with pytest.raises(OAuthTokenError) as", "body, 400) with pytest.raises(httpexceptions.HTTPBadRequest) as exc: controller.post() assert exc.value.body ==", "from oauthlib.common import Request as OAuthRequest from pyramid import httpexceptions", "assert exc.value.description == 'boom!' 
def test_post_redirects_to_client(self, controller, auth_client): response =", "= 'http://example.com/auth?client_id=the-client-id' + \\ '&response_type=code' + \\ '&state=foobar' + \\", "def controller(self, pyramid_request): pyramid_request.method = 'POST' pyramid_request.POST['token'] = 'the-token' pyramid_request.headers", "'get_web_message']) def test_get_creates_authorization_response_for_trusted_clients(self, controller, auth_client, authenticated_user, pyramid_request, view_name): auth_client.trusted =", "svc = mock.Mock(spec_set=user_service_factory(None, pyramid_request)) pyramid_config.register_service(svc, name='user') return svc @pytest.fixture def", "= 'authorization_code' pyramid_request.POST['code'] = 'the-authz-code' pyramid_request.headers = {'X-Test-ID': '1234'} return", "'invalid_request') result = views.api_token_error(context, pyramid_request) assert 'error_description' not in result", "pyramid_request.url = 'http://example.com/auth?client_id=the-client-id' + \\ '&response_type=code' + \\ '&state=foobar' +", "def test_post_raises_for_invalid_request(self, controller, view_name): controller.oauth.create_authorization_response.side_effect = InvalidRequestFatalError('boom!') with pytest.raises(InvalidRequestFatalError) as", "@pytest.mark.usefixtures('authenticated_user') def test_get_web_message_renders_template_for_trusted_clients(self, controller, auth_client): auth_client.trusted = True assert controller.request.override_renderer", "factories.DeveloperToken(authclient=authclient, expires=expires) @pytest.fixture def developer_token(self, factories): return factories.DeveloperToken() class TestAPITokenError(object):", "assert exc.value.description == 'boom!' 
@pytest.mark.parametrize('view_name', ['get', 'get_web_message']) def test_get_redirects_to_login_when_not_authenticated(self, controller,", "urlparse.urlparse(exc.value.location) assert parsed_url.path == '/login' assert urlparse.parse_qs(parsed_url.query) == {'next': [pyramid_request.url],", "pyramid_request): pyramid_request.method = 'POST' pyramid_request.POST['token'] = 'the-token' pyramid_request.headers = {'X-Test-ID':", "import mock import pytest from oauthlib.oauth2 import InvalidRequestFatalError from oauthlib.common", "scopes=DEFAULT_SCOPES) @pytest.mark.usefixtures('authenticated_user') @pytest.mark.parametrize('view_name', ['post', 'post_web_message']) def test_post_raises_for_invalid_request(self, controller, view_name): controller.oauth.create_authorization_response.side_effect", "'boom!' def test_post_redirects_to_client(self, controller, auth_client): response = controller.post() expected =", "pyramid_request.POST['code'] = 'the-authz-code' pyramid_request.headers = {'X-Test-ID': '1234'} return views.OAuthAccessTokenController(pyramid_request) @pytest.fixture", "view_name): auth_client.trusted = True view = getattr(controller, view_name) view() controller.oauth.create_authorization_response.assert_called_once_with(", "scopes = ['annotation:read', 'annotation:write'] credentials = {'client_id': auth_client.id, 'state': 'foobar',", "class TestOAuthAccessTokenController(object): def test_it_creates_token_response(self, pyramid_request, controller, oauth_provider): controller.post() oauth_provider.create_token_response.assert_called_once_with( pyramid_request.url,", "def test_it_raises_error_when_token_is_missing(self, pyramid_request): pyramid_request.auth_token = None with pytest.raises(OAuthTokenError) as exc:", "controller, auth_client, authenticated_user, pyramid_request, view_name): auth_client.trusted = True view =", "= True assert controller.request.override_renderer is None controller.get_web_message() assert 
controller.request.override_renderer ==", "unicode_literals import datetime import json import mock import pytest from", "as exc: views.debug_token(pyramid_request) assert exc.value.type == 'missing_token' assert 'Bearer token", "result['error'] == 'error_type' def test_it_returns_error_description(self, pyramid_request): context = OAuthTokenError('error description',", "exc.value.description == 'boom!' def test_post_redirects_to_client(self, controller, auth_client): response = controller.post()", "['post', 'post_web_message']) def test_post_creates_authorization_response(self, controller, pyramid_request, authenticated_user, view_name): pyramid_request.url =", "= 'the-authz-code' pyramid_request.headers = {'X-Test-ID': '1234'} return views.OAuthAccessTokenController(pyramid_request) @pytest.fixture def", "pyramid_request.url = 'http://example.com/auth?client_id=the-client-id&response_type=code&state=foobar' return pyramid_request @pytest.fixture def authenticated_user(self, factories, pyramid_config,", "is missing' in exc.value.message def test_it_raises_error_when_token_is_empty(self, pyramid_request): pyramid_request.auth_token = ''", "'the-access-token' views.debug_token(pyramid_request) token_service.validate.assert_called_once_with('the-access-token') def test_it_raises_error_when_token_is_invalid(self, pyramid_request, token_service): pyramid_request.auth_token = 'the-token'", "name='oauth_provider') return svc @pytest.mark.usefixtures('oauth_provider') class TestOAuthRevocationController(object): def test_it_creates_revocation_response(self, pyramid_request, controller,", "pyramid_request, authenticated_user, view_name): pyramid_request.url = 'http://example.com/auth?client_id=the-client-id' + \\ '&response_type=code' +", "return svc @pytest.mark.usefixtures('oauth_provider') class TestOAuthRevocationController(object): def test_it_creates_revocation_response(self, pyramid_request, controller, oauth_provider):", "= oauth_token result = 
views.debug_token(pyramid_request) assert result == {'userid': oauth_token.userid,", "= (headers, body, status) pyramid_config.register_service(svc, name='oauth_provider') return svc @pytest.fixture def", "return svc @pytest.fixture def oauth_token(self, factories): authclient = factories.AuthClient(name='Example Client')", "@pytest.fixture def oauth_token(self, factories): authclient = factories.AuthClient(name='Example Client') expires =", "= json.dumps({'error': 'invalid_request'}) oauth_provider.create_revocation_response.return_value = ({}, body, 400) with pytest.raises(httpexceptions.HTTPBadRequest)", "views.api_token_error(context, pyramid_request) assert result['error_description'] == 'error description' def test_it_skips_description_when_missing(self, pyramid_request):", "class TestOAuthAuthorizeController(object): @pytest.mark.usefixtures('authenticated_user') @pytest.mark.parametrize('view_name', ['get', 'get_web_message']) def test_get_validates_request(self, controller, pyramid_request,", "views.OAuthRevocationController(pyramid_request) @pytest.fixture def oauth_provider(self, pyramid_config): svc = mock.Mock(spec_set=['create_revocation_response']) svc.create_revocation_response.return_value =", "response == {} def test_it_raises_when_error(self, controller, oauth_provider): body = json.dumps({'error':", "result = views.api_token_error(context, pyramid_request) assert 'error_description' not in result def", "@pytest.fixture def auth_client(self, factories): return factories.AuthClient(name='Test Client', redirect_uri='http://client.com/auth/callback', response_type=ResponseType.code) @pytest.fixture", "= getattr(controller, view_name) view() parsed_url = urlparse.urlparse(exc.value.location) assert parsed_url.path ==", "svc = mock.Mock(spec_set=['create_token_response']) svc.create_token_response.return_value = ({}, '{}', 200) pyramid_config.register_service(svc, name='oauth_provider')", "oauthlib.common import Request as OAuthRequest from pyramid 
import httpexceptions from", "authenticated_user, view_name): pyramid_request.url = 'http://example.com/auth?client_id=the-client-id' + \\ '&response_type=code' + \\", "pyramid_request.method = 'POST' pyramid_request.POST['token'] = 'the-token' pyramid_request.headers = {'X-Test-ID': '1234'}", "view = getattr(controller, view_name) assert view() == { 'client_id': auth_client.id,", "= ({}, body, 200) assert controller.post() == {'access_token': 'the-access-token'} def", "pytest.raises(httpexceptions.HTTPBadRequest) as exc: controller.post() assert exc.value.body == body @pytest.fixture def", "assert view() == { 'client_id': auth_client.id, 'client_name': auth_client.name, 'response_mode': response_mode,", "view() controller.oauth.create_authorization_response.assert_called_once_with( pyramid_request.url, credentials={'user': authenticated_user}, scopes=DEFAULT_SCOPES) def test_get_returns_redirect_immediately_for_trusted_clients(self, controller, auth_client,", "= getattr(controller, view_name) view() assert exc.value.description == 'boom!' 
def test_post_redirects_to_client(self,", "exc: views.debug_token(pyramid_request) assert exc.value.type == 'missing_token' assert 'Bearer token does", "'request': oauth_request} svc.validate_authorization_request.return_value = (scopes, credentials) headers = {'Location': '{}?code=abcdef123456&state=foobar'.format(auth_client.redirect_uri)}", "def user_svc(self, pyramid_config, pyramid_request): svc = mock.Mock(spec_set=user_service_factory(None, pyramid_request)) pyramid_config.register_service(svc, name='user')", "controller, oauth_provider): body = json.dumps({'access_token': 'the-access-token'}) oauth_provider.create_token_response.return_value = ({}, body,", "auth_client): auth_client.trusted = True assert controller.request.override_renderer is None controller.get_web_message() assert", "'the-token' token_service.validate.return_value = None with pytest.raises(OAuthTokenError) as exc: views.debug_token(pyramid_request) assert", "DEFAULT_SCOPES from h.services.user import user_service_factory from h.util.datetime import utc_iso8601 from", "h.services.oauth_provider import OAuthProviderService from h.services.oauth_validator import DEFAULT_SCOPES from h.services.user import", "view_name): oauth_request.response_mode = response_mode view = getattr(controller, view_name) assert view()", "def test_it_creates_token_response(self, pyramid_request, controller, oauth_provider): controller.post() oauth_provider.create_token_response.assert_called_once_with( pyramid_request.url, pyramid_request.method, pyramid_request.POST,", "import ResponseType from h.services.auth_token import auth_token_service_factory from h.services.oauth_provider import OAuthProviderService", "Client', redirect_uri='http://client.com/auth/callback', response_type=ResponseType.code) @pytest.fixture def user_svc(self, pyramid_config, pyramid_request): svc =", "response == { 'code': 'abcdef123456', 'origin': 'http://client.com', 'state': 'foobar', }", "{'X-Test-ID': '1234'} return 
views.OAuthAccessTokenController(pyramid_request) @pytest.fixture def oauth_provider(self, pyramid_config): svc =", "def test_get_raises_for_invalid_request(self, controller, view_name): controller.oauth.validate_authorization_request.side_effect = InvalidRequestFatalError('boom!') with pytest.raises(InvalidRequestFatalError) as", "in exc.value.message def test_it_raises_error_when_token_is_empty(self, pyramid_request): pyramid_request.auth_token = '' with pytest.raises(OAuthTokenError)", "is None @pytest.mark.parametrize('view_name', ['post', 'post_web_message']) def test_post_creates_authorization_response(self, controller, pyramid_request, authenticated_user,", "test_it_sets_the_response_status_code(self, pyramid_request): context = OAuthTokenError('the error message', 'error_type', status_code=403) views.api_token_error(context,", "controller.request.override_renderer == 'h:templates/oauth/authorize_web_message.html.jinja2' @pytest.mark.usefixtures('authenticated_user') def test_get_web_message_returns_context_for_trusted_clients(self, controller, auth_client): auth_client.trusted =", "view_name) view() controller.oauth.create_authorization_response.assert_called_once_with( pyramid_request.url, credentials={'user': authenticated_user}, scopes=DEFAULT_SCOPES) @pytest.mark.usefixtures('authenticated_user') @pytest.mark.parametrize('view_name', ['post',", "'http://example.com/auth?client_id=the-client-id&response_type=code&state=foobar' return pyramid_request @pytest.fixture def authenticated_user(self, factories, pyramid_config, user_svc): user", "oauth_provider(self, pyramid_config): svc = mock.Mock(spec_set=['create_token_response']) svc.create_token_response.return_value = ({}, '{}', 200)", "auth_client, oauth_provider): auth_client.trusted = True headers = {'Location': '{}?code=abcdef123456'.format(auth_client.redirect_uri)} oauth_provider.create_authorization_response.return_value", "view_name) view() assert exc.value.description == 'boom!' 
@pytest.mark.parametrize('view_name', ['get', 'get_web_message']) def", "exc.value.description == 'boom!' @pytest.mark.parametrize('view_name', ['get', 'get_web_message']) def test_get_redirects_to_login_when_not_authenticated(self, controller, pyramid_request,", "pyramid import httpexceptions from h._compat import urlparse from h.exceptions import", "token_service): pyramid_request.auth_token = 'the-access-token' views.debug_token(pyramid_request) token_service.validate.assert_called_once_with('the-access-token') def test_it_raises_error_when_token_is_invalid(self, pyramid_request, token_service):", "def test_it_creates_revocation_response(self, pyramid_request, controller, oauth_provider): controller.post() oauth_provider.create_revocation_response.assert_called_once_with( pyramid_request.url, pyramid_request.method, pyramid_request.POST,", "def controller(self, pyramid_request): pyramid_request.override_renderer = None return views.OAuthAuthorizeController(None, pyramid_request) @pytest.fixture", "pyramid_request.url, pyramid_request.method, pyramid_request.POST, pyramid_request.headers) def test_it_returns_empty_response_on_success(self, controller): response = controller.post()", "# -*- coding: utf-8 -*- from __future__ import unicode_literals import", "'code': 'abcdef123456', 'origin': 'http://client.com', 'state': 'foobar', } def test_post_web_message_allows_empty_state_in_context(self, controller,", "def test_it_returns_empty_response_on_success(self, controller): response = controller.post() assert response == {}", "from h.services.user import user_service_factory from h.util.datetime import utc_iso8601 from h.views", "pyramid_request.method = 'POST' pyramid_request.POST['grant_type'] = 'authorization_code' pyramid_request.POST['code'] = 'the-authz-code' pyramid_request.headers", "None return views.OAuthAuthorizeController(None, pyramid_request) @pytest.fixture def oauth_request(self): return OAuthRequest('/') @pytest.fixture", 
"views.api_token_error(context, pyramid_request) assert result['error'] == 'error_type' def test_it_returns_error_description(self, pyramid_request): context", "'post_web_message']) def test_post_creates_authorization_response(self, controller, pyramid_request, authenticated_user, view_name): pyramid_request.url = 'http://example.com/auth?client_id=the-client-id'", "user_svc(self, pyramid_config, pyramid_request): svc = mock.Mock(spec_set=user_service_factory(None, pyramid_request)) pyramid_config.register_service(svc, name='user') return", "in exc.value.message def test_returns_debug_data_for_oauth_token(self, pyramid_request, token_service, oauth_token): pyramid_request.auth_token = oauth_token.value", "assert result['error_description'] == 'error description' def test_it_skips_description_when_missing(self, pyramid_request): context =", "test_get_raises_for_invalid_request(self, controller, view_name): controller.oauth.validate_authorization_request.side_effect = InvalidRequestFatalError('boom!') with pytest.raises(InvalidRequestFatalError) as exc:", "userid == user.userid: return user user_svc.fetch.side_effect = fake_fetch return user", "test_get_returns_redirect_immediately_for_trusted_clients(self, controller, auth_client, authenticated_user, pyramid_request): auth_client.trusted = True response =", "True headers = {'Location': '{}?code=abcdef123456'.format(auth_client.redirect_uri)} oauth_provider.create_authorization_response.return_value = (headers, None, 302)", "test_it_creates_token_response(self, pyramid_request, controller, oauth_provider): controller.post() oauth_provider.create_token_response.assert_called_once_with( pyramid_request.url, pyramid_request.method, pyramid_request.POST, pyramid_request.headers)", "302) response = controller.get_web_message() assert response['state'] is None @pytest.mark.parametrize('view_name', ['post',", "from oauthlib.oauth2 import InvalidRequestFatalError from oauthlib.common import Request as OAuthRequest", 
"controller.post() oauth_provider.create_revocation_response.assert_called_once_with( pyramid_request.url, pyramid_request.method, pyramid_request.POST, pyramid_request.headers) def test_it_returns_empty_response_on_success(self, controller): response", "200) pyramid_config.register_service(svc, name='oauth_provider') return svc @pytest.mark.usefixtures('oauth_provider') class TestOAuthRevocationController(object): def test_it_creates_revocation_response(self,", "return svc @pytest.fixture def auth_client(self, factories): return factories.AuthClient(name='Test Client', redirect_uri='http://client.com/auth/callback',", "= '{}?code=abcdef123456&state=foobar'.format(auth_client.redirect_uri) assert response.location == expected @pytest.mark.usefixtures('authenticated_user') def test_get_web_message_renders_template_for_trusted_clients(self, controller,", "= None with pytest.raises(OAuthTokenError) as exc: views.debug_token(pyramid_request) assert exc.value.type ==", "pyramid_request) assert 'error_description' not in result def test_it_skips_description_when_empty(self, pyramid_request): context", "def test_it_returns_error_description(self, pyramid_request): context = OAuthTokenError('error description', 'error_type') result =", "= controller.get_web_message() assert response == { 'code': 'abcdef123456', 'origin': 'http://client.com',", "None, 302) response = controller.get_web_message() assert response['state'] is None @pytest.mark.parametrize('view_name',", "= '' with pytest.raises(OAuthTokenError) as exc: views.debug_token(pyramid_request) assert exc.value.type ==", "body = json.dumps({'error': 'invalid_request'}) oauth_provider.create_revocation_response.return_value = ({}, body, 400) with", "svc.validate_authorization_request.return_value = (scopes, credentials) headers = {'Location': '{}?code=abcdef123456&state=foobar'.format(auth_client.redirect_uri)} body =", "expired' in exc.value.message def test_returns_debug_data_for_oauth_token(self, pyramid_request, 
token_service, oauth_token): pyramid_request.auth_token =", "= urlparse.urlparse(exc.value.location) assert parsed_url.path == '/login' assert urlparse.parse_qs(parsed_url.query) == {'next':", "== { 'code': 'abcdef123456', 'origin': 'http://client.com', 'state': 'foobar', } @pytest.mark.usefixtures('authenticated_user')", "oauth_provider.create_authorization_response.return_value = (headers, None, 302) response = controller.get_web_message() assert response['state']", "controller, oauth_provider): controller.post() oauth_provider.create_revocation_response.assert_called_once_with( pyramid_request.url, pyramid_request.method, pyramid_request.POST, pyramid_request.headers) def test_it_returns_empty_response_on_success(self,", "import OAuthTokenError from h.models.auth_client import ResponseType from h.services.auth_token import auth_token_service_factory", "controller, auth_client): auth_client.trusted = True response = controller.get_web_message() assert response", "pyramid_request.headers) def test_it_returns_empty_response_on_success(self, controller): response = controller.post() assert response ==", "missing' in exc.value.message def test_it_validates_token(self, pyramid_request, token_service): pyramid_request.auth_token = 'the-access-token'", "token does not exist or is expired' in exc.value.message def", "= {'X-Test-ID': '1234'} return views.OAuthAccessTokenController(pyramid_request) @pytest.fixture def oauth_provider(self, pyramid_config): svc", "None controller.get_web_message() assert controller.request.override_renderer == 'h:templates/oauth/authorize_web_message.html.jinja2' @pytest.mark.usefixtures('authenticated_user') def test_get_web_message_returns_context_for_trusted_clients(self, controller,", "{'id': oauth_token.authclient.id, 'name': oauth_token.authclient.name}, 'issued_at': utc_iso8601(oauth_token.created), 'expires_at': utc_iso8601(oauth_token.expires), 'expired': oauth_token.expired}", "def token_service(self, pyramid_config, 
pyramid_request): svc = mock.Mock(spec_set=auth_token_service_factory(None, pyramid_request)) pyramid_config.register_service(svc, name='auth_token')", "@pytest.fixture def developer_token(self, factories): return factories.DeveloperToken() class TestAPITokenError(object): def test_it_sets_the_response_status_code(self,", "@pytest.fixture def oauth_provider(self, pyramid_config, auth_client, oauth_request): svc = mock.create_autospec(OAuthProviderService, instance=True)", "oauth_provider(self, pyramid_config): svc = mock.Mock(spec_set=['create_revocation_response']) svc.create_revocation_response.return_value = ({}, '{}', 200)", "@pytest.fixture def token_service(self, pyramid_config, pyramid_request): svc = mock.Mock(spec_set=auth_token_service_factory(None, pyramid_request)) pyramid_config.register_service(svc,", "({}, '{}', 200) pyramid_config.register_service(svc, name='oauth_provider') return svc @pytest.mark.usefixtures('oauth_provider') class TestOAuthRevocationController(object):", "controller.get_web_message() assert response['state'] is None @pytest.mark.parametrize('view_name', ['post', 'post_web_message']) def test_post_creates_authorization_response(self,", "controller.oauth.create_authorization_response.assert_called_once_with( pyramid_request.url, credentials={'user': authenticated_user}, scopes=DEFAULT_SCOPES) @pytest.mark.usefixtures('authenticated_user') @pytest.mark.parametrize('view_name', ['post', 'post_web_message']) def", "oauth_token result = views.debug_token(pyramid_request) assert result == {'userid': oauth_token.userid, 'client':", "return svc @pytest.fixture def pyramid_request(self, pyramid_request): pyramid_request.url = 'http://example.com/auth?client_id=the-client-id&response_type=code&state=foobar' return", "= developer_token.value token_service.fetch.return_value = developer_token result = views.debug_token(pyramid_request) assert result", "from h.exceptions import OAuthTokenError from h.models.auth_client import ResponseType from 
h.services.auth_token", "controller, view_name): controller.oauth.create_authorization_response.side_effect = InvalidRequestFatalError('boom!') with pytest.raises(InvalidRequestFatalError) as exc: view", "response.location == expected def test_post_web_message_returns_expected_context(self, controller, auth_client): response = controller.post_web_message()", "pytest.raises(OAuthTokenError) as exc: views.debug_token(pyramid_request) assert exc.value.type == 'missing_token' assert 'Bearer", "auth_client.response_type.value, 'state': 'foobar', 'username': authenticated_user.username, } @pytest.mark.parametrize('view_name', ['get', 'get_web_message']) def", "+ \\ '&state=foobar' + \\ '&scope=exploit' view = getattr(controller, view_name)", "oauth_token.value token_service.fetch.return_value = oauth_token result = views.debug_token(pyramid_request) assert result ==", "from h.services.oauth_provider import OAuthProviderService from h.services.oauth_validator import DEFAULT_SCOPES from h.services.user", "svc = mock.Mock(spec_set=auth_token_service_factory(None, pyramid_request)) pyramid_config.register_service(svc, name='auth_token') return svc @pytest.fixture def", "= OAuthTokenError('error description', 'error_type') result = views.api_token_error(context, pyramid_request) assert result['error_description']", "@pytest.fixture def user_svc(self, pyramid_config, pyramid_request): svc = mock.Mock(spec_set=user_service_factory(None, pyramid_request)) pyramid_config.register_service(svc,", "302 svc.create_authorization_response.return_value = (headers, body, status) pyramid_config.register_service(svc, name='oauth_provider') return svc", "def test_get_validates_request(self, controller, pyramid_request, view_name): view = getattr(controller, view_name) view()", "auth_client(self, factories): return factories.AuthClient(name='Test Client', redirect_uri='http://client.com/auth/callback', response_type=ResponseType.code) @pytest.fixture def user_svc(self,", "routes(self, 
pyramid_config): pyramid_config.add_route('login', '/login') @pytest.mark.usefixtures('oauth_provider') class TestOAuthAccessTokenController(object): def test_it_creates_token_response(self, pyramid_request,", "OAuthTokenError('', 'invalid_request') result = views.api_token_error(context, pyramid_request) assert 'error_description' not in", "pyramid_request.auth_token = oauth_token.value token_service.fetch.return_value = oauth_token result = views.debug_token(pyramid_request) assert", "= controller.get_web_message() assert response['state'] is None @pytest.mark.parametrize('view_name', ['post', 'post_web_message']) def", "= 'the-token' token_service.validate.return_value = None with pytest.raises(OAuthTokenError) as exc: views.debug_token(pyramid_request)", "pyramid_config.register_service(svc, name='auth_token') return svc @pytest.fixture def oauth_token(self, factories): authclient =", "test_post_web_message_allows_empty_state_in_context(self, controller, auth_client, oauth_provider): auth_client.trusted = True headers = {'Location':", "@pytest.fixture def authenticated_user(self, factories, pyramid_config, user_svc): user = factories.User.build() pyramid_config.testing_securitypolicy(user.userid)", "= None status = 302 svc.create_authorization_response.return_value = (headers, body, status)", "token_service.validate.return_value = None with pytest.raises(OAuthTokenError) as exc: views.debug_token(pyramid_request) assert exc.value.type", "error message', 'error_type', status_code=403) views.api_token_error(context, pyramid_request) assert pyramid_request.response.status_code == 403", "view_name): pyramid_request.url = 'http://example.com/auth?client_id=the-client-id' + \\ '&response_type=code' + \\ '&state=foobar'", "oauth_provider): auth_client.trusted = True headers = {'Location': '{}?code=abcdef123456'.format(auth_client.redirect_uri)} oauth_provider.create_authorization_response.return_value =", "{'Location': 
'{}?code=abcdef123456'.format(auth_client.redirect_uri)} oauth_provider.create_authorization_response.return_value = (headers, None, 302) response = controller.post_web_message()", "import DEFAULT_SCOPES from h.services.user import user_service_factory from h.util.datetime import utc_iso8601", "= True view = getattr(controller, view_name) view() controller.oauth.create_authorization_response.assert_called_once_with( pyramid_request.url, credentials={'user':", "{ 'code': 'abcdef123456', 'origin': 'http://client.com', 'state': 'foobar', } @pytest.mark.usefixtures('authenticated_user') def", "= ({}, '{}', 200) pyramid_config.register_service(svc, name='oauth_provider') return svc @pytest.mark.usefixtures('oauth_provider') class", "expected = '{}?code=abcdef123456&state=foobar'.format(auth_client.redirect_uri) assert response.location == expected def test_post_web_message_returns_expected_context(self, controller,", "oauth_token.authclient.id, 'name': oauth_token.authclient.name}, 'issued_at': utc_iso8601(oauth_token.created), 'expires_at': utc_iso8601(oauth_token.expires), 'expired': oauth_token.expired} def", "pyramid_request) assert pyramid_request.response.status_code == 403 def test_it_returns_the_error(self, pyramid_request): context =", "def test_get_returns_redirect_immediately_for_trusted_clients(self, controller, auth_client, authenticated_user, pyramid_request): auth_client.trusted = True response", "{} def test_it_raises_when_error(self, controller, oauth_provider): body = json.dumps({'error': 'invalid_request'}) oauth_provider.create_revocation_response.return_value", "OAuthRequest from pyramid import httpexceptions from h._compat import urlparse from", "pyramid_request.method, pyramid_request.POST, pyramid_request.headers) def test_it_returns_empty_response_on_success(self, controller): response = controller.post() assert", "assert controller.post() == {'access_token': 'the-access-token'} def test_it_raises_when_error(self, controller, oauth_provider): 
body", "'missing_token' assert 'Bearer token does not exist or is expired'", "def test_get_redirects_to_login_when_not_authenticated(self, controller, pyramid_request, view_name): with pytest.raises(httpexceptions.HTTPFound) as exc: view", "== body @pytest.fixture def controller(self, pyramid_request): pyramid_request.method = 'POST' pyramid_request.POST['token']", "pyramid_request.url, credentials={'user': authenticated_user}, scopes=DEFAULT_SCOPES) def test_get_returns_redirect_immediately_for_trusted_clients(self, controller, auth_client, authenticated_user, pyramid_request):", "assert response == {} def test_it_raises_when_error(self, controller, oauth_provider): body =", "= factories.User.build() pyramid_config.testing_securitypolicy(user.userid) def fake_fetch(userid): if userid == user.userid: return", "'code': 'abcdef123456', 'origin': 'http://client.com', 'state': 'foobar', } @pytest.mark.usefixtures('authenticated_user') def test_get_web_message_allows_empty_state_in_context_for_trusted_clients(self,", "exc: view = getattr(controller, view_name) view() assert exc.value.description == 'boom!'", "headers = {'Location': '{}?code=abcdef123456'.format(auth_client.redirect_uri)} oauth_provider.create_authorization_response.return_value = (headers, None, 302) response", "import OAuthProviderService from h.services.oauth_validator import DEFAULT_SCOPES from h.services.user import user_service_factory", "token_service): pyramid_request.auth_token = 'the-token' token_service.validate.return_value = None with pytest.raises(OAuthTokenError) as", "= {'Location': '{}?code=abcdef123456'.format(auth_client.redirect_uri)} oauth_provider.create_authorization_response.return_value = (headers, None, 302) response =", "= controller.get() expected = '{}?code=abcdef123456&state=foobar'.format(auth_client.redirect_uri) assert response.location == expected @pytest.mark.usefixtures('authenticated_user')", "oauth_request(self): return OAuthRequest('/') @pytest.fixture def 
oauth_provider(self, pyramid_config, auth_client, oauth_request): svc", "credentials) headers = {'Location': '{}?code=abcdef123456&state=foobar'.format(auth_client.redirect_uri)} body = None status =", "== user.userid: return user user_svc.fetch.side_effect = fake_fetch return user @pytest.fixture", "view() assert exc.value.description == 'boom!' @pytest.mark.parametrize('view_name', ['get', 'get_web_message']) def test_get_redirects_to_login_when_not_authenticated(self,", "pytest from oauthlib.oauth2 import InvalidRequestFatalError from oauthlib.common import Request as", "controller.oauth.create_authorization_response.assert_called_once_with( pyramid_request.url, credentials={'user': authenticated_user}, scopes=DEFAULT_SCOPES) def test_get_returns_redirect_immediately_for_trusted_clients(self, controller, auth_client, authenticated_user,", "pyramid_request.POST['token'] = 'the-token' pyramid_request.headers = {'X-Test-ID': '1234'} return views.OAuthRevocationController(pyramid_request) @pytest.fixture", "pyramid_request): pyramid_request.url = 'http://example.com/auth?client_id=the-client-id&response_type=code&state=foobar' return pyramid_request @pytest.fixture def authenticated_user(self, factories,", "return user user_svc.fetch.side_effect = fake_fetch return user @pytest.fixture def routes(self,", "200) pyramid_config.register_service(svc, name='oauth_provider') return svc class TestDebugToken(object): def test_it_raises_error_when_token_is_missing(self, pyramid_request):", "'expires_at': utc_iso8601(oauth_token.expires), 'expired': oauth_token.expired} def test_returns_debug_data_for_developer_token(self, pyramid_request, token_service, developer_token): pyramid_request.auth_token", "({}, body, 200) assert controller.post() == {'access_token': 'the-access-token'} def test_it_raises_when_error(self,", "import pytest from oauthlib.oauth2 import InvalidRequestFatalError from oauthlib.common import Request", "'h:templates/oauth/authorize_web_message.html.jinja2' 
@pytest.mark.usefixtures('authenticated_user') def test_get_web_message_returns_context_for_trusted_clients(self, controller, auth_client): auth_client.trusted = True response", "svc = mock.Mock(spec_set=['create_revocation_response']) svc.create_revocation_response.return_value = ({}, '{}', 200) pyramid_config.register_service(svc, name='oauth_provider')", "['annotation:read', 'annotation:write'] credentials = {'client_id': auth_client.id, 'state': 'foobar', 'request': oauth_request}", "utc_iso8601 from h.views import api_auth as views @pytest.mark.usefixtures('routes', 'oauth_provider', 'user_svc')", "description', 'error_type') result = views.api_token_error(context, pyramid_request) assert result['error_description'] == 'error", "authenticated_user}, scopes=DEFAULT_SCOPES) @pytest.mark.usefixtures('authenticated_user') @pytest.mark.parametrize('view_name', ['post', 'post_web_message']) def test_post_raises_for_invalid_request(self, controller, view_name):", "= 'the-access-token' views.debug_token(pyramid_request) token_service.validate.assert_called_once_with('the-access-token') def test_it_raises_error_when_token_is_invalid(self, pyramid_request, token_service): pyramid_request.auth_token =", "headers = {'Location': '{}?code=abcdef123456&state=foobar'.format(auth_client.redirect_uri)} body = None status = 302", "{ 'code': 'abcdef123456', 'origin': 'http://client.com', 'state': 'foobar', } def test_post_web_message_allows_empty_state_in_context(self,", "@pytest.fixture def oauth_provider(self, pyramid_config): svc = mock.Mock(spec_set=['create_revocation_response']) svc.create_revocation_response.return_value = ({},", "oauth_provider.create_authorization_response.return_value = (headers, None, 302) response = controller.post_web_message() assert response['state']", "parsed_url = urlparse.urlparse(exc.value.location) assert parsed_url.path == '/login' assert urlparse.parse_qs(parsed_url.query) ==", "InvalidRequestFatalError('boom!') with 
pytest.raises(InvalidRequestFatalError) as exc: view = getattr(controller, view_name) view()", "(scopes, credentials) headers = {'Location': '{}?code=abcdef123456&state=foobar'.format(auth_client.redirect_uri)} body = None status", "(headers, body, status) pyramid_config.register_service(svc, name='oauth_provider') return svc @pytest.fixture def auth_client(self,", "= views.api_token_error(context, pyramid_request) assert result['error_description'] == 'error description' def test_it_skips_description_when_missing(self,", "pyramid_request.response.status_code == 403 def test_it_returns_the_error(self, pyramid_request): context = OAuthTokenError('', 'error_type')", "views.debug_token(pyramid_request) assert result == {'userid': oauth_token.userid, 'client': {'id': oauth_token.authclient.id, 'name':", "authenticated_user, pyramid_request, view_name): auth_client.trusted = True view = getattr(controller, view_name)", "= getattr(controller, view_name) view() assert exc.value.description == 'boom!' 
@pytest.mark.parametrize('view_name', ['get',", "= '{}?code=abcdef123456&state=foobar'.format(auth_client.redirect_uri) assert response.location == expected def test_post_web_message_returns_expected_context(self, controller, auth_client):", "factories): return factories.AuthClient(name='Test Client', redirect_uri='http://client.com/auth/callback', response_type=ResponseType.code) @pytest.fixture def user_svc(self, pyramid_config,", "def fake_fetch(userid): if userid == user.userid: return user user_svc.fetch.side_effect =", "= fake_fetch return user @pytest.fixture def routes(self, pyramid_config): pyramid_config.add_route('login', '/login')", "def test_get_creates_authorization_response_for_trusted_clients(self, controller, auth_client, authenticated_user, pyramid_request, view_name): auth_client.trusted = True", "OAuthRequest('/') @pytest.fixture def oauth_provider(self, pyramid_config, auth_client, oauth_request): svc = mock.create_autospec(OAuthProviderService,", "exc.value.body == body @pytest.fixture def controller(self, pyramid_request): pyramid_request.method = 'POST'", "'foobar', } def test_post_web_message_allows_empty_state_in_context(self, controller, auth_client, oauth_provider): auth_client.trusted = True", "assert exc.value.type == 'missing_token' assert 'Bearer token is missing' in", "utf-8 -*- from __future__ import unicode_literals import datetime import json", "oauth_provider(self, pyramid_config, auth_client, oauth_request): svc = mock.create_autospec(OAuthProviderService, instance=True) scopes =", "'origin': 'http://client.com', 'state': 'foobar', } def test_post_web_message_allows_empty_state_in_context(self, controller, auth_client, oauth_provider):", "or is expired' in exc.value.message def test_returns_debug_data_for_oauth_token(self, pyramid_request, token_service, oauth_token):", "token_service.fetch.return_value = developer_token result = views.debug_token(pyramid_request) assert result == {'userid':", 
"views.debug_token(pyramid_request) token_service.validate.assert_called_once_with('the-access-token') def test_it_raises_error_when_token_is_invalid(self, pyramid_request, token_service): pyramid_request.auth_token = 'the-token' token_service.validate.return_value", "'invalid_request'}) oauth_provider.create_revocation_response.return_value = ({}, body, 400) with pytest.raises(httpexceptions.HTTPBadRequest) as exc:", "def test_it_sets_the_response_status_code(self, pyramid_request): context = OAuthTokenError('the error message', 'error_type', status_code=403)", "@pytest.mark.parametrize('view_name', ['get', 'get_web_message']) def test_get_raises_for_invalid_request(self, controller, view_name): controller.oauth.validate_authorization_request.side_effect = InvalidRequestFatalError('boom!')", "status_code=403) views.api_token_error(context, pyramid_request) assert pyramid_request.response.status_code == 403 def test_it_returns_the_error(self, pyramid_request):", "exc: views.debug_token(pyramid_request) assert exc.value.type == 'missing_token' assert 'Bearer token is", "test_post_web_message_returns_expected_context(self, controller, auth_client): response = controller.post_web_message() assert response == {", "h._compat import urlparse from h.exceptions import OAuthTokenError from h.models.auth_client import", "assert 'error_description' not in result def test_it_skips_description_when_empty(self, pyramid_request): context =", "= json.dumps({'access_token': 'the-access-token'}) oauth_provider.create_token_response.return_value = ({}, body, 200) assert controller.post()", "result == {'userid': oauth_token.userid, 'client': {'id': oauth_token.authclient.id, 'name': oauth_token.authclient.name}, 'issued_at':", "views.api_token_error(context, pyramid_request) assert pyramid_request.response.status_code == 403 def test_it_returns_the_error(self, pyramid_request): context", "view_name) view() parsed_url = urlparse.urlparse(exc.value.location) assert parsed_url.path == 
'/login' assert", "def test_it_returns_correct_response_on_success(self, controller, oauth_provider): body = json.dumps({'access_token': 'the-access-token'}) oauth_provider.create_token_response.return_value =", "auth_client.trusted = True headers = {'Location': '{}?code=abcdef123456'.format(auth_client.redirect_uri)} oauth_provider.create_authorization_response.return_value = (headers,", "['True']} @pytest.mark.parametrize('response_mode,view_name', [ (None, 'get'), ('web_message', 'get_web_message'), ]) def test_get_returns_expected_context(self,", "authenticated_user, oauth_request, response_mode, view_name): oauth_request.response_mode = response_mode view = getattr(controller,", "[pyramid_request.url], 'for_oauth': ['True']} @pytest.mark.parametrize('response_mode,view_name', [ (None, 'get'), ('web_message', 'get_web_message'), ])", "pyramid_request) @pytest.fixture def oauth_request(self): return OAuthRequest('/') @pytest.fixture def oauth_provider(self, pyramid_config,", "views @pytest.mark.usefixtures('routes', 'oauth_provider', 'user_svc') class TestOAuthAuthorizeController(object): @pytest.mark.usefixtures('authenticated_user') @pytest.mark.parametrize('view_name', ['get', 'get_web_message'])", "'missing_token' assert 'Bearer token is missing' in exc.value.message def test_it_raises_error_when_token_is_empty(self,", "pyramid_request): svc = mock.Mock(spec_set=auth_token_service_factory(None, pyramid_request)) pyramid_config.register_service(svc, name='auth_token') return svc @pytest.fixture", "pyramid_request) assert result['error_description'] == 'error description' def test_it_skips_description_when_missing(self, pyramid_request): context", "result = views.api_token_error(context, pyramid_request) assert result['error_description'] == 'error description' def", "oauth_provider.create_token_response.return_value = ({}, body, 200) assert controller.post() == {'access_token': 'the-access-token'}", "pyramid_request): svc = 
mock.Mock(spec_set=user_service_factory(None, pyramid_request)) pyramid_config.register_service(svc, name='user') return svc @pytest.fixture", "pyramid_request)) pyramid_config.register_service(svc, name='auth_token') return svc @pytest.fixture def oauth_token(self, factories): authclient", "def test_get_web_message_renders_template_for_trusted_clients(self, controller, auth_client): auth_client.trusted = True assert controller.request.override_renderer is", "auth_client, authenticated_user, oauth_request, response_mode, view_name): oauth_request.response_mode = response_mode view =", "'foobar', 'request': oauth_request} svc.validate_authorization_request.return_value = (scopes, credentials) headers = {'Location':", "exc.value.message def test_it_validates_token(self, pyramid_request, token_service): pyramid_request.auth_token = 'the-access-token' views.debug_token(pyramid_request) token_service.validate.assert_called_once_with('the-access-token')", "'issued_at': utc_iso8601(developer_token.created), 'expires_at': None, 'expired': False} @pytest.fixture def token_service(self, pyramid_config,", "} def test_post_web_message_allows_empty_state_in_context(self, controller, auth_client, oauth_provider): auth_client.trusted = True headers", "developer_token.value token_service.fetch.return_value = developer_token result = views.debug_token(pyramid_request) assert result ==", "h.services.oauth_validator import DEFAULT_SCOPES from h.services.user import user_service_factory from h.util.datetime import", "assert response['state'] is None @pytest.mark.parametrize('view_name', ['post', 'post_web_message']) def test_post_creates_authorization_response(self, controller,", "test_it_raises_when_error(self, controller, oauth_provider): body = json.dumps({'error': 'invalid_request'}) oauth_provider.create_token_response.return_value = ({},", "'error_type', status_code=403) views.api_token_error(context, pyramid_request) assert pyramid_request.response.status_code == 403 def 
test_it_returns_the_error(self,", "'{}?code=abcdef123456'.format(auth_client.redirect_uri)} oauth_provider.create_authorization_response.return_value = (headers, None, 302) response = controller.get_web_message() assert", "controller, auth_client, authenticated_user, oauth_request, response_mode, view_name): oauth_request.response_mode = response_mode view", "= True headers = {'Location': '{}?code=abcdef123456'.format(auth_client.redirect_uri)} oauth_provider.create_authorization_response.return_value = (headers, None,", "view_name): with pytest.raises(httpexceptions.HTTPFound) as exc: view = getattr(controller, view_name) view()", "auth_client.trusted = True response = controller.get_web_message() assert response == {", "is None controller.get_web_message() assert controller.request.override_renderer == 'h:templates/oauth/authorize_web_message.html.jinja2' @pytest.mark.usefixtures('authenticated_user') def test_get_web_message_returns_context_for_trusted_clients(self,", "h.models.auth_client import ResponseType from h.services.auth_token import auth_token_service_factory from h.services.oauth_provider import", "oauth_provider): controller.post() oauth_provider.create_revocation_response.assert_called_once_with( pyramid_request.url, pyramid_request.method, pyramid_request.POST, pyramid_request.headers) def test_it_returns_empty_response_on_success(self, controller):", "'get_web_message']) def test_get_raises_for_invalid_request(self, controller, view_name): controller.oauth.validate_authorization_request.side_effect = InvalidRequestFatalError('boom!') with pytest.raises(InvalidRequestFatalError)", "response.location == expected @pytest.mark.usefixtures('authenticated_user') def test_get_web_message_renders_template_for_trusted_clients(self, controller, auth_client): auth_client.trusted =", "controller.oauth.validate_authorization_request.side_effect = InvalidRequestFatalError('boom!') with pytest.raises(InvalidRequestFatalError) as exc: view = getattr(controller,", 
"pyramid_request.auth_token = developer_token.value token_service.fetch.return_value = developer_token result = views.debug_token(pyramid_request) assert", "developer_token(self, factories): return factories.DeveloperToken() class TestAPITokenError(object): def test_it_sets_the_response_status_code(self, pyramid_request): context", "response_mode, view_name): oauth_request.response_mode = response_mode view = getattr(controller, view_name) assert", "= 'the-token' pyramid_request.headers = {'X-Test-ID': '1234'} return views.OAuthRevocationController(pyramid_request) @pytest.fixture def", "view() parsed_url = urlparse.urlparse(exc.value.location) assert parsed_url.path == '/login' assert urlparse.parse_qs(parsed_url.query)", "auth_client): response = controller.post_web_message() assert response == { 'code': 'abcdef123456',", "name='user') return svc @pytest.fixture def pyramid_request(self, pyramid_request): pyramid_request.url = 'http://example.com/auth?client_id=the-client-id&response_type=code&state=foobar'", "def oauth_provider(self, pyramid_config): svc = mock.Mock(spec_set=['create_token_response']) svc.create_token_response.return_value = ({}, '{}',", "controller(self, pyramid_request): pyramid_request.method = 'POST' pyramid_request.POST['token'] = 'the-token' pyramid_request.headers =", "'{}', 200) pyramid_config.register_service(svc, name='oauth_provider') return svc @pytest.mark.usefixtures('oauth_provider') class TestOAuthRevocationController(object): def", "'Bearer token is missing' in exc.value.message def test_it_raises_error_when_token_is_empty(self, pyramid_request): pyramid_request.auth_token", "oauth_request): svc = mock.create_autospec(OAuthProviderService, instance=True) scopes = ['annotation:read', 'annotation:write'] credentials", "= controller.post_web_message() assert response == { 'code': 'abcdef123456', 'origin': 'http://client.com',", "def routes(self, pyramid_config): pyramid_config.add_route('login', '/login') 
@pytest.mark.usefixtures('oauth_provider') class TestOAuthAccessTokenController(object): def test_it_creates_token_response(self,", "views.debug_token(pyramid_request) assert exc.value.type == 'missing_token' assert 'Bearer token does not", "def test_it_validates_token(self, pyramid_request, token_service): pyramid_request.auth_token = 'the-access-token' views.debug_token(pyramid_request) token_service.validate.assert_called_once_with('the-access-token') def", "@pytest.mark.usefixtures('authenticated_user') @pytest.mark.parametrize('view_name', ['get', 'get_web_message']) def test_get_validates_request(self, controller, pyramid_request, view_name): view", "= controller.post() expected = '{}?code=abcdef123456&state=foobar'.format(auth_client.redirect_uri) assert response.location == expected def", "{ 'client_id': auth_client.id, 'client_name': auth_client.name, 'response_mode': response_mode, 'response_type': auth_client.response_type.value, 'state':", "pyramid_request.url, credentials={'user': authenticated_user}, scopes=DEFAULT_SCOPES) @pytest.mark.usefixtures('authenticated_user') @pytest.mark.parametrize('view_name', ['post', 'post_web_message']) def test_post_raises_for_invalid_request(self,", "pyramid_request, controller, oauth_provider): controller.post() oauth_provider.create_revocation_response.assert_called_once_with( pyramid_request.url, pyramid_request.method, pyramid_request.POST, pyramid_request.headers) def", "= OAuthTokenError('the error message', 'error_type', status_code=403) views.api_token_error(context, pyramid_request) assert pyramid_request.response.status_code", "utc_iso8601(developer_token.created), 'expires_at': None, 'expired': False} @pytest.fixture def token_service(self, pyramid_config, pyramid_request):", "= developer_token result = views.debug_token(pyramid_request) assert result == {'userid': developer_token.userid,", "\\ '&state=foobar' + \\ '&scope=exploit' view = getattr(controller, view_name) view()", "svc @pytest.fixture def 
auth_client(self, factories): return factories.AuthClient(name='Test Client', redirect_uri='http://client.com/auth/callback', response_type=ResponseType.code)", "controller, auth_client, oauth_provider): auth_client.trusted = True headers = {'Location': '{}?code=abcdef123456'.format(auth_client.redirect_uri)}", "response_mode view = getattr(controller, view_name) assert view() == { 'client_id':", "None @pytest.mark.parametrize('view_name', ['post', 'post_web_message']) def test_post_creates_authorization_response(self, controller, pyramid_request, authenticated_user, view_name):", "h.services.user import user_service_factory from h.util.datetime import utc_iso8601 from h.views import", "test_it_raises_when_error(self, controller, oauth_provider): body = json.dumps({'error': 'invalid_request'}) oauth_provider.create_revocation_response.return_value = ({},", "Client') expires = datetime.datetime.utcnow() + datetime.timedelta(minutes=10) return factories.DeveloperToken(authclient=authclient, expires=expires) @pytest.fixture", "def oauth_provider(self, pyramid_config, auth_client, oauth_request): svc = mock.create_autospec(OAuthProviderService, instance=True) scopes", "'foobar', } @pytest.mark.usefixtures('authenticated_user') def test_get_web_message_allows_empty_state_in_context_for_trusted_clients(self, controller, auth_client, oauth_provider): auth_client.trusted =", "is None @pytest.fixture def controller(self, pyramid_request): pyramid_request.override_renderer = None return", "None status = 302 svc.create_authorization_response.return_value = (headers, body, status) pyramid_config.register_service(svc,", "if userid == user.userid: return user user_svc.fetch.side_effect = fake_fetch return", "pyramid_request): context = OAuthTokenError('', 'error_type') result = views.api_token_error(context, pyramid_request) assert", "TestOAuthRevocationController(object): def test_it_creates_revocation_response(self, pyramid_request, controller, oauth_provider): 
controller.post() oauth_provider.create_revocation_response.assert_called_once_with( pyramid_request.url, pyramid_request.method,", "= {'Location': '{}?code=abcdef123456&state=foobar'.format(auth_client.redirect_uri)} body = None status = 302 svc.create_authorization_response.return_value", "credentials = {'client_id': auth_client.id, 'state': 'foobar', 'request': oauth_request} svc.validate_authorization_request.return_value =", "view() == { 'client_id': auth_client.id, 'client_name': auth_client.name, 'response_mode': response_mode, 'response_type':", "assert parsed_url.path == '/login' assert urlparse.parse_qs(parsed_url.query) == {'next': [pyramid_request.url], 'for_oauth':", "@pytest.fixture def oauth_provider(self, pyramid_config): svc = mock.Mock(spec_set=['create_token_response']) svc.create_token_response.return_value = ({},", "def test_it_raises_when_error(self, controller, oauth_provider): body = json.dumps({'error': 'invalid_request'}) oauth_provider.create_revocation_response.return_value =", "TestOAuthAccessTokenController(object): def test_it_creates_token_response(self, pyramid_request, controller, oauth_provider): controller.post() oauth_provider.create_token_response.assert_called_once_with( pyramid_request.url, pyramid_request.method,", "== 'missing_token' assert 'Bearer token does not exist or is", "@pytest.mark.parametrize('view_name', ['post', 'post_web_message']) def test_post_raises_for_invalid_request(self, controller, view_name): controller.oauth.create_authorization_response.side_effect = InvalidRequestFatalError('boom!')", "'client': {'id': oauth_token.authclient.id, 'name': oauth_token.authclient.name}, 'issued_at': utc_iso8601(oauth_token.created), 'expires_at': utc_iso8601(oauth_token.expires), 'expired':", "getattr(controller, view_name) assert view() == { 'client_id': auth_client.id, 'client_name': auth_client.name,", "assert response.location == expected def test_post_web_message_returns_expected_context(self, controller, 
auth_client): response =", "== expected @pytest.mark.usefixtures('authenticated_user') def test_get_web_message_renders_template_for_trusted_clients(self, controller, auth_client): auth_client.trusted = True", "'name': oauth_token.authclient.name}, 'issued_at': utc_iso8601(oauth_token.created), 'expires_at': utc_iso8601(oauth_token.expires), 'expired': oauth_token.expired} def test_returns_debug_data_for_developer_token(self,", "svc @pytest.fixture def oauth_token(self, factories): authclient = factories.AuthClient(name='Example Client') expires", "user_svc.fetch.side_effect = fake_fetch return user @pytest.fixture def routes(self, pyramid_config): pyramid_config.add_route('login',", "controller.oauth.create_authorization_response.side_effect = InvalidRequestFatalError('boom!') with pytest.raises(InvalidRequestFatalError) as exc: view = getattr(controller,", "= mock.Mock(spec_set=['create_revocation_response']) svc.create_revocation_response.return_value = ({}, '{}', 200) pyramid_config.register_service(svc, name='oauth_provider') return", "pyramid_request.auth_token = '' with pytest.raises(OAuthTokenError) as exc: views.debug_token(pyramid_request) assert exc.value.type", "factories): authclient = factories.AuthClient(name='Example Client') expires = datetime.datetime.utcnow() + datetime.timedelta(minutes=10)", "= views.debug_token(pyramid_request) assert result == {'userid': developer_token.userid, 'issued_at': utc_iso8601(developer_token.created), 'expires_at':", "getattr(controller, view_name) view() controller.oauth.validate_authorization_request.assert_called_once_with( pyramid_request.url) @pytest.mark.parametrize('view_name', ['get', 'get_web_message']) def test_get_raises_for_invalid_request(self,", "datetime.datetime.utcnow() + datetime.timedelta(minutes=10) return factories.DeveloperToken(authclient=authclient, expires=expires) @pytest.fixture def developer_token(self, factories):", "message', 'error_type', status_code=403) views.api_token_error(context, 
pyramid_request) assert pyramid_request.response.status_code == 403 def", "controller, pyramid_request, view_name): with pytest.raises(httpexceptions.HTTPFound) as exc: view = getattr(controller,", "= mock.Mock(spec_set=auth_token_service_factory(None, pyramid_request)) pyramid_config.register_service(svc, name='auth_token') return svc @pytest.fixture def oauth_token(self,", "OAuthTokenError(None, 'invalid_request') result = views.api_token_error(context, pyramid_request) assert 'error_description' not in", "auth_client, authenticated_user, pyramid_request): auth_client.trusted = True response = controller.get() expected", "-*- coding: utf-8 -*- from __future__ import unicode_literals import datetime", "pyramid_config.register_service(svc, name='oauth_provider') return svc class TestDebugToken(object): def test_it_raises_error_when_token_is_missing(self, pyramid_request): pyramid_request.auth_token", "oauth_provider.create_revocation_response.assert_called_once_with( pyramid_request.url, pyramid_request.method, pyramid_request.POST, pyramid_request.headers) def test_it_returns_empty_response_on_success(self, controller): response =", "'state': 'foobar', 'request': oauth_request} svc.validate_authorization_request.return_value = (scopes, credentials) headers =", "'http://example.com/auth?client_id=the-client-id' + \\ '&response_type=code' + \\ '&state=foobar' + \\ '&scope=exploit'", "pyramid_request): pyramid_request.override_renderer = None return views.OAuthAuthorizeController(None, pyramid_request) @pytest.fixture def oauth_request(self):", "h.exceptions import OAuthTokenError from h.models.auth_client import ResponseType from h.services.auth_token import", "(headers, None, 302) response = controller.get_web_message() assert response['state'] is None", "== 'error_type' def test_it_returns_error_description(self, pyramid_request): context = OAuthTokenError('error description', 'error_type')", "view_name) view() 
controller.oauth.create_authorization_response.assert_called_once_with( pyramid_request.url, credentials={'user': authenticated_user}, scopes=DEFAULT_SCOPES) def test_get_returns_redirect_immediately_for_trusted_clients(self, controller,", "assert 'Bearer token is missing' in exc.value.message def test_it_raises_error_when_token_is_empty(self, pyramid_request):", "import auth_token_service_factory from h.services.oauth_provider import OAuthProviderService from h.services.oauth_validator import DEFAULT_SCOPES", "assert 'Bearer token is missing' in exc.value.message def test_it_validates_token(self, pyramid_request,", "result = views.api_token_error(context, pyramid_request) assert result['error'] == 'error_type' def test_it_returns_error_description(self,", "@pytest.mark.usefixtures('authenticated_user') def test_get_web_message_returns_context_for_trusted_clients(self, controller, auth_client): auth_client.trusted = True response =", "'error description' def test_it_skips_description_when_missing(self, pyramid_request): context = OAuthTokenError(None, 'invalid_request') result", "def test_post_web_message_allows_empty_state_in_context(self, controller, auth_client, oauth_provider): auth_client.trusted = True headers =", "controller): response = controller.post() assert response == {} def test_it_raises_when_error(self,", "oauth_request.response_mode = response_mode view = getattr(controller, view_name) assert view() ==", "'http://client.com', 'state': 'foobar', } def test_post_web_message_allows_empty_state_in_context(self, controller, auth_client, oauth_provider): auth_client.trusted", "body, 200) assert controller.post() == {'access_token': 'the-access-token'} def test_it_raises_when_error(self, controller,", "response = controller.post() assert response == {} def test_it_raises_when_error(self, controller,", "return OAuthRequest('/') @pytest.fixture def oauth_provider(self, pyramid_config, auth_client, oauth_request): svc =", "from h.services.auth_token import 
auth_token_service_factory from h.services.oauth_provider import OAuthProviderService from h.services.oauth_validator", "'1234'} return views.OAuthRevocationController(pyramid_request) @pytest.fixture def oauth_provider(self, pyramid_config): svc = mock.Mock(spec_set=['create_revocation_response'])", "pyramid_request.POST, pyramid_request.headers) def test_it_returns_correct_response_on_success(self, controller, oauth_provider): body = json.dumps({'access_token': 'the-access-token'})", "assert exc.value.type == 'missing_token' assert 'Bearer token does not exist", "pyramid_config): svc = mock.Mock(spec_set=['create_token_response']) svc.create_token_response.return_value = ({}, '{}', 200) pyramid_config.register_service(svc,", "getattr(controller, view_name) view() parsed_url = urlparse.urlparse(exc.value.location) assert parsed_url.path == '/login'", "def test_post_creates_authorization_response(self, controller, pyramid_request, authenticated_user, view_name): pyramid_request.url = 'http://example.com/auth?client_id=the-client-id' +", "controller, oauth_provider): controller.post() oauth_provider.create_token_response.assert_called_once_with( pyramid_request.url, pyramid_request.method, pyramid_request.POST, pyramid_request.headers) def test_it_returns_correct_response_on_success(self,", "expires=expires) @pytest.fixture def developer_token(self, factories): return factories.DeveloperToken() class TestAPITokenError(object): def", "body = json.dumps({'error': 'invalid_request'}) oauth_provider.create_token_response.return_value = ({}, body, 400) with", "factories): return factories.DeveloperToken() class TestAPITokenError(object): def test_it_sets_the_response_status_code(self, pyramid_request): context =", "['post', 'post_web_message']) def test_post_raises_for_invalid_request(self, controller, view_name): controller.oauth.create_authorization_response.side_effect = InvalidRequestFatalError('boom!') with", "view = getattr(controller, view_name) view() assert 
exc.value.description == 'boom!' def", "controller.post() assert response == {} def test_it_raises_when_error(self, controller, oauth_provider): body", "exc: controller.post() assert exc.value.body == body @pytest.fixture def controller(self, pyramid_request):", "assert result['error'] == 'error_type' def test_it_returns_error_description(self, pyramid_request): context = OAuthTokenError('error", "from h._compat import urlparse from h.exceptions import OAuthTokenError from h.models.auth_client", "import user_service_factory from h.util.datetime import utc_iso8601 from h.views import api_auth", "'{}', 200) pyramid_config.register_service(svc, name='oauth_provider') return svc class TestDebugToken(object): def test_it_raises_error_when_token_is_missing(self,", "'issued_at': utc_iso8601(oauth_token.created), 'expires_at': utc_iso8601(oauth_token.expires), 'expired': oauth_token.expired} def test_returns_debug_data_for_developer_token(self, pyramid_request, token_service,", "pyramid_config.register_service(svc, name='user') return svc @pytest.fixture def pyramid_request(self, pyramid_request): pyramid_request.url =", "auth_client, oauth_request): svc = mock.create_autospec(OAuthProviderService, instance=True) scopes = ['annotation:read', 'annotation:write']", "== 'boom!' 
@pytest.mark.parametrize('view_name', ['get', 'get_web_message']) def test_get_redirects_to_login_when_not_authenticated(self, controller, pyramid_request, view_name):", "controller.oauth.validate_authorization_request.assert_called_once_with( pyramid_request.url) @pytest.mark.parametrize('view_name', ['get', 'get_web_message']) def test_get_raises_for_invalid_request(self, controller, view_name): controller.oauth.validate_authorization_request.side_effect", "test_post_creates_authorization_response(self, controller, pyramid_request, authenticated_user, view_name): pyramid_request.url = 'http://example.com/auth?client_id=the-client-id' + \\", "name='oauth_provider') return svc class TestDebugToken(object): def test_it_raises_error_when_token_is_missing(self, pyramid_request): pyramid_request.auth_token =", "assert response['state'] is None @pytest.fixture def controller(self, pyramid_request): pyramid_request.override_renderer =", "controller.post() expected = '{}?code=abcdef123456&state=foobar'.format(auth_client.redirect_uri) assert response.location == expected def test_post_web_message_returns_expected_context(self,", "@pytest.fixture def controller(self, pyramid_request): pyramid_request.method = 'POST' pyramid_request.POST['grant_type'] = 'authorization_code'", "user = factories.User.build() pyramid_config.testing_securitypolicy(user.userid) def fake_fetch(userid): if userid == user.userid:", "mock.Mock(spec_set=auth_token_service_factory(None, pyramid_request)) pyramid_config.register_service(svc, name='auth_token') return svc @pytest.fixture def oauth_token(self, factories):", "'error_type') result = views.api_token_error(context, pyramid_request) assert result['error'] == 'error_type' def", "= OAuthTokenError(None, 'invalid_request') result = views.api_token_error(context, pyramid_request) assert 'error_description' not" ]
[ "requirements. t.write(\"jamroot.jam\", \"\"\" project : requirements <link>static:<define>STATIC ; exe a", ";\") t.run_build_system([\"link=static\"]) t.expect_addition(\"bin/$toolset/debug/link-static/a.exe\") t.rm(\"bin\") # Test conditionals in project requirements.", "compiled with \"STATIC\" # define. t.write(\"a.cpp\", \"\"\"\\ #ifdef STATIC int", "a : a.cpp l ; \"\"\") t.write(\"l.cpp\", \"int i;\") t.run_build_system([\"link=static\"])", "LICENSE_1_0.txt or copy at # http://www.boost.org/LICENSE_1_0.txt) # Test conditional properties.", "is compiled with \"STATIC\" # define. t.write(\"a.cpp\", \"\"\"\\ #ifdef STATIC", "# Copyright 2003 <NAME> # Copyright 2002, 2003, 2004 <NAME>", "Boost Software License, Version 1.0. # (See accompanying file LICENSE_1_0.txt", "# Test conditionals in project requirements. t.write(\"jamroot.jam\", \"\"\" project :", "\"\"\" project : requirements <link>static:<define>STATIC ; exe a : a.cpp", "<link>static:<define>STATIC ; exe a : a.cpp ; \"\"\") t.run_build_system([\"link=static\"]) t.expect_addition(\"bin/$toolset/debug/link-static/a.exe\")", "= BoostBuild.Tester() # Arrange a project which will build only", "Conditionals inside # usage requirement were not being evaluated. t.write(\"jamroot.jam\",", "inside # usage requirement were not being evaluated. t.write(\"jamroot.jam\", \"\"\"", "Regression test for a bug found by <NAME>ani. Conditionals inside", "a : a.cpp ; \"\"\") t.run_build_system([\"link=static\"]) t.expect_addition(\"bin/$toolset/debug/link-static/a.exe\") t.rm(\"bin\") # Regression", "project requirements. t.write(\"jamroot.jam\", \"\"\" project : requirements <link>static:<define>STATIC ; exe", "a.cpp : <link>static:<define>STATIC ;\") t.run_build_system([\"link=static\"]) t.expect_addition(\"bin/$toolset/debug/link-static/a.exe\") t.rm(\"bin\") # Test conditionals", "or copy at # http://www.boost.org/LICENSE_1_0.txt) # Test conditional properties. import", "Software License, Version 1.0. 
# (See accompanying file LICENSE_1_0.txt or", "requirements. t.write(\"jamroot.jam\", \"exe a : a.cpp : <link>static:<define>STATIC ;\") t.run_build_system([\"link=static\"])", "int main() {} #endif \"\"\") # Test conditionals in target", "#!/usr/bin/python # Copyright 2003 <NAME> # Copyright 2002, 2003, 2004", "a : a.cpp : <link>static:<define>STATIC ;\") t.run_build_system([\"link=static\"]) t.expect_addition(\"bin/$toolset/debug/link-static/a.exe\") t.rm(\"bin\") #", "a.cpp l ; \"\"\") t.write(\"l.cpp\", \"int i;\") t.run_build_system([\"link=static\"]) t.expect_addition(\"bin/$toolset/debug/link-static/a.exe\") t.cleanup()", "project which will build only if 'a.cpp' is compiled with", "target requirements. t.write(\"jamroot.jam\", \"exe a : a.cpp : <link>static:<define>STATIC ;\")", "# Copyright 2002, 2003, 2004 <NAME> # Distributed under the", "bug found by <NAME>ani. Conditionals inside # usage requirement were", "# (See accompanying file LICENSE_1_0.txt or copy at # http://www.boost.org/LICENSE_1_0.txt)", "2003, 2004 <NAME> # Distributed under the Boost Software License,", ": : : <link>static:<define>STATIC ; exe a : a.cpp l", ": a.cpp ; \"\"\") t.run_build_system([\"link=static\"]) t.expect_addition(\"bin/$toolset/debug/link-static/a.exe\") t.rm(\"bin\") # Regression test", "t.write(\"jamroot.jam\", \"\"\" lib l : l.cpp : : : <link>static:<define>STATIC", "2004 <NAME> # Distributed under the Boost Software License, Version", "; exe a : a.cpp ; \"\"\") t.run_build_system([\"link=static\"]) t.expect_addition(\"bin/$toolset/debug/link-static/a.exe\") t.rm(\"bin\")", "project : requirements <link>static:<define>STATIC ; exe a : a.cpp ;", "\"exe a : a.cpp : <link>static:<define>STATIC ;\") t.run_build_system([\"link=static\"]) t.expect_addition(\"bin/$toolset/debug/link-static/a.exe\") t.rm(\"bin\")", "<link>static:<define>STATIC ; exe a : a.cpp l ; \"\"\") t.write(\"l.cpp\",", "t.write(\"jamroot.jam\", \"exe a : a.cpp : <link>static:<define>STATIC ;\") 
t.run_build_system([\"link=static\"]) t.expect_addition(\"bin/$toolset/debug/link-static/a.exe\")", ": a.cpp : <link>static:<define>STATIC ;\") t.run_build_system([\"link=static\"]) t.expect_addition(\"bin/$toolset/debug/link-static/a.exe\") t.rm(\"bin\") # Test", "requirements <link>static:<define>STATIC ; exe a : a.cpp ; \"\"\") t.run_build_system([\"link=static\"])", "import BoostBuild t = BoostBuild.Tester() # Arrange a project which", "# define. t.write(\"a.cpp\", \"\"\"\\ #ifdef STATIC int main() {} #endif", "only if 'a.cpp' is compiled with \"STATIC\" # define. t.write(\"a.cpp\",", "<link>static:<define>STATIC ;\") t.run_build_system([\"link=static\"]) t.expect_addition(\"bin/$toolset/debug/link-static/a.exe\") t.rm(\"bin\") # Test conditionals in project", "2002, 2003, 2004 <NAME> # Distributed under the Boost Software", "main() {} #endif \"\"\") # Test conditionals in target requirements.", "a bug found by <NAME>ani. Conditionals inside # usage requirement", "a.cpp ; \"\"\") t.run_build_system([\"link=static\"]) t.expect_addition(\"bin/$toolset/debug/link-static/a.exe\") t.rm(\"bin\") # Regression test for", "at # http://www.boost.org/LICENSE_1_0.txt) # Test conditional properties. import BoostBuild t", "# Arrange a project which will build only if 'a.cpp'", "by <NAME>ani. Conditionals inside # usage requirement were not being", "exe a : a.cpp l ; \"\"\") t.write(\"l.cpp\", \"int i;\")", "build only if 'a.cpp' is compiled with \"STATIC\" # define.", "t.write(\"jamroot.jam\", \"\"\" project : requirements <link>static:<define>STATIC ; exe a :", "\"\"\") t.run_build_system([\"link=static\"]) t.expect_addition(\"bin/$toolset/debug/link-static/a.exe\") t.rm(\"bin\") # Regression test for a bug", "usage requirement were not being evaluated. t.write(\"jamroot.jam\", \"\"\" lib l", "'a.cpp' is compiled with \"STATIC\" # define. t.write(\"a.cpp\", \"\"\"\\ #ifdef", "conditionals in target requirements. 
t.write(\"jamroot.jam\", \"exe a : a.cpp :", "License, Version 1.0. # (See accompanying file LICENSE_1_0.txt or copy", "t.rm(\"bin\") # Regression test for a bug found by <NAME>ani.", "# usage requirement were not being evaluated. t.write(\"jamroot.jam\", \"\"\" lib", "t.expect_addition(\"bin/$toolset/debug/link-static/a.exe\") t.rm(\"bin\") # Regression test for a bug found by", "t.expect_addition(\"bin/$toolset/debug/link-static/a.exe\") t.rm(\"bin\") # Test conditionals in project requirements. t.write(\"jamroot.jam\", \"\"\"", "found by <NAME>ani. Conditionals inside # usage requirement were not", "\"\"\") # Test conditionals in target requirements. t.write(\"jamroot.jam\", \"exe a", "for a bug found by <NAME>ani. Conditionals inside # usage", "l : l.cpp : : : <link>static:<define>STATIC ; exe a", "Test conditional properties. import BoostBuild t = BoostBuild.Tester() # Arrange", "t.rm(\"bin\") # Test conditionals in project requirements. t.write(\"jamroot.jam\", \"\"\" project", "# http://www.boost.org/LICENSE_1_0.txt) # Test conditional properties. import BoostBuild t =", "; \"\"\") t.run_build_system([\"link=static\"]) t.expect_addition(\"bin/$toolset/debug/link-static/a.exe\") t.rm(\"bin\") # Regression test for a", "{} #endif \"\"\") # Test conditionals in target requirements. t.write(\"jamroot.jam\",", "<NAME> # Copyright 2002, 2003, 2004 <NAME> # Distributed under", "t = BoostBuild.Tester() # Arrange a project which will build", "<filename>tools/build/v2/test/conditionals.py #!/usr/bin/python # Copyright 2003 <NAME> # Copyright 2002, 2003,", "#endif \"\"\") # Test conditionals in target requirements. t.write(\"jamroot.jam\", \"exe", "were not being evaluated. t.write(\"jamroot.jam\", \"\"\" lib l : l.cpp", "evaluated. t.write(\"jamroot.jam\", \"\"\" lib l : l.cpp : : :", "#ifdef STATIC int main() {} #endif \"\"\") # Test conditionals", "2003 <NAME> # Copyright 2002, 2003, 2004 <NAME> # Distributed", "conditional properties. 
import BoostBuild t = BoostBuild.Tester() # Arrange a", "Copyright 2002, 2003, 2004 <NAME> # Distributed under the Boost", "test for a bug found by <NAME>ani. Conditionals inside #", "under the Boost Software License, Version 1.0. # (See accompanying", ": <link>static:<define>STATIC ;\") t.run_build_system([\"link=static\"]) t.expect_addition(\"bin/$toolset/debug/link-static/a.exe\") t.rm(\"bin\") # Test conditionals in", "in project requirements. t.write(\"jamroot.jam\", \"\"\" project : requirements <link>static:<define>STATIC ;", "lib l : l.cpp : : : <link>static:<define>STATIC ; exe", "with \"STATIC\" # define. t.write(\"a.cpp\", \"\"\"\\ #ifdef STATIC int main()", "copy at # http://www.boost.org/LICENSE_1_0.txt) # Test conditional properties. import BoostBuild", "file LICENSE_1_0.txt or copy at # http://www.boost.org/LICENSE_1_0.txt) # Test conditional", "if 'a.cpp' is compiled with \"STATIC\" # define. t.write(\"a.cpp\", \"\"\"\\", "\"\"\" lib l : l.cpp : : : <link>static:<define>STATIC ;", "1.0. # (See accompanying file LICENSE_1_0.txt or copy at #", ": <link>static:<define>STATIC ; exe a : a.cpp l ; \"\"\")", "BoostBuild.Tester() # Arrange a project which will build only if", ": requirements <link>static:<define>STATIC ; exe a : a.cpp ; \"\"\")", "properties. import BoostBuild t = BoostBuild.Tester() # Arrange a project", "<NAME> # Distributed under the Boost Software License, Version 1.0.", "http://www.boost.org/LICENSE_1_0.txt) # Test conditional properties. import BoostBuild t = BoostBuild.Tester()", "\"STATIC\" # define. t.write(\"a.cpp\", \"\"\"\\ #ifdef STATIC int main() {}", "requirement were not being evaluated. t.write(\"jamroot.jam\", \"\"\" lib l :", "l.cpp : : : <link>static:<define>STATIC ; exe a : a.cpp", "conditionals in project requirements. 
t.write(\"jamroot.jam\", \"\"\" project : requirements <link>static:<define>STATIC", ": a.cpp l ; \"\"\") t.write(\"l.cpp\", \"int i;\") t.run_build_system([\"link=static\"]) t.expect_addition(\"bin/$toolset/debug/link-static/a.exe\")", "t.run_build_system([\"link=static\"]) t.expect_addition(\"bin/$toolset/debug/link-static/a.exe\") t.rm(\"bin\") # Regression test for a bug found", "accompanying file LICENSE_1_0.txt or copy at # http://www.boost.org/LICENSE_1_0.txt) # Test", "Test conditionals in project requirements. t.write(\"jamroot.jam\", \"\"\" project : requirements", "which will build only if 'a.cpp' is compiled with \"STATIC\"", "# Test conditionals in target requirements. t.write(\"jamroot.jam\", \"exe a :", "<NAME>ani. Conditionals inside # usage requirement were not being evaluated.", "(See accompanying file LICENSE_1_0.txt or copy at # http://www.boost.org/LICENSE_1_0.txt) #", ": l.cpp : : : <link>static:<define>STATIC ; exe a :", "will build only if 'a.cpp' is compiled with \"STATIC\" #", "# Distributed under the Boost Software License, Version 1.0. #", "exe a : a.cpp ; \"\"\") t.run_build_system([\"link=static\"]) t.expect_addition(\"bin/$toolset/debug/link-static/a.exe\") t.rm(\"bin\") #", "t.run_build_system([\"link=static\"]) t.expect_addition(\"bin/$toolset/debug/link-static/a.exe\") t.rm(\"bin\") # Test conditionals in project requirements. t.write(\"jamroot.jam\",", "Copyright 2003 <NAME> # Copyright 2002, 2003, 2004 <NAME> #", "Version 1.0. # (See accompanying file LICENSE_1_0.txt or copy at", "in target requirements. t.write(\"jamroot.jam\", \"exe a : a.cpp : <link>static:<define>STATIC", "Arrange a project which will build only if 'a.cpp' is", "t.write(\"a.cpp\", \"\"\"\\ #ifdef STATIC int main() {} #endif \"\"\") #", ": : <link>static:<define>STATIC ; exe a : a.cpp l ;", "being evaluated. t.write(\"jamroot.jam\", \"\"\" lib l : l.cpp : :", "# Test conditional properties. 
import BoostBuild t = BoostBuild.Tester() #", "\"\"\"\\ #ifdef STATIC int main() {} #endif \"\"\") # Test", "not being evaluated. t.write(\"jamroot.jam\", \"\"\" lib l : l.cpp :", "define. t.write(\"a.cpp\", \"\"\"\\ #ifdef STATIC int main() {} #endif \"\"\")", "STATIC int main() {} #endif \"\"\") # Test conditionals in", "# Regression test for a bug found by <NAME>ani. Conditionals", "Test conditionals in target requirements. t.write(\"jamroot.jam\", \"exe a : a.cpp", "the Boost Software License, Version 1.0. # (See accompanying file", "a project which will build only if 'a.cpp' is compiled", "BoostBuild t = BoostBuild.Tester() # Arrange a project which will", "; exe a : a.cpp l ; \"\"\") t.write(\"l.cpp\", \"int", "Distributed under the Boost Software License, Version 1.0. # (See" ]
[ "-> None: py_class = PythonClass(value=10) assert py_class.value == 10 def", "py_class.value == 10 def test_example_class() -> None: example = ExampleClass(value=11)", "import PythonClass, ExampleClass def test_python_class() -> None: py_class = PythonClass(value=10)", "assert py_class.value == 10 def test_example_class() -> None: example =", "<gh_stars>1-10 from setuptools_rust_starter import PythonClass, ExampleClass def test_python_class() -> None:", "def test_example_class() -> None: example = ExampleClass(value=11) assert example.value ==", "setuptools_rust_starter import PythonClass, ExampleClass def test_python_class() -> None: py_class =", "ExampleClass def test_python_class() -> None: py_class = PythonClass(value=10) assert py_class.value", "10 def test_example_class() -> None: example = ExampleClass(value=11) assert example.value", "from setuptools_rust_starter import PythonClass, ExampleClass def test_python_class() -> None: py_class", "test_example_class() -> None: example = ExampleClass(value=11) assert example.value == 11", "test_python_class() -> None: py_class = PythonClass(value=10) assert py_class.value == 10", "= PythonClass(value=10) assert py_class.value == 10 def test_example_class() -> None:", "PythonClass, ExampleClass def test_python_class() -> None: py_class = PythonClass(value=10) assert", "== 10 def test_example_class() -> None: example = ExampleClass(value=11) assert", "PythonClass(value=10) assert py_class.value == 10 def test_example_class() -> None: example", "def test_python_class() -> None: py_class = PythonClass(value=10) assert py_class.value ==", "py_class = PythonClass(value=10) assert py_class.value == 10 def test_example_class() ->", "None: py_class = PythonClass(value=10) assert py_class.value == 10 def test_example_class()" ]
[ "response = self.make_request( url=login_url, headers=self._common_headers, method=\"POST\", json=login_data, ) if response.content.decode()", "\"web\", \"X-Juejin-Token\": self._login_token, \"X-Juejin-Uid\": self._login_uid, } ) response = self.make_request(url=like_blogs_url,", "= None entry_list = self._response_data[\"d\"][\"entrylist\"] if len(entry_list) > 0: for", "self._task_id = task_id self._login_username = username self._login_password = password self._spider_name:", "if response.content.decode() != \"\": self._response_data = response.json() if self._response_data is", "== 0: url_params += f\"?{data[0]}={data[1]}\" else: url_params += f\"&{data[0]}={data[1]}\" blogs_url:", "None: login_data.update(phoneNumber=self._login_username) return f\"{self._main_url}/phoneNumber\", login_data if email_login is not None:", "method=BaseSpiderParseMethodType.LoginResult ) else: try: login_params = get_result.split(\"&\")[1:-1] self._login_uid = [d", "0: url_params += f\"?{data[0]}={data[1]}\" else: url_params += f\"&{data[0]}={data[1]}\" blogs_url: str", "not True ): logger.error(f\"当前掘金账号登录状态: 已退出!\") self._async_task.remove_async_scheduler(job_id=self._spider_name) return False test_json_response =", "CookieUtils from utils.str_utils import check_is_phone_number, check_is_email_address logger = LogManager(__name__).get_logger_and_add_handlers( formatter_template=5,", "self._response_data is not None and self._response_data[\"m\"] == \"success\" ): logger.info(f\"当前正在获取第{page_no", "self._response_data = response.json() self._login_cookies = CookieUtils( cookie_list=response.cookies.items() ).to_str() logger.debug(self._login_cookies) self.set_cookies(", "description, \"avatarImg\": avatar_img, \"followee\": followee, \"follower\": follower, \"likeBlogs\": like_blogs, }", "= self.get_data(spider_name=f\"{self._spider_name}:params\") if get_result is None: self.parse_data_with_method( method=BaseSpiderParseMethodType.LoginResult ) else:", "+= 1 if page_no <= 
self._like_blogs_total_page: # TODO 后面考虑多线程进行任务拆分,并发获取数据 time.sleep(0.5)", "phone_login is not None: login_data.update(phoneNumber=self._login_username) return f\"{self._main_url}/phoneNumber\", login_data if email_login", "else: # logger.debug(self._like_blogs_data) logger.debug(f\"获取到 {len(self._like_blogs_data)} 条个人点赞博客\") self.data_model.set_personal_like_blogs_data( data=self._like_blogs_data ) logger.info(\"获取个人点赞博客成功!\")", "is not None: login_data.update(phoneNumber=self._login_username) return f\"{self._main_url}/phoneNumber\", login_data if email_login is", "headers=self._common_headers, method=\"POST\", json=login_data, ) if response.content.decode() != \"\": logger.info(\"登录成功!\") self._response_data", "return None def parse_data_with_method(self, method: str): if method == BaseSpiderParseMethodType.LoginResult:", "\"createdAt\", } if next_params is not None: req_data.update(before=next_params) url_params: str", "enumerate(req_data.items()): if index == 0: url_params += f\"?{data[0]}={data[1]}\" else: url_params", "if self._response_data is not None and self._response_data[\"m\"] == \"ok\": next_page_variable", "index, data in enumerate(req_data.items()): if index == 0: url_params +=", "import LogManager from utils.str_utils import check_is_json from config import LOG_LEVEL,", "\"type\": \"post\", \"limit\": \"20\", \"order\": \"createdAt\", } if next_params is", "self.update_task_status( task_id=self._task_id, data=str(PROCESS_STATUS_FAIL) ) raise ParseDataException() def _test_cookies(self, cookies: Optional[str]", "List, Tuple, Optional from utils.logger_utils import LogManager from utils.str_utils import", "self._login_uid, \"device_id\": self._login_client_id, \"token\": self._login_token, \"targetUid\": self._login_uid, \"type\": \"post\", \"limit\":", "utils.exception_utils import LoginException, ParseDataException from spiders import BaseSpider, BaseSpiderParseMethodType, CookieUtils", "for d in login_params if \"device_id\" in d 
][0].replace(\"device_id=\", \"\")", "= self.get_cookies(spider_name=self._spider_name) def _check_username(self) -> Optional[Tuple[str, Dict]]: \"\"\" 解析用户名 :return:", "total_pages += 1 self._like_blogs_total_page = total_pages entry_list = self._response_data[\"d\"][\"entryList\"] if", "import check_is_json from config import LOG_LEVEL, PROCESS_STATUS_FAIL from utils.time_utils import", "== BaseSpiderParseMethodType.LoginResult: self._parse_login_data() elif method == BaseSpiderParseMethodType.PersonalBlogs: self._parse_personal_blogs() self._parse_personal_like_blogs() elif", "method=BaseSpiderParseMethodType.PersonalBlogs ) except Exception as err: logger.error(f\"解析 Redis 返回数据失败! 错误原因:", "= f\"{self._blogs_url}{url_params}\" response = self.make_request(url=blogs_url, headers=self._common_headers) if response.content.decode() != \"\":", "= None self._login_uid: Optional[str] = None self._login_client_id: Optional[str] = None", "\"blogViewers\": entry_data[\"viewsCount\"], \"blogCreateTime\": datetime_str_change_fmt( time_str=entry_data[\"createdAt\"], prev_fmt=\"%Y-%m-%dT%H:%M:%S.%fZ\", ), } self._like_blogs_data.append(blog_data) page_no", "% 20 if rest_count != 0: total_pages += 1 self._like_blogs_total_page", "][0].replace(\"device_id=\", \"\") self.parse_data_with_method( method=BaseSpiderParseMethodType.PersonalBlogs ) except Exception as err: logger.error(f\"解析", "self.parse_data_with_method(method=BaseSpiderParseMethodType.Finish) else: logger.error(\"查询个人点赞博客失败!\") self.update_task_status( task_id=self._task_id, data=str(PROCESS_STATUS_FAIL) ) raise ParseDataException() def", "is not None and self._response_data[\"m\"] == \"success\" ): logger.info(f\"当前正在获取第{page_no +", "f\"&current_uid={self._login_uid}\" self.set_data(spider_name=f\"{self._spider_name}:params\", data=params) # 个人数据 username = self._response_data[\"user\"][\"username\"] description =", "method: str): if method == BaseSpiderParseMethodType.LoginResult: self._parse_login_data() elif 
method ==", "avatar_img, \"followee\": followee, \"follower\": follower, \"likeBlogs\": like_blogs, } logger.debug(personal_data) self.data_model.set_personal_data(data=personal_data)", "LoginException() def _parse_personal_like_blogs(self, page_no: int = 0): like_blogs_url: str =", "rest_count = total_count % 20 if rest_count != 0: total_pages", "self.get_cookies(spider_name=self._spider_name) def _check_username(self) -> Optional[Tuple[str, Dict]]: \"\"\" 解析用户名 :return: 结果", "后面考虑多线程进行任务拆分,并发获取数据 time.sleep(0.5) self._parse_personal_like_blogs(page_no=page_no) else: # logger.debug(self._like_blogs_data) logger.debug(f\"获取到 {len(self._like_blogs_data)} 条个人点赞博客\") self.data_model.set_personal_like_blogs_data(", "= { \"blogId\": entry_data[\"objectId\"], \"blogTitle\": entry_data[\"title\"], \"blogHref\": entry_data[\"originalUrl\"], \"blogViewers\": entry_data[\"viewsCount\"],", "Optional[str] = None): req_data: dict = { \"src\": \"web\", \"uid\":", "test_response = self.make_request( url=test_user_url, headers=test_request_headers ) if ( test_response.status_code !=", ":return: 结果 \"\"\" phone_login = check_is_phone_number(data=self._login_username) email_login = check_is_email_address(data=self._login_username) login_data:", "is None: login_url, login_data = self._check_username() response = self.make_request( url=login_url,", ") class JuejinSpider(BaseSpider): def __init__(self, task_id: str, username: str, password:", "self._response_data[\"clientId\"] # 重要参数持久化 params: str = f\"?src=web&uid={self._login_uid}\" f\"&token={self._login_token}\" f\"&device_id={self._login_client_id}\" f\"&current_uid={self._login_uid}\"", "self._common_headers.update( { \"X-Juejin-Client\": str(self._login_client_id), \"X-Juejin-Src\": \"web\", \"X-Juejin-Token\": self._login_token, \"X-Juejin-Uid\": self._login_uid,", "self._login_cookies = self.get_cookies(spider_name=self._spider_name) def _check_username(self) -> Optional[Tuple[str, Dict]]: \"\"\" 解析用户名", "raise 
ValueError(\"Your login username is illegal!\") if phone_login is not", "__init__(self, task_id: str, username: str, password: str): self._main_url = \"https://juejin.im/auth/type\"", "None and email_login is None: raise ValueError(\"Your login username is", "utils.str_utils import check_is_json from config import LOG_LEVEL, PROCESS_STATUS_FAIL from utils.time_utils", "data=params) # 个人数据 username = self._response_data[\"user\"][\"username\"] description = self._response_data[\"user\"][\"selfDescription\"] avatar_img", "logger.error(\"登录失败!\") raise LoginException() else: get_result: str = self.get_data(spider_name=f\"{self._spider_name}:params\") if get_result", "self._parse_personal_blogs(next_params=next_page_variable) else: logger.debug(self._blogs_data) self.data_model.set_personal_blogs_data(data=self._blogs_data) logger.info(\"获取个人博客数据成功!\") else: logger.error(\"查询个人博客失败!\") self.update_task_status( task_id=self._task_id, data=str(PROCESS_STATUS_FAIL)", "url=test_user_url, headers=test_request_headers ) if ( test_response.status_code != 200 or check_is_json(test_response.content.decode())", "task_id=self._task_id, data=str(PROCESS_STATUS_FAIL) ) raise LoginException() def _parse_personal_like_blogs(self, page_no: int =", "): logger.info(f\"当前正在获取第{page_no + 1}页的数据!\") if page_no == 0: total_count =", "= { \"blogId\": personal_blog[\"objectId\"], \"blogTitle\": personal_blog[\"title\"], \"blogHref\": personal_blog[\"originalUrl\"], \"blogViewers\": personal_blog[\"viewsCount\"],", "{err}\") self.parse_data_with_method( method=BaseSpiderParseMethodType.LoginResult ) def _parse_login_data(self): # 公共参数 self._login_token =", "personal_data: Dict = { \"username\": username, \"description\": description, \"avatarImg\": avatar_img,", "\"blogViewers\": personal_blog[\"viewsCount\"], \"blogCreateTime\": blog_create_time, } self._blogs_data.append(blog_data) next_page_variable = personal_blog[\"verifyCreatedAt\"] if", "= None) -> bool: params = 
self.get_data(spider_name=f\"{self._spider_name}:params\") if params is", "from utils.str_utils import check_is_phone_number, check_is_email_address logger = LogManager(__name__).get_logger_and_add_handlers( formatter_template=5, log_level_int=LOG_LEVEL", "cookies: Optional[str] = None) -> bool: params = self.get_data(spider_name=f\"{self._spider_name}:params\") if", "get_result is None: self.parse_data_with_method( method=BaseSpiderParseMethodType.LoginResult ) else: try: login_params =", "= None self._response_data = None self._blogs_data: List = [] self._like_blogs_data:", "email_login = check_is_email_address(data=self._login_username) login_data: Dict = {\"password\": self._login_password} if phone_login", "self._login_cookies is None: login_url, login_data = self._check_username() response = self.make_request(", "self.set_data(spider_name=f\"{self._spider_name}:params\", data=params) # 个人数据 username = self._response_data[\"user\"][\"username\"] description = self._response_data[\"user\"][\"selfDescription\"]", "self._response_data[\"userId\"] self._login_client_id = self._response_data[\"clientId\"] # 重要参数持久化 params: str = f\"?src=web&uid={self._login_uid}\"", "in entry_list: if entry_data is None: continue blog_data: Dict =", "Optional[str] = None self._login_client_id: Optional[str] = None self._response_data = None", "self._response_data[\"d\"][\"entryList\"] if len(entry_list) > 0: for entry_data in entry_list: if", "url_params += f\"?{data[0]}={data[1]}\" else: url_params += f\"&{data[0]}={data[1]}\" blogs_url: str =", "= f\"juejin:{self._login_username}\" self._login_cookies: Optional[str] = None self._login_token: Optional[str] = None", "self._response_data = None self._blogs_data: List = [] self._like_blogs_data: List =", "= { \"src\": \"web\", \"uid\": self._login_uid, \"device_id\": self._login_client_id, \"token\": self._login_token,", "\"X-Juejin-Uid\": self._login_uid, } ) response = self.make_request(url=like_blogs_url, 
headers=self._common_headers) if response.content.decode()", "None and self._response_data[\"m\"] == \"success\" ): logger.info(f\"当前正在获取第{page_no + 1}页的数据!\") if", "Dict]]: \"\"\" 解析用户名 :return: 结果 \"\"\" phone_login = check_is_phone_number(data=self._login_username) email_login", "= CookieUtils( cookie_list=response.cookies.items() ).to_str() logger.debug(self._login_cookies) self.set_cookies( spider_name=self._spider_name, cookies=self._login_cookies ) self.parse_data_with_method(", "url=login_url, headers=self._common_headers, method=\"POST\", json=login_data, ) if response.content.decode() != \"\": logger.info(\"登录成功!\")", "= total_count // 20 rest_count = total_count % 20 if", "login_data = self._check_username() response = self.make_request( url=login_url, headers=self._common_headers, method=\"POST\", json=login_data,", "\"\": self._response_data = response.json() if self._response_data is not None and", "已退出!\") self._async_task.remove_async_scheduler(job_id=self._spider_name) return False test_json_response = test_response.json() if test_json_response[\"s\"] ==", "password: str): self._main_url = \"https://juejin.im/auth/type\" self._blogs_url = \"https://timeline-merger-ms.juejin.im/v1/get_entry_by_self\" self._like_blogs_url =", "+ 1}页的数据!\") if page_no == 0: total_count = self._response_data[\"d\"][\"total\"] total_pages", "任务末尾 self.parse_data_with_method(method=BaseSpiderParseMethodType.Finish) else: logger.error(\"查询个人点赞博客失败!\") self.update_task_status( task_id=self._task_id, data=str(PROCESS_STATUS_FAIL) ) raise ParseDataException()", "= {\"password\": self._login_password} if phone_login is None and email_login is", "self._response_data[\"user\"][\"followersCount\"] like_blogs = self._response_data[\"user\"][\"collectedEntriesCount\"] personal_data: Dict = { \"username\": username,", "self._blogs_url = \"https://timeline-merger-ms.juejin.im/v1/get_entry_by_self\" self._like_blogs_url = \"https://user-like-wrapper-ms.juejin.im/v1/user\" 
self._task_id = task_id self._login_username", "= total_count % 20 if rest_count != 0: total_pages +=", "[d for d in login_params if \"token\" in d][ 0", ").to_str() logger.debug(self._login_cookies) self.set_cookies( spider_name=self._spider_name, cookies=self._login_cookies ) self.parse_data_with_method( method=BaseSpiderParseMethodType.LoginResult ) else:", "self._response_data[\"user\"][\"avatarLarge\"] followee = self._response_data[\"user\"][\"followeesCount\"] follower = self._response_data[\"user\"][\"followersCount\"] like_blogs = self._response_data[\"user\"][\"collectedEntriesCount\"]", "self._login_uid, \"type\": \"post\", \"limit\": \"20\", \"order\": \"createdAt\", } if next_params", "try: login_params = get_result.split(\"&\")[1:-1] self._login_uid = [d for d in", "return f\"{self._main_url}/phoneNumber\", login_data if email_login is not None: login_data.update(email=self._login_username) return", "self._blogs_data: List = [] self._like_blogs_data: List = [] self._like_blogs_total_page: int", "as err: logger.error(f\"解析 Redis 返回数据失败! 错误原因: {err}\") self.parse_data_with_method( method=BaseSpiderParseMethodType.LoginResult )", "\"X-Juejin-Src\": \"web\", \"X-Juejin-Token\": self._login_token, \"X-Juejin-Uid\": self._login_uid, } ) response =", ") if response.content.decode() != \"\": logger.info(\"登录成功!\") self._response_data = response.json() self._login_cookies", "// 20 rest_count = total_count % 20 if rest_count !=", "else: try: login_params = get_result.split(\"&\")[1:-1] self._login_uid = [d for d", "Redis 返回数据失败! 
错误原因: {err}\") self.parse_data_with_method( method=BaseSpiderParseMethodType.LoginResult ) def _parse_login_data(self): #", "\"blogId\": entry_data[\"objectId\"], \"blogTitle\": entry_data[\"title\"], \"blogHref\": entry_data[\"originalUrl\"], \"blogViewers\": entry_data[\"viewsCount\"], \"blogCreateTime\": datetime_str_change_fmt(", "data=str(PROCESS_STATUS_FAIL) ) raise ParseDataException() def _test_cookies(self, cookies: Optional[str] = None)", "None self._login_client_id: Optional[str] = None self._response_data = None self._blogs_data: List", "== 1: logger.info(f\"当前掘金账号为: {self._login_username}, 状态: 已登录\") return True else: logger.error(f\"当前掘金账号登录状态:", "!= 0: total_pages += 1 self._like_blogs_total_page = total_pages entry_list =", "else: logger.error(\"查询个人点赞博客失败!\") self.update_task_status( task_id=self._task_id, data=str(PROCESS_STATUS_FAIL) ) raise ParseDataException() def _test_cookies(self,", "logger.debug(self._like_blogs_data) logger.debug(f\"获取到 {len(self._like_blogs_data)} 条个人点赞博客\") self.data_model.set_personal_like_blogs_data( data=self._like_blogs_data ) logger.info(\"获取个人点赞博客成功!\") # 任务末尾", "time from typing import Dict, List, Tuple, Optional from utils.logger_utils", "Optional[str] = None self._login_token: Optional[str] = None self._login_uid: Optional[str] =", "response.json() self._login_cookies = CookieUtils( cookie_list=response.cookies.items() ).to_str() logger.debug(self._login_cookies) self.set_cookies( spider_name=self._spider_name, cookies=self._login_cookies", "# 任务末尾 self.parse_data_with_method(method=BaseSpiderParseMethodType.Finish) else: logger.error(\"查询个人点赞博客失败!\") self.update_task_status( task_id=self._task_id, data=str(PROCESS_STATUS_FAIL) ) raise", "= f\"https://user-storage-api-ms.juejin.im/v1/getUserInfo{params}\" test_request_headers: Dict = self.get_default_headers() test_response = self.make_request( url=test_user_url,", "followee, \"follower\": follower, \"likeBlogs\": like_blogs, } logger.debug(personal_data) 
self.data_model.set_personal_data(data=personal_data) self.parse_data_with_method(method=BaseSpiderParseMethodType.PersonalBlogs) def", "not None: login_data.update(phoneNumber=self._login_username) return f\"{self._main_url}/phoneNumber\", login_data if email_login is not", "0 ].replace(\"uid=\", \"\") self._login_token = [d for d in login_params", "Optional[str] = None self._response_data = None self._blogs_data: List = []", "self._spider_name: str = f\"juejin:{self._login_username}\" self._login_cookies: Optional[str] = None self._login_token: Optional[str]", "= self._response_data[\"user\"][\"collectedEntriesCount\"] personal_data: Dict = { \"username\": username, \"description\": description,", "1: logger.info(f\"当前掘金账号为: {self._login_username}, 状态: 已登录\") return True else: logger.error(f\"当前掘金账号登录状态: 已退出!\")", "test_response.status_code != 200 or check_is_json(test_response.content.decode()) is not True ): logger.error(f\"当前掘金账号登录状态:", "self._response_data[\"m\"] == \"success\" ): logger.info(f\"当前正在获取第{page_no + 1}页的数据!\") if page_no ==", "None: continue blog_data: Dict = { \"blogId\": entry_data[\"objectId\"], \"blogTitle\": entry_data[\"title\"],", "follower = self._response_data[\"user\"][\"followersCount\"] like_blogs = self._response_data[\"user\"][\"collectedEntriesCount\"] personal_data: Dict = {", "else: url_params += f\"&{data[0]}={data[1]}\" blogs_url: str = f\"{self._blogs_url}{url_params}\" response =", "req_data: dict = { \"src\": \"web\", \"uid\": self._login_uid, \"device_id\": self._login_client_id,", "if page_no == 0: total_count = self._response_data[\"d\"][\"total\"] total_pages = total_count", "logger.debug(f\"获取到 {len(self._like_blogs_data)} 条个人点赞博客\") self.data_model.set_personal_like_blogs_data( data=self._like_blogs_data ) logger.info(\"获取个人点赞博客成功!\") # 任务末尾 self.parse_data_with_method(method=BaseSpiderParseMethodType.Finish)", "page_no: int = 0): like_blogs_url: str = 
f\"{self._like_blogs_url}/{self._login_uid}/like/entry?page={page_no}&pageSize=20\" self._common_headers.update( {", "self._response_data[\"d\"][\"total\"] > 20: time.sleep(0.5) self._parse_personal_blogs(next_params=next_page_variable) else: logger.debug(self._blogs_data) self.data_model.set_personal_blogs_data(data=self._blogs_data) logger.info(\"获取个人博客数据成功!\") else:", ") logger.info(\"获取个人点赞博客成功!\") # 任务末尾 self.parse_data_with_method(method=BaseSpiderParseMethodType.Finish) else: logger.error(\"查询个人点赞博客失败!\") self.update_task_status( task_id=self._task_id, data=str(PROCESS_STATUS_FAIL)", "= response.json() if self._response_data is not None and self._response_data[\"m\"] ==", "test_response.json() if test_json_response[\"s\"] == 1: logger.info(f\"当前掘金账号为: {self._login_username}, 状态: 已登录\") return", "{ \"blogId\": entry_data[\"objectId\"], \"blogTitle\": entry_data[\"title\"], \"blogHref\": entry_data[\"originalUrl\"], \"blogViewers\": entry_data[\"viewsCount\"], \"blogCreateTime\":", "self._response_data[\"d\"][\"total\"] total_pages = total_count // 20 rest_count = total_count %", "like_blogs_url: str = f\"{self._like_blogs_url}/{self._login_uid}/like/entry?page={page_no}&pageSize=20\" self._common_headers.update( { \"X-Juejin-Client\": str(self._login_client_id), \"X-Juejin-Src\": \"web\",", "200 or check_is_json(test_response.content.decode()) is not True ): logger.error(f\"当前掘金账号登录状态: 已退出!\") self._async_task.remove_async_scheduler(job_id=self._spider_name)", "def parse_data_with_method(self, method: str): if method == BaseSpiderParseMethodType.LoginResult: self._parse_login_data() elif", "str = f\"https://user-storage-api-ms.juejin.im/v1/getUserInfo{params}\" test_request_headers: Dict = self.get_default_headers() test_response = self.make_request(", "self.make_request(url=like_blogs_url, headers=self._common_headers) if response.content.decode() != \"\": self._response_data = response.json() if", "in enumerate(req_data.items()): if index == 0: url_params 
+= f\"?{data[0]}={data[1]}\" else:", "\"success\" ): logger.info(f\"当前正在获取第{page_no + 1}页的数据!\") if page_no == 0: total_count", "utils.time_utils import datetime_str_change_fmt from utils.exception_utils import LoginException, ParseDataException from spiders", "!= 200 or check_is_json(test_response.content.decode()) is not True ): logger.error(f\"当前掘金账号登录状态: 已退出!\")", "self._login_uid = [d for d in login_params if \"uid\" in", "\"X-Juejin-Client\": str(self._login_client_id), \"X-Juejin-Src\": \"web\", \"X-Juejin-Token\": self._login_token, \"X-Juejin-Uid\": self._login_uid, } )", "self._parse_personal_blogs() self._parse_personal_like_blogs() elif method == BaseSpiderParseMethodType.Finish: self.send_data() def login(self): if", "!= \"\": self._response_data = response.json() if self._response_data is not None", "self.make_request( url=test_user_url, headers=test_request_headers ) if ( test_response.status_code != 200 or", "self.parse_data_with_method( method=BaseSpiderParseMethodType.LoginResult ) else: logger.error(\"登录失败!\") raise LoginException() else: get_result: str", "else: get_result: str = self.get_data(spider_name=f\"{self._spider_name}:params\") if get_result is None: self.parse_data_with_method(", "method == BaseSpiderParseMethodType.Finish: self.send_data() def login(self): if self._login_cookies is None:", ") response = self.make_request(url=like_blogs_url, headers=self._common_headers) if response.content.decode() != \"\": self._response_data", "{ \"username\": username, \"description\": description, \"avatarImg\": avatar_img, \"followee\": followee, \"follower\":", "login_url, login_data = self._check_username() response = self.make_request( url=login_url, headers=self._common_headers, method=\"POST\",", "self._response_data[\"user\"][\"collectedEntriesCount\"] personal_data: Dict = { \"username\": username, \"description\": description, \"avatarImg\":", "= [] self._like_blogs_total_page: int = 0 super().__init__() self._login_cookies = 
self.get_cookies(spider_name=self._spider_name)", "personal_blog[\"objectId\"], \"blogTitle\": personal_blog[\"title\"], \"blogHref\": personal_blog[\"originalUrl\"], \"blogViewers\": personal_blog[\"viewsCount\"], \"blogCreateTime\": blog_create_time, }", "entry_data is None: continue blog_data: Dict = { \"blogId\": entry_data[\"objectId\"],", "method == BaseSpiderParseMethodType.LoginResult: self._parse_login_data() elif method == BaseSpiderParseMethodType.PersonalBlogs: self._parse_personal_blogs() self._parse_personal_like_blogs()", "prev_fmt=\"%Y-%m-%dT%H:%M:%S.%fZ\", ) blog_data: Dict = { \"blogId\": personal_blog[\"objectId\"], \"blogTitle\": personal_blog[\"title\"],", "} ) response = self.make_request(url=like_blogs_url, headers=self._common_headers) if response.content.decode() != \"\":", "import LoginException, ParseDataException from spiders import BaseSpider, BaseSpiderParseMethodType, CookieUtils from", "# 公共参数 self._login_token = self._response_data[\"token\"] self._login_uid = self._response_data[\"userId\"] self._login_client_id =", "= datetime_str_change_fmt( time_str=personal_blog[\"createdAt\"], prev_fmt=\"%Y-%m-%dT%H:%M:%S.%fZ\", ) blog_data: Dict = { \"blogId\":", "), } self._like_blogs_data.append(blog_data) page_no += 1 if page_no <= self._like_blogs_total_page:", "\"src\": \"web\", \"uid\": self._login_uid, \"device_id\": self._login_client_id, \"token\": self._login_token, \"targetUid\": self._login_uid,", "password self._spider_name: str = f\"juejin:{self._login_username}\" self._login_cookies: Optional[str] = None self._login_token:", "# 重要参数持久化 params: str = f\"?src=web&uid={self._login_uid}\" f\"&token={self._login_token}\" f\"&device_id={self._login_client_id}\" f\"&current_uid={self._login_uid}\" self.set_data(spider_name=f\"{self._spider_name}:params\",", "20 if rest_count != 0: total_pages += 1 self._like_blogs_total_page =", "= 0): like_blogs_url: str = 
f\"{self._like_blogs_url}/{self._login_uid}/like/entry?page={page_no}&pageSize=20\" self._common_headers.update( { \"X-Juejin-Client\": str(self._login_client_id),", "entry_list = self._response_data[\"d\"][\"entrylist\"] if len(entry_list) > 0: for personal_blog in", "else: logger.error(\"登录失败!\") raise LoginException() else: get_result: str = self.get_data(spider_name=f\"{self._spider_name}:params\") if", "d in login_params if \"uid\" in d][ 0 ].replace(\"uid=\", \"\")", "20: time.sleep(0.5) self._parse_personal_blogs(next_params=next_page_variable) else: logger.debug(self._blogs_data) self.data_model.set_personal_blogs_data(data=self._blogs_data) logger.info(\"获取个人博客数据成功!\") else: logger.error(\"查询个人博客失败!\") self.update_task_status(", "= self._response_data[\"clientId\"] # 重要参数持久化 params: str = f\"?src=web&uid={self._login_uid}\" f\"&token={self._login_token}\" f\"&device_id={self._login_client_id}\"", "str): self._main_url = \"https://juejin.im/auth/type\" self._blogs_url = \"https://timeline-merger-ms.juejin.im/v1/get_entry_by_self\" self._like_blogs_url = \"https://user-like-wrapper-ms.juejin.im/v1/user\"", "<= self._like_blogs_total_page: # TODO 后面考虑多线程进行任务拆分,并发获取数据 time.sleep(0.5) self._parse_personal_like_blogs(page_no=page_no) else: # logger.debug(self._like_blogs_data)", "f\"?{data[0]}={data[1]}\" else: url_params += f\"&{data[0]}={data[1]}\" blogs_url: str = f\"{self._blogs_url}{url_params}\" response", "login_data return None def parse_data_with_method(self, method: str): if method ==", "int = 0): like_blogs_url: str = f\"{self._like_blogs_url}/{self._login_uid}/like/entry?page={page_no}&pageSize=20\" self._common_headers.update( { \"X-Juejin-Client\":", "response = self.make_request(url=like_blogs_url, headers=self._common_headers) if response.content.decode() != \"\": self._response_data =", "True ): logger.error(f\"当前掘金账号登录状态: 已退出!\") self._async_task.remove_async_scheduler(job_id=self._spider_name) return False test_json_response = 
test_response.json()", "get_result: str = self.get_data(spider_name=f\"{self._spider_name}:params\") if get_result is None: self.parse_data_with_method( method=BaseSpiderParseMethodType.LoginResult", "= self._response_data[\"user\"][\"selfDescription\"] avatar_img = self._response_data[\"user\"][\"avatarLarge\"] followee = self._response_data[\"user\"][\"followeesCount\"] follower =", ") raise ParseDataException() def _test_cookies(self, cookies: Optional[str] = None) ->", "else: logger.error(\"查询个人博客失败!\") self.update_task_status( task_id=self._task_id, data=str(PROCESS_STATUS_FAIL) ) raise LoginException() def _parse_personal_like_blogs(self,", "\"follower\": follower, \"likeBlogs\": like_blogs, } logger.debug(personal_data) self.data_model.set_personal_data(data=personal_data) self.parse_data_with_method(method=BaseSpiderParseMethodType.PersonalBlogs) def _parse_personal_blogs(self,", "\"description\": description, \"avatarImg\": avatar_img, \"followee\": followee, \"follower\": follower, \"likeBlogs\": like_blogs,", "= None self._login_token: Optional[str] = None self._login_uid: Optional[str] = None", "= 0 super().__init__() self._login_cookies = self.get_cookies(spider_name=self._spider_name) def _check_username(self) -> Optional[Tuple[str,", "\"uid\": self._login_uid, \"device_id\": self._login_client_id, \"token\": self._login_token, \"targetUid\": self._login_uid, \"type\": \"post\",", "self._login_token, \"X-Juejin-Uid\": self._login_uid, } ) response = self.make_request(url=like_blogs_url, headers=self._common_headers) if", "\"username\": username, \"description\": description, \"avatarImg\": avatar_img, \"followee\": followee, \"follower\": follower,", "check_is_json(test_response.content.decode()) is not True ): logger.error(f\"当前掘金账号登录状态: 已退出!\") self._async_task.remove_async_scheduler(job_id=self._spider_name) return False", "username: str, password: str): self._main_url = \"https://juejin.im/auth/type\" self._blogs_url = 
\"https://timeline-merger-ms.juejin.im/v1/get_entry_by_self\"", "_check_username(self) -> Optional[Tuple[str, Dict]]: \"\"\" 解析用户名 :return: 结果 \"\"\" phone_login", "self._login_token, \"targetUid\": self._login_uid, \"type\": \"post\", \"limit\": \"20\", \"order\": \"createdAt\", }", "= get_result.split(\"&\")[1:-1] self._login_uid = [d for d in login_params if", "\"device_id\": self._login_client_id, \"token\": self._login_token, \"targetUid\": self._login_uid, \"type\": \"post\", \"limit\": \"20\",", "JuejinSpider(BaseSpider): def __init__(self, task_id: str, username: str, password: str): self._main_url", "\"\": self._response_data = response.json() if ( self._response_data is not None", "None) -> bool: params = self.get_data(spider_name=f\"{self._spider_name}:params\") if params is None:", "blogs_url: str = f\"{self._blogs_url}{url_params}\" response = self.make_request(url=blogs_url, headers=self._common_headers) if response.content.decode()", "def login(self): if self._login_cookies is None: login_url, login_data = self._check_username()", "LogManager from utils.str_utils import check_is_json from config import LOG_LEVEL, PROCESS_STATUS_FAIL", "LoginException, ParseDataException from spiders import BaseSpider, BaseSpiderParseMethodType, CookieUtils from utils.str_utils", "not None: req_data.update(before=next_params) url_params: str = \"\" for index, data", "= self._response_data[\"d\"][\"entryList\"] if len(entry_list) > 0: for entry_data in entry_list:", "def _parse_personal_blogs(self, next_params: Optional[str] = None): req_data: dict = {", "entry_data in entry_list: if entry_data is None: continue blog_data: Dict", "= total_pages entry_list = self._response_data[\"d\"][\"entryList\"] if len(entry_list) > 0: for", "params = self.get_data(spider_name=f\"{self._spider_name}:params\") if params is None: return False test_user_url:", "self._like_blogs_data: List = [] self._like_blogs_total_page: int = 0 super().__init__() self._login_cookies", "avatar_img = 
self._response_data[\"user\"][\"avatarLarge\"] followee = self._response_data[\"user\"][\"followeesCount\"] follower = self._response_data[\"user\"][\"followersCount\"] like_blogs", "self._parse_login_data() elif method == BaseSpiderParseMethodType.PersonalBlogs: self._parse_personal_blogs() self._parse_personal_like_blogs() elif method ==", "== 0: total_count = self._response_data[\"d\"][\"total\"] total_pages = total_count // 20", "f\"&token={self._login_token}\" f\"&device_id={self._login_client_id}\" f\"&current_uid={self._login_uid}\" self.set_data(spider_name=f\"{self._spider_name}:params\", data=params) # 个人数据 username = self._response_data[\"user\"][\"username\"]", "in d][ 0 ].replace(\"uid=\", \"\") self._login_token = [d for d", "): logger.error(f\"当前掘金账号登录状态: 已退出!\") self._async_task.remove_async_scheduler(job_id=self._spider_name) return False test_json_response = test_response.json() if", "return False test_json_response = test_response.json() if test_json_response[\"s\"] == 1: logger.info(f\"当前掘金账号为:", "= None): req_data: dict = { \"src\": \"web\", \"uid\": self._login_uid,", "Optional from utils.logger_utils import LogManager from utils.str_utils import check_is_json from", "check_is_phone_number, check_is_email_address logger = LogManager(__name__).get_logger_and_add_handlers( formatter_template=5, log_level_int=LOG_LEVEL ) class JuejinSpider(BaseSpider):", "not None: login_data.update(email=self._login_username) return f\"{self._main_url}/email\", login_data return None def parse_data_with_method(self,", "{ \"X-Juejin-Client\": str(self._login_client_id), \"X-Juejin-Src\": \"web\", \"X-Juejin-Token\": self._login_token, \"X-Juejin-Uid\": self._login_uid, }", "\"post\", \"limit\": \"20\", \"order\": \"createdAt\", } if next_params is not", "config import LOG_LEVEL, PROCESS_STATUS_FAIL from utils.time_utils import datetime_str_change_fmt from utils.exception_utils", "BaseSpiderParseMethodType, CookieUtils from utils.str_utils import 
check_is_phone_number, check_is_email_address logger = LogManager(__name__).get_logger_and_add_handlers(", "logger = LogManager(__name__).get_logger_and_add_handlers( formatter_template=5, log_level_int=LOG_LEVEL ) class JuejinSpider(BaseSpider): def __init__(self,", "} self._like_blogs_data.append(blog_data) page_no += 1 if page_no <= self._like_blogs_total_page: #", "f\"{self._main_url}/phoneNumber\", login_data if email_login is not None: login_data.update(email=self._login_username) return f\"{self._main_url}/email\",", "entry_list: blog_create_time = datetime_str_change_fmt( time_str=personal_blog[\"createdAt\"], prev_fmt=\"%Y-%m-%dT%H:%M:%S.%fZ\", ) blog_data: Dict =", "ParseDataException() def _test_cookies(self, cookies: Optional[str] = None) -> bool: params", "task_id self._login_username = username self._login_password = password self._spider_name: str =", "[] self._like_blogs_total_page: int = 0 super().__init__() self._login_cookies = self.get_cookies(spider_name=self._spider_name) def", "\"20\", \"order\": \"createdAt\", } if next_params is not None: req_data.update(before=next_params)", "entry_data[\"title\"], \"blogHref\": entry_data[\"originalUrl\"], \"blogViewers\": entry_data[\"viewsCount\"], \"blogCreateTime\": datetime_str_change_fmt( time_str=entry_data[\"createdAt\"], prev_fmt=\"%Y-%m-%dT%H:%M:%S.%fZ\", ),", "= \"\" for index, data in enumerate(req_data.items()): if index ==", "in entry_list: blog_create_time = datetime_str_change_fmt( time_str=personal_blog[\"createdAt\"], prev_fmt=\"%Y-%m-%dT%H:%M:%S.%fZ\", ) blog_data: Dict", "data=str(PROCESS_STATUS_FAIL) ) raise LoginException() def _parse_personal_like_blogs(self, page_no: int = 0):", "def _test_cookies(self, cookies: Optional[str] = None) -> bool: params =", ") else: logger.error(\"登录失败!\") raise LoginException() else: get_result: str = self.get_data(spider_name=f\"{self._spider_name}:params\")", "in d ][0].replace(\"device_id=\", \"\") self.parse_data_with_method( 
method=BaseSpiderParseMethodType.PersonalBlogs ) except Exception as", "= check_is_phone_number(data=self._login_username) email_login = check_is_email_address(data=self._login_username) login_data: Dict = {\"password\": self._login_password}", "page_no += 1 if page_no <= self._like_blogs_total_page: # TODO 后面考虑多线程进行任务拆分,并发获取数据", "BaseSpiderParseMethodType.LoginResult: self._parse_login_data() elif method == BaseSpiderParseMethodType.PersonalBlogs: self._parse_personal_blogs() self._parse_personal_like_blogs() elif method", "url_params: str = \"\" for index, data in enumerate(req_data.items()): if", "for d in login_params if \"token\" in d][ 0 ].replace(\"token=\",", "self._login_cookies = CookieUtils( cookie_list=response.cookies.items() ).to_str() logger.debug(self._login_cookies) self.set_cookies( spider_name=self._spider_name, cookies=self._login_cookies )", "response = self.make_request(url=blogs_url, headers=self._common_headers) if response.content.decode() != \"\": self._response_data =", "\"\"\" 解析用户名 :return: 结果 \"\"\" phone_login = check_is_phone_number(data=self._login_username) email_login =", "login_params if \"device_id\" in d ][0].replace(\"device_id=\", \"\") self.parse_data_with_method( method=BaseSpiderParseMethodType.PersonalBlogs )", "self._response_data is not None and self._response_data[\"m\"] == \"ok\": next_page_variable =", "if method == BaseSpiderParseMethodType.LoginResult: self._parse_login_data() elif method == BaseSpiderParseMethodType.PersonalBlogs: self._parse_personal_blogs()", "None and self._response_data[\"m\"] == \"ok\": next_page_variable = None entry_list =", "spider_name=self._spider_name, cookies=self._login_cookies ) self.parse_data_with_method( method=BaseSpiderParseMethodType.LoginResult ) else: logger.error(\"登录失败!\") raise LoginException()", "next_params: Optional[str] = None): req_data: dict = { \"src\": \"web\",", "0: for personal_blog in entry_list: blog_create_time = datetime_str_change_fmt( 
time_str=personal_blog[\"createdAt\"], prev_fmt=\"%Y-%m-%dT%H:%M:%S.%fZ\",", "= \"https://juejin.im/auth/type\" self._blogs_url = \"https://timeline-merger-ms.juejin.im/v1/get_entry_by_self\" self._like_blogs_url = \"https://user-like-wrapper-ms.juejin.im/v1/user\" self._task_id =", "\"token\" in d][ 0 ].replace(\"token=\", \"\") self._login_client_id = [ d", "d][ 0 ].replace(\"uid=\", \"\") self._login_token = [d for d in", "username, \"description\": description, \"avatarImg\": avatar_img, \"followee\": followee, \"follower\": follower, \"likeBlogs\":", "False test_user_url: str = f\"https://user-storage-api-ms.juejin.im/v1/getUserInfo{params}\" test_request_headers: Dict = self.get_default_headers() test_response", "self._login_token = self._response_data[\"token\"] self._login_uid = self._response_data[\"userId\"] self._login_client_id = self._response_data[\"clientId\"] #", "self._login_uid = self._response_data[\"userId\"] self._login_client_id = self._response_data[\"clientId\"] # 重要参数持久化 params: str", "in login_params if \"device_id\" in d ][0].replace(\"device_id=\", \"\") self.parse_data_with_method( method=BaseSpiderParseMethodType.PersonalBlogs", "headers=self._common_headers) if response.content.decode() != \"\": self._response_data = response.json() if (", "Tuple, Optional from utils.logger_utils import LogManager from utils.str_utils import check_is_json", "check_is_phone_number(data=self._login_username) email_login = check_is_email_address(data=self._login_username) login_data: Dict = {\"password\": self._login_password} if", "len(entry_list) > 0: for personal_blog in entry_list: blog_create_time = datetime_str_change_fmt(", "== \"success\" ): logger.info(f\"当前正在获取第{page_no + 1}页的数据!\") if page_no == 0:", "Dict = { \"blogId\": personal_blog[\"objectId\"], \"blogTitle\": personal_blog[\"title\"], \"blogHref\": personal_blog[\"originalUrl\"], \"blogViewers\":", "continue blog_data: Dict = { \"blogId\": entry_data[\"objectId\"], \"blogTitle\": 
entry_data[\"title\"], \"blogHref\":", "from utils.time_utils import datetime_str_change_fmt from utils.exception_utils import LoginException, ParseDataException from", "# 个人数据 username = self._response_data[\"user\"][\"username\"] description = self._response_data[\"user\"][\"selfDescription\"] avatar_img =", "[ d for d in login_params if \"device_id\" in d", "> 20: time.sleep(0.5) self._parse_personal_blogs(next_params=next_page_variable) else: logger.debug(self._blogs_data) self.data_model.set_personal_blogs_data(data=self._blogs_data) logger.info(\"获取个人博客数据成功!\") else: logger.error(\"查询个人博客失败!\")", "task_id=self._task_id, data=str(PROCESS_STATUS_FAIL) ) raise ParseDataException() def _test_cookies(self, cookies: Optional[str] =", "not None and self._response_data[\"m\"] == \"success\" ): logger.info(f\"当前正在获取第{page_no + 1}页的数据!\")", "self._async_task.remove_async_scheduler(job_id=self._spider_name) return False test_json_response = test_response.json() if test_json_response[\"s\"] == 1:", "\"\" for index, data in enumerate(req_data.items()): if index == 0:", "for personal_blog in entry_list: blog_create_time = datetime_str_change_fmt( time_str=personal_blog[\"createdAt\"], prev_fmt=\"%Y-%m-%dT%H:%M:%S.%fZ\", )", "is None and email_login is None: raise ValueError(\"Your login username", "if \"uid\" in d][ 0 ].replace(\"uid=\", \"\") self._login_token = [d", "错误原因: {err}\") self.parse_data_with_method( method=BaseSpiderParseMethodType.LoginResult ) def _parse_login_data(self): # 公共参数 self._login_token", "super().__init__() self._login_cookies = self.get_cookies(spider_name=self._spider_name) def _check_username(self) -> Optional[Tuple[str, Dict]]: \"\"\"", "= self.make_request( url=test_user_url, headers=test_request_headers ) if ( test_response.status_code != 200", "or check_is_json(test_response.content.decode()) is not True ): logger.error(f\"当前掘金账号登录状态: 已退出!\") self._async_task.remove_async_scheduler(job_id=self._spider_name) return", "= 
self._response_data[\"token\"] self._login_uid = self._response_data[\"userId\"] self._login_client_id = self._response_data[\"clientId\"] # 重要参数持久化", "self._check_username() response = self.make_request( url=login_url, headers=self._common_headers, method=\"POST\", json=login_data, ) if", "except Exception as err: logger.error(f\"解析 Redis 返回数据失败! 错误原因: {err}\") self.parse_data_with_method(", "if response.content.decode() != \"\": logger.info(\"登录成功!\") self._response_data = response.json() self._login_cookies =", "\"web\", \"uid\": self._login_uid, \"device_id\": self._login_client_id, \"token\": self._login_token, \"targetUid\": self._login_uid, \"type\":", "self._like_blogs_data.append(blog_data) page_no += 1 if page_no <= self._like_blogs_total_page: # TODO", "f\"?src=web&uid={self._login_uid}\" f\"&token={self._login_token}\" f\"&device_id={self._login_client_id}\" f\"&current_uid={self._login_uid}\" self.set_data(spider_name=f\"{self._spider_name}:params\", data=params) # 个人数据 username =", "parse_data_with_method(self, method: str): if method == BaseSpiderParseMethodType.LoginResult: self._parse_login_data() elif method", "# logger.debug(self._like_blogs_data) logger.debug(f\"获取到 {len(self._like_blogs_data)} 条个人点赞博客\") self.data_model.set_personal_like_blogs_data( data=self._like_blogs_data ) logger.info(\"获取个人点赞博客成功!\") #", "{ \"blogId\": personal_blog[\"objectId\"], \"blogTitle\": personal_blog[\"title\"], \"blogHref\": personal_blog[\"originalUrl\"], \"blogViewers\": personal_blog[\"viewsCount\"], \"blogCreateTime\":", "self._login_username = username self._login_password = password self._spider_name: str = f\"juejin:{self._login_username}\"", "personal_blog[\"verifyCreatedAt\"] if self._response_data[\"d\"][\"total\"] > 20: time.sleep(0.5) self._parse_personal_blogs(next_params=next_page_variable) else: logger.debug(self._blogs_data) self.data_model.set_personal_blogs_data(data=self._blogs_data)", "params is None: return False test_user_url: str = 
f\"https://user-storage-api-ms.juejin.im/v1/getUserInfo{params}\" test_request_headers:", "\"\") self._login_client_id = [ d for d in login_params if", ") except Exception as err: logger.error(f\"解析 Redis 返回数据失败! 错误原因: {err}\")", "} logger.debug(personal_data) self.data_model.set_personal_data(data=personal_data) self.parse_data_with_method(method=BaseSpiderParseMethodType.PersonalBlogs) def _parse_personal_blogs(self, next_params: Optional[str] = None):", "None: login_url, login_data = self._check_username() response = self.make_request( url=login_url, headers=self._common_headers,", "if phone_login is not None: login_data.update(phoneNumber=self._login_username) return f\"{self._main_url}/phoneNumber\", login_data if", "is illegal!\") if phone_login is not None: login_data.update(phoneNumber=self._login_username) return f\"{self._main_url}/phoneNumber\",", "return f\"{self._main_url}/email\", login_data return None def parse_data_with_method(self, method: str): if", "logger.error(\"查询个人点赞博客失败!\") self.update_task_status( task_id=self._task_id, data=str(PROCESS_STATUS_FAIL) ) raise ParseDataException() def _test_cookies(self, cookies:", "from spiders import BaseSpider, BaseSpiderParseMethodType, CookieUtils from utils.str_utils import check_is_phone_number,", "_parse_personal_like_blogs(self, page_no: int = 0): like_blogs_url: str = f\"{self._like_blogs_url}/{self._login_uid}/like/entry?page={page_no}&pageSize=20\" self._common_headers.update(", "from typing import Dict, List, Tuple, Optional from utils.logger_utils import", "response.content.decode() != \"\": logger.info(\"登录成功!\") self._response_data = response.json() self._login_cookies = CookieUtils(", "f\"https://user-storage-api-ms.juejin.im/v1/getUserInfo{params}\" test_request_headers: Dict = self.get_default_headers() test_response = self.make_request( url=test_user_url, headers=test_request_headers", "dict = { \"src\": \"web\", \"uid\": self._login_uid, \"device_id\": self._login_client_id, \"token\":", 
"logger.info(\"获取个人点赞博客成功!\") # 任务末尾 self.parse_data_with_method(method=BaseSpiderParseMethodType.Finish) else: logger.error(\"查询个人点赞博客失败!\") self.update_task_status( task_id=self._task_id, data=str(PROCESS_STATUS_FAIL) )", "logger.error(\"查询个人博客失败!\") self.update_task_status( task_id=self._task_id, data=str(PROCESS_STATUS_FAIL) ) raise LoginException() def _parse_personal_like_blogs(self, page_no:", "blog_create_time = datetime_str_change_fmt( time_str=personal_blog[\"createdAt\"], prev_fmt=\"%Y-%m-%dT%H:%M:%S.%fZ\", ) blog_data: Dict = {", "Optional[Tuple[str, Dict]]: \"\"\" 解析用户名 :return: 结果 \"\"\" phone_login = check_is_phone_number(data=self._login_username)", "\"https://user-like-wrapper-ms.juejin.im/v1/user\" self._task_id = task_id self._login_username = username self._login_password = password", "spiders import BaseSpider, BaseSpiderParseMethodType, CookieUtils from utils.str_utils import check_is_phone_number, check_is_email_address", "= [d for d in login_params if \"token\" in d][", "None self._response_data = None self._blogs_data: List = [] self._like_blogs_data: List", "from utils.str_utils import check_is_json from config import LOG_LEVEL, PROCESS_STATUS_FAIL from", "username self._login_password = password self._spider_name: str = f\"juejin:{self._login_username}\" self._login_cookies: Optional[str]", "if \"device_id\" in d ][0].replace(\"device_id=\", \"\") self.parse_data_with_method( method=BaseSpiderParseMethodType.PersonalBlogs ) except", "\"https://timeline-merger-ms.juejin.im/v1/get_entry_by_self\" self._like_blogs_url = \"https://user-like-wrapper-ms.juejin.im/v1/user\" self._task_id = task_id self._login_username = username", "str, username: str, password: str): self._main_url = \"https://juejin.im/auth/type\" self._blogs_url =", "cookies=self._login_cookies ) self.parse_data_with_method( method=BaseSpiderParseMethodType.LoginResult ) else: logger.error(\"登录失败!\") raise LoginException() else:", "rest_count != 0: total_pages += 1 
self._like_blogs_total_page = total_pages entry_list", "is not True ): logger.error(f\"当前掘金账号登录状态: 已退出!\") self._async_task.remove_async_scheduler(job_id=self._spider_name) return False test_json_response", "Dict = { \"blogId\": entry_data[\"objectId\"], \"blogTitle\": entry_data[\"title\"], \"blogHref\": entry_data[\"originalUrl\"], \"blogViewers\":", "check_is_json from config import LOG_LEVEL, PROCESS_STATUS_FAIL from utils.time_utils import datetime_str_change_fmt", "_parse_personal_blogs(self, next_params: Optional[str] = None): req_data: dict = { \"src\":", "raise LoginException() def _parse_personal_like_blogs(self, page_no: int = 0): like_blogs_url: str", ") blog_data: Dict = { \"blogId\": personal_blog[\"objectId\"], \"blogTitle\": personal_blog[\"title\"], \"blogHref\":", "data=self._like_blogs_data ) logger.info(\"获取个人点赞博客成功!\") # 任务末尾 self.parse_data_with_method(method=BaseSpiderParseMethodType.Finish) else: logger.error(\"查询个人点赞博客失败!\") self.update_task_status( task_id=self._task_id,", "self._login_client_id: Optional[str] = None self._response_data = None self._blogs_data: List =", "\"blogTitle\": personal_blog[\"title\"], \"blogHref\": personal_blog[\"originalUrl\"], \"blogViewers\": personal_blog[\"viewsCount\"], \"blogCreateTime\": blog_create_time, } self._blogs_data.append(blog_data)", "logger.error(f\"解析 Redis 返回数据失败! 
错误原因: {err}\") self.parse_data_with_method( method=BaseSpiderParseMethodType.LoginResult ) def _parse_login_data(self):", "BaseSpiderParseMethodType.Finish: self.send_data() def login(self): if self._login_cookies is None: login_url, login_data", "logger.error(f\"当前掘金账号登录状态: 已退出!\") self._async_task.remove_async_scheduler(job_id=self._spider_name) return False test_json_response = test_response.json() if test_json_response[\"s\"]", "response.content.decode() != \"\": self._response_data = response.json() if self._response_data is not", "self._response_data[\"user\"][\"followeesCount\"] follower = self._response_data[\"user\"][\"followersCount\"] like_blogs = self._response_data[\"user\"][\"collectedEntriesCount\"] personal_data: Dict =", "in d][ 0 ].replace(\"token=\", \"\") self._login_client_id = [ d for", ") if ( test_response.status_code != 200 or check_is_json(test_response.content.decode()) is not", "is None: return False test_user_url: str = f\"https://user-storage-api-ms.juejin.im/v1/getUserInfo{params}\" test_request_headers: Dict", "raise LoginException() else: get_result: str = self.get_data(spider_name=f\"{self._spider_name}:params\") if get_result is", "if len(entry_list) > 0: for personal_blog in entry_list: blog_create_time =", "import check_is_phone_number, check_is_email_address logger = LogManager(__name__).get_logger_and_add_handlers( formatter_template=5, log_level_int=LOG_LEVEL ) class", "self._parse_personal_like_blogs() elif method == BaseSpiderParseMethodType.Finish: self.send_data() def login(self): if self._login_cookies", ") else: try: login_params = get_result.split(\"&\")[1:-1] self._login_uid = [d for", "\"avatarImg\": avatar_img, \"followee\": followee, \"follower\": follower, \"likeBlogs\": like_blogs, } logger.debug(personal_data)", "total_count // 20 rest_count = total_count % 20 if rest_count", "-> bool: params = self.get_data(spider_name=f\"{self._spider_name}:params\") if params is None: return", "\"X-Juejin-Token\": 
self._login_token, \"X-Juejin-Uid\": self._login_uid, } ) response = self.make_request(url=like_blogs_url, headers=self._common_headers)", "= None self._blogs_data: List = [] self._like_blogs_data: List = []", "self._response_data[\"token\"] self._login_uid = self._response_data[\"userId\"] self._login_client_id = self._response_data[\"clientId\"] # 重要参数持久化 params:", "self.parse_data_with_method( method=BaseSpiderParseMethodType.PersonalBlogs ) except Exception as err: logger.error(f\"解析 Redis 返回数据失败!", "if self._login_cookies is None: login_url, login_data = self._check_username() response =", "d ][0].replace(\"device_id=\", \"\") self.parse_data_with_method( method=BaseSpiderParseMethodType.PersonalBlogs ) except Exception as err:", "Optional[str] = None) -> bool: params = self.get_data(spider_name=f\"{self._spider_name}:params\") if params", "TODO 后面考虑多线程进行任务拆分,并发获取数据 time.sleep(0.5) self._parse_personal_like_blogs(page_no=page_no) else: # logger.debug(self._like_blogs_data) logger.debug(f\"获取到 {len(self._like_blogs_data)} 条个人点赞博客\")", "time_str=personal_blog[\"createdAt\"], prev_fmt=\"%Y-%m-%dT%H:%M:%S.%fZ\", ) blog_data: Dict = { \"blogId\": personal_blog[\"objectId\"], \"blogTitle\":", "test_user_url: str = f\"https://user-storage-api-ms.juejin.im/v1/getUserInfo{params}\" test_request_headers: Dict = self.get_default_headers() test_response =", "next_page_variable = None entry_list = self._response_data[\"d\"][\"entrylist\"] if len(entry_list) > 0:", "entry_data[\"originalUrl\"], \"blogViewers\": entry_data[\"viewsCount\"], \"blogCreateTime\": datetime_str_change_fmt( time_str=entry_data[\"createdAt\"], prev_fmt=\"%Y-%m-%dT%H:%M:%S.%fZ\", ), } self._like_blogs_data.append(blog_data)", "if test_json_response[\"s\"] == 1: logger.info(f\"当前掘金账号为: {self._login_username}, 状态: 已登录\") return True", "f\"{self._main_url}/email\", login_data return None def parse_data_with_method(self, method: str): if method", "self.send_data() def login(self): if self._login_cookies 
is None: login_url, login_data =", "self.data_model.set_personal_blogs_data(data=self._blogs_data) logger.info(\"获取个人博客数据成功!\") else: logger.error(\"查询个人博客失败!\") self.update_task_status( task_id=self._task_id, data=str(PROCESS_STATUS_FAIL) ) raise LoginException()", "self.data_model.set_personal_data(data=personal_data) self.parse_data_with_method(method=BaseSpiderParseMethodType.PersonalBlogs) def _parse_personal_blogs(self, next_params: Optional[str] = None): req_data: dict", "if len(entry_list) > 0: for entry_data in entry_list: if entry_data", "not None and self._response_data[\"m\"] == \"ok\": next_page_variable = None entry_list", "self._response_data[\"d\"][\"entrylist\"] if len(entry_list) > 0: for personal_blog in entry_list: blog_create_time", "None: raise ValueError(\"Your login username is illegal!\") if phone_login is", "str = f\"{self._like_blogs_url}/{self._login_uid}/like/entry?page={page_no}&pageSize=20\" self._common_headers.update( { \"X-Juejin-Client\": str(self._login_client_id), \"X-Juejin-Src\": \"web\", \"X-Juejin-Token\":", "self._parse_personal_like_blogs(page_no=page_no) else: # logger.debug(self._like_blogs_data) logger.debug(f\"获取到 {len(self._like_blogs_data)} 条个人点赞博客\") self.data_model.set_personal_like_blogs_data( data=self._like_blogs_data )", "= self._response_data[\"d\"][\"entrylist\"] if len(entry_list) > 0: for personal_blog in entry_list:", "self._like_blogs_url = \"https://user-like-wrapper-ms.juejin.im/v1/user\" self._task_id = task_id self._login_username = username self._login_password", "login_data.update(phoneNumber=self._login_username) return f\"{self._main_url}/phoneNumber\", login_data if email_login is not None: login_data.update(email=self._login_username)", "in login_params if \"uid\" in d][ 0 ].replace(\"uid=\", \"\") self._login_token", "LOG_LEVEL, PROCESS_STATUS_FAIL from utils.time_utils import datetime_str_change_fmt from utils.exception_utils import LoginException,", "test_json_response[\"s\"] == 1: 
logger.info(f\"当前掘金账号为: {self._login_username}, 状态: 已登录\") return True else:", "_test_cookies(self, cookies: Optional[str] = None) -> bool: params = self.get_data(spider_name=f\"{self._spider_name}:params\")", "\"likeBlogs\": like_blogs, } logger.debug(personal_data) self.data_model.set_personal_data(data=personal_data) self.parse_data_with_method(method=BaseSpiderParseMethodType.PersonalBlogs) def _parse_personal_blogs(self, next_params: Optional[str]", "\"device_id\" in d ][0].replace(\"device_id=\", \"\") self.parse_data_with_method( method=BaseSpiderParseMethodType.PersonalBlogs ) except Exception", "= [d for d in login_params if \"uid\" in d][", "0 super().__init__() self._login_cookies = self.get_cookies(spider_name=self._spider_name) def _check_username(self) -> Optional[Tuple[str, Dict]]:", "= f\"?src=web&uid={self._login_uid}\" f\"&token={self._login_token}\" f\"&device_id={self._login_client_id}\" f\"&current_uid={self._login_uid}\" self.set_data(spider_name=f\"{self._spider_name}:params\", data=params) # 个人数据 username", "None: req_data.update(before=next_params) url_params: str = \"\" for index, data in", "if email_login is not None: login_data.update(email=self._login_username) return f\"{self._main_url}/email\", login_data return", "is not None and self._response_data[\"m\"] == \"ok\": next_page_variable = None", "formatter_template=5, log_level_int=LOG_LEVEL ) class JuejinSpider(BaseSpider): def __init__(self, task_id: str, username:", "is None: self.parse_data_with_method( method=BaseSpiderParseMethodType.LoginResult ) else: try: login_params = get_result.split(\"&\")[1:-1]", "self.parse_data_with_method( method=BaseSpiderParseMethodType.LoginResult ) def _parse_login_data(self): # 公共参数 self._login_token = self._response_data[\"token\"]", "def __init__(self, task_id: str, username: str, password: str): self._main_url =", "None entry_list = self._response_data[\"d\"][\"entrylist\"] if len(entry_list) > 0: for personal_blog", 
"personal_blog[\"viewsCount\"], \"blogCreateTime\": blog_create_time, } self._blogs_data.append(blog_data) next_page_variable = personal_blog[\"verifyCreatedAt\"] if self._response_data[\"d\"][\"total\"]", "-> Optional[Tuple[str, Dict]]: \"\"\" 解析用户名 :return: 结果 \"\"\" phone_login =", "page_no <= self._like_blogs_total_page: # TODO 后面考虑多线程进行任务拆分,并发获取数据 time.sleep(0.5) self._parse_personal_like_blogs(page_no=page_no) else: #", "d in login_params if \"token\" in d][ 0 ].replace(\"token=\", \"\")", "+= f\"&{data[0]}={data[1]}\" blogs_url: str = f\"{self._blogs_url}{url_params}\" response = self.make_request(url=blogs_url, headers=self._common_headers)", "elif method == BaseSpiderParseMethodType.Finish: self.send_data() def login(self): if self._login_cookies is", "followee = self._response_data[\"user\"][\"followeesCount\"] follower = self._response_data[\"user\"][\"followersCount\"] like_blogs = self._response_data[\"user\"][\"collectedEntriesCount\"] personal_data:", "结果 \"\"\" phone_login = check_is_phone_number(data=self._login_username) email_login = check_is_email_address(data=self._login_username) login_data: Dict", "= self.get_data(spider_name=f\"{self._spider_name}:params\") if params is None: return False test_user_url: str", "\"uid\" in d][ 0 ].replace(\"uid=\", \"\") self._login_token = [d for", "username is illegal!\") if phone_login is not None: login_data.update(phoneNumber=self._login_username) return", "个人数据 username = self._response_data[\"user\"][\"username\"] description = self._response_data[\"user\"][\"selfDescription\"] avatar_img = self._response_data[\"user\"][\"avatarLarge\"]", "= self._check_username() response = self.make_request( url=login_url, headers=self._common_headers, method=\"POST\", json=login_data, )", "= response.json() self._login_cookies = CookieUtils( cookie_list=response.cookies.items() ).to_str() logger.debug(self._login_cookies) self.set_cookies( spider_name=self._spider_name,", "page_no == 0: total_count = 
self._response_data[\"d\"][\"total\"] total_pages = total_count //", "].replace(\"token=\", \"\") self._login_client_id = [ d for d in login_params", "blog_create_time, } self._blogs_data.append(blog_data) next_page_variable = personal_blog[\"verifyCreatedAt\"] if self._response_data[\"d\"][\"total\"] > 20:", "self.get_default_headers() test_response = self.make_request( url=test_user_url, headers=test_request_headers ) if ( test_response.status_code", "BaseSpider, BaseSpiderParseMethodType, CookieUtils from utils.str_utils import check_is_phone_number, check_is_email_address logger =", "blog_data: Dict = { \"blogId\": entry_data[\"objectId\"], \"blogTitle\": entry_data[\"title\"], \"blogHref\": entry_data[\"originalUrl\"],", "import datetime_str_change_fmt from utils.exception_utils import LoginException, ParseDataException from spiders import", "login_data if email_login is not None: login_data.update(email=self._login_username) return f\"{self._main_url}/email\", login_data", "self._login_uid, } ) response = self.make_request(url=like_blogs_url, headers=self._common_headers) if response.content.decode() !=", "if get_result is None: self.parse_data_with_method( method=BaseSpiderParseMethodType.LoginResult ) else: try: login_params", "self._response_data = response.json() if ( self._response_data is not None and", "str, password: str): self._main_url = \"https://juejin.im/auth/type\" self._blogs_url = \"https://timeline-merger-ms.juejin.im/v1/get_entry_by_self\" self._like_blogs_url", "self.update_task_status( task_id=self._task_id, data=str(PROCESS_STATUS_FAIL) ) raise LoginException() def _parse_personal_like_blogs(self, page_no: int", "!= \"\": logger.info(\"登录成功!\") self._response_data = response.json() self._login_cookies = CookieUtils( cookie_list=response.cookies.items()", "0: for entry_data in entry_list: if entry_data is None: continue", "import Dict, List, Tuple, Optional from utils.logger_utils import LogManager from", "重要参数持久化 params: str = 
f\"?src=web&uid={self._login_uid}\" f\"&token={self._login_token}\" f\"&device_id={self._login_client_id}\" f\"&current_uid={self._login_uid}\" self.set_data(spider_name=f\"{self._spider_name}:params\", data=params)", "公共参数 self._login_token = self._response_data[\"token\"] self._login_uid = self._response_data[\"userId\"] self._login_client_id = self._response_data[\"clientId\"]", "str = f\"?src=web&uid={self._login_uid}\" f\"&token={self._login_token}\" f\"&device_id={self._login_client_id}\" f\"&current_uid={self._login_uid}\" self.set_data(spider_name=f\"{self._spider_name}:params\", data=params) # 个人数据", "blog_data: Dict = { \"blogId\": personal_blog[\"objectId\"], \"blogTitle\": personal_blog[\"title\"], \"blogHref\": personal_blog[\"originalUrl\"],", "total_count % 20 if rest_count != 0: total_pages += 1", "( test_response.status_code != 200 or check_is_json(test_response.content.decode()) is not True ):", "\"targetUid\": self._login_uid, \"type\": \"post\", \"limit\": \"20\", \"order\": \"createdAt\", } if", "method == BaseSpiderParseMethodType.PersonalBlogs: self._parse_personal_blogs() self._parse_personal_like_blogs() elif method == BaseSpiderParseMethodType.Finish: self.send_data()", "if \"token\" in d][ 0 ].replace(\"token=\", \"\") self._login_client_id = [", "Dict, List, Tuple, Optional from utils.logger_utils import LogManager from utils.str_utils", "None: login_data.update(email=self._login_username) return f\"{self._main_url}/email\", login_data return None def parse_data_with_method(self, method:", "\"ok\": next_page_variable = None entry_list = self._response_data[\"d\"][\"entrylist\"] if len(entry_list) >", "= response.json() if ( self._response_data is not None and self._response_data[\"m\"]", "and email_login is None: raise ValueError(\"Your login username is illegal!\")", "self._login_client_id = [ d for d in login_params if \"device_id\"", "self.get_data(spider_name=f\"{self._spider_name}:params\") if get_result is None: 
self.parse_data_with_method( method=BaseSpiderParseMethodType.LoginResult ) else: try:", "self._login_token = [d for d in login_params if \"token\" in", "= self.make_request( url=login_url, headers=self._common_headers, method=\"POST\", json=login_data, ) if response.content.decode() !=", "0: total_pages += 1 self._like_blogs_total_page = total_pages entry_list = self._response_data[\"d\"][\"entryList\"]", "str = f\"juejin:{self._login_username}\" self._login_cookies: Optional[str] = None self._login_token: Optional[str] =", "login(self): if self._login_cookies is None: login_url, login_data = self._check_username() response", "返回数据失败! 错误原因: {err}\") self.parse_data_with_method( method=BaseSpiderParseMethodType.LoginResult ) def _parse_login_data(self): # 公共参数", "0 ].replace(\"token=\", \"\") self._login_client_id = [ d for d in", "{self._login_username}, 状态: 已登录\") return True else: logger.error(f\"当前掘金账号登录状态: 已退出!\") return False", "= self.make_request(url=like_blogs_url, headers=self._common_headers) if response.content.decode() != \"\": self._response_data = response.json()", "and self._response_data[\"m\"] == \"success\" ): logger.info(f\"当前正在获取第{page_no + 1}页的数据!\") if page_no", "is None: raise ValueError(\"Your login username is illegal!\") if phone_login", "Optional[str] = None self._login_uid: Optional[str] = None self._login_client_id: Optional[str] =", "in login_params if \"token\" in d][ 0 ].replace(\"token=\", \"\") self._login_client_id", "if next_params is not None: req_data.update(before=next_params) url_params: str = \"\"", "self.data_model.set_personal_like_blogs_data( data=self._like_blogs_data ) logger.info(\"获取个人点赞博客成功!\") # 任务末尾 self.parse_data_with_method(method=BaseSpiderParseMethodType.Finish) else: logger.error(\"查询个人点赞博客失败!\") self.update_task_status(", "entry_data[\"viewsCount\"], \"blogCreateTime\": datetime_str_change_fmt( time_str=entry_data[\"createdAt\"], prev_fmt=\"%Y-%m-%dT%H:%M:%S.%fZ\", ), } 
self._like_blogs_data.append(blog_data) page_no +=", "phone_login = check_is_phone_number(data=self._login_username) email_login = check_is_email_address(data=self._login_username) login_data: Dict = {\"password\":", "\"followee\": followee, \"follower\": follower, \"likeBlogs\": like_blogs, } logger.debug(personal_data) self.data_model.set_personal_data(data=personal_data) self.parse_data_with_method(method=BaseSpiderParseMethodType.PersonalBlogs)", "{ \"src\": \"web\", \"uid\": self._login_uid, \"device_id\": self._login_client_id, \"token\": self._login_token, \"targetUid\":", "+= 1 self._like_blogs_total_page = total_pages entry_list = self._response_data[\"d\"][\"entryList\"] if len(entry_list)", "logger.debug(self._blogs_data) self.data_model.set_personal_blogs_data(data=self._blogs_data) logger.info(\"获取个人博客数据成功!\") else: logger.error(\"查询个人博客失败!\") self.update_task_status( task_id=self._task_id, data=str(PROCESS_STATUS_FAIL) ) raise", "20 rest_count = total_count % 20 if rest_count != 0:", "self._login_password} if phone_login is None and email_login is None: raise", "\"token\": self._login_token, \"targetUid\": self._login_uid, \"type\": \"post\", \"limit\": \"20\", \"order\": \"createdAt\",", "login username is illegal!\") if phone_login is not None: login_data.update(phoneNumber=self._login_username)", "None: self.parse_data_with_method( method=BaseSpiderParseMethodType.LoginResult ) else: try: login_params = get_result.split(\"&\")[1:-1] self._login_uid", "ValueError(\"Your login username is illegal!\") if phone_login is not None:", "self._response_data[\"user\"][\"selfDescription\"] avatar_img = self._response_data[\"user\"][\"avatarLarge\"] followee = self._response_data[\"user\"][\"followeesCount\"] follower = self._response_data[\"user\"][\"followersCount\"]", "class JuejinSpider(BaseSpider): def __init__(self, task_id: str, username: str, password: str):", "entry_data[\"objectId\"], \"blogTitle\": entry_data[\"title\"], \"blogHref\": 
entry_data[\"originalUrl\"], \"blogViewers\": entry_data[\"viewsCount\"], \"blogCreateTime\": datetime_str_change_fmt( time_str=entry_data[\"createdAt\"],", "\"blogCreateTime\": datetime_str_change_fmt( time_str=entry_data[\"createdAt\"], prev_fmt=\"%Y-%m-%dT%H:%M:%S.%fZ\", ), } self._like_blogs_data.append(blog_data) page_no += 1", "1 if page_no <= self._like_blogs_total_page: # TODO 后面考虑多线程进行任务拆分,并发获取数据 time.sleep(0.5) self._parse_personal_like_blogs(page_no=page_no)", "check_is_email_address logger = LogManager(__name__).get_logger_and_add_handlers( formatter_template=5, log_level_int=LOG_LEVEL ) class JuejinSpider(BaseSpider): def", "headers=self._common_headers) if response.content.decode() != \"\": self._response_data = response.json() if self._response_data", "\"blogHref\": personal_blog[\"originalUrl\"], \"blogViewers\": personal_blog[\"viewsCount\"], \"blogCreateTime\": blog_create_time, } self._blogs_data.append(blog_data) next_page_variable =", "for index, data in enumerate(req_data.items()): if index == 0: url_params", ") raise LoginException() def _parse_personal_like_blogs(self, page_no: int = 0): like_blogs_url:", "json=login_data, ) if response.content.decode() != \"\": logger.info(\"登录成功!\") self._response_data = response.json()", "if response.content.decode() != \"\": self._response_data = response.json() if ( self._response_data", "self.make_request(url=blogs_url, headers=self._common_headers) if response.content.decode() != \"\": self._response_data = response.json() if", "test_json_response = test_response.json() if test_json_response[\"s\"] == 1: logger.info(f\"当前掘金账号为: {self._login_username}, 状态:", "LoginException() else: get_result: str = self.get_data(spider_name=f\"{self._spider_name}:params\") if get_result is None:", "follower, \"likeBlogs\": like_blogs, } logger.debug(personal_data) self.data_model.set_personal_data(data=personal_data) self.parse_data_with_method(method=BaseSpiderParseMethodType.PersonalBlogs) def 
_parse_personal_blogs(self, next_params:", "if page_no <= self._like_blogs_total_page: # TODO 后面考虑多线程进行任务拆分,并发获取数据 time.sleep(0.5) self._parse_personal_like_blogs(page_no=page_no) else:", "test_request_headers: Dict = self.get_default_headers() test_response = self.make_request( url=test_user_url, headers=test_request_headers )", "= self.make_request(url=blogs_url, headers=self._common_headers) if response.content.decode() != \"\": self._response_data = response.json()", "if entry_data is None: continue blog_data: Dict = { \"blogId\":", "method=BaseSpiderParseMethodType.LoginResult ) else: logger.error(\"登录失败!\") raise LoginException() else: get_result: str =", "login_data: Dict = {\"password\": self._login_password} if phone_login is None and", "self.parse_data_with_method(method=BaseSpiderParseMethodType.PersonalBlogs) def _parse_personal_blogs(self, next_params: Optional[str] = None): req_data: dict =", "if ( test_response.status_code != 200 or check_is_json(test_response.content.decode()) is not True", "].replace(\"uid=\", \"\") self._login_token = [d for d in login_params if", "personal_blog[\"originalUrl\"], \"blogViewers\": personal_blog[\"viewsCount\"], \"blogCreateTime\": blog_create_time, } self._blogs_data.append(blog_data) next_page_variable = personal_blog[\"verifyCreatedAt\"]", "self._login_token: Optional[str] = None self._login_uid: Optional[str] = None self._login_client_id: Optional[str]", "== BaseSpiderParseMethodType.Finish: self.send_data() def login(self): if self._login_cookies is None: login_url,", "self.get_data(spider_name=f\"{self._spider_name}:params\") if params is None: return False test_user_url: str =", "= None self._login_client_id: Optional[str] = None self._response_data = None self._blogs_data:", "entry_list = self._response_data[\"d\"][\"entryList\"] if len(entry_list) > 0: for entry_data in", "logger.info(\"获取个人博客数据成功!\") else: logger.error(\"查询个人博客失败!\") self.update_task_status( task_id=self._task_id, 
data=str(PROCESS_STATUS_FAIL) ) raise LoginException() def", "self._main_url = \"https://juejin.im/auth/type\" self._blogs_url = \"https://timeline-merger-ms.juejin.im/v1/get_entry_by_self\" self._like_blogs_url = \"https://user-like-wrapper-ms.juejin.im/v1/user\" self._task_id", "_parse_login_data(self): # 公共参数 self._login_token = self._response_data[\"token\"] self._login_uid = self._response_data[\"userId\"] self._login_client_id", "self._like_blogs_total_page: # TODO 后面考虑多线程进行任务拆分,并发获取数据 time.sleep(0.5) self._parse_personal_like_blogs(page_no=page_no) else: # logger.debug(self._like_blogs_data) logger.debug(f\"获取到", "time_str=entry_data[\"createdAt\"], prev_fmt=\"%Y-%m-%dT%H:%M:%S.%fZ\", ), } self._like_blogs_data.append(blog_data) page_no += 1 if page_no", "personal_blog in entry_list: blog_create_time = datetime_str_change_fmt( time_str=personal_blog[\"createdAt\"], prev_fmt=\"%Y-%m-%dT%H:%M:%S.%fZ\", ) blog_data:", "1 self._like_blogs_total_page = total_pages entry_list = self._response_data[\"d\"][\"entryList\"] if len(entry_list) >", "= check_is_email_address(data=self._login_username) login_data: Dict = {\"password\": self._login_password} if phone_login is", "0: total_count = self._response_data[\"d\"][\"total\"] total_pages = total_count // 20 rest_count", "= \"https://timeline-merger-ms.juejin.im/v1/get_entry_by_self\" self._like_blogs_url = \"https://user-like-wrapper-ms.juejin.im/v1/user\" self._task_id = task_id self._login_username =", "str): if method == BaseSpiderParseMethodType.LoginResult: self._parse_login_data() elif method == BaseSpiderParseMethodType.PersonalBlogs:", "err: logger.error(f\"解析 Redis 返回数据失败! 
错误原因: {err}\") self.parse_data_with_method( method=BaseSpiderParseMethodType.LoginResult ) def", "login_params if \"token\" in d][ 0 ].replace(\"token=\", \"\") self._login_client_id =", "\"\") self._login_token = [d for d in login_params if \"token\"", "CookieUtils( cookie_list=response.cookies.items() ).to_str() logger.debug(self._login_cookies) self.set_cookies( spider_name=self._spider_name, cookies=self._login_cookies ) self.parse_data_with_method( method=BaseSpiderParseMethodType.LoginResult", "method=BaseSpiderParseMethodType.LoginResult ) def _parse_login_data(self): # 公共参数 self._login_token = self._response_data[\"token\"] self._login_uid", "import BaseSpider, BaseSpiderParseMethodType, CookieUtils from utils.str_utils import check_is_phone_number, check_is_email_address logger", "cookie_list=response.cookies.items() ).to_str() logger.debug(self._login_cookies) self.set_cookies( spider_name=self._spider_name, cookies=self._login_cookies ) self.parse_data_with_method( method=BaseSpiderParseMethodType.LoginResult )", "!= \"\": self._response_data = response.json() if ( self._response_data is not", "None: return False test_user_url: str = f\"https://user-storage-api-ms.juejin.im/v1/getUserInfo{params}\" test_request_headers: Dict =", "self.parse_data_with_method( method=BaseSpiderParseMethodType.LoginResult ) else: try: login_params = get_result.split(\"&\")[1:-1] self._login_uid =", "= self._response_data[\"user\"][\"followeesCount\"] follower = self._response_data[\"user\"][\"followersCount\"] like_blogs = self._response_data[\"user\"][\"collectedEntriesCount\"] personal_data: Dict", "is not None: req_data.update(before=next_params) url_params: str = \"\" for index,", "str = \"\" for index, data in enumerate(req_data.items()): if index", "None self._blogs_data: List = [] self._like_blogs_data: List = [] self._like_blogs_total_page:", "total_pages = total_count // 20 rest_count = total_count % 20", "illegal!\") if phone_login is not None: 
login_data.update(phoneNumber=self._login_username) return f\"{self._main_url}/phoneNumber\", login_data", "email_login is not None: login_data.update(email=self._login_username) return f\"{self._main_url}/email\", login_data return None", "self.set_cookies( spider_name=self._spider_name, cookies=self._login_cookies ) self.parse_data_with_method( method=BaseSpiderParseMethodType.LoginResult ) else: logger.error(\"登录失败!\") raise", "logger.debug(self._login_cookies) self.set_cookies( spider_name=self._spider_name, cookies=self._login_cookies ) self.parse_data_with_method( method=BaseSpiderParseMethodType.LoginResult ) else: logger.error(\"登录失败!\")", "\"blogId\": personal_blog[\"objectId\"], \"blogTitle\": personal_blog[\"title\"], \"blogHref\": personal_blog[\"originalUrl\"], \"blogViewers\": personal_blog[\"viewsCount\"], \"blogCreateTime\": blog_create_time,", "typing import Dict, List, Tuple, Optional from utils.logger_utils import LogManager", "username = self._response_data[\"user\"][\"username\"] description = self._response_data[\"user\"][\"selfDescription\"] avatar_img = self._response_data[\"user\"][\"avatarLarge\"] followee", "datetime_str_change_fmt from utils.exception_utils import LoginException, ParseDataException from spiders import BaseSpider,", "log_level_int=LOG_LEVEL ) class JuejinSpider(BaseSpider): def __init__(self, task_id: str, username: str,", "task_id: str, username: str, password: str): self._main_url = \"https://juejin.im/auth/type\" self._blogs_url", "self.make_request( url=login_url, headers=self._common_headers, method=\"POST\", json=login_data, ) if response.content.decode() != \"\":", "= self._response_data[\"userId\"] self._login_client_id = self._response_data[\"clientId\"] # 重要参数持久化 params: str =", "self._like_blogs_total_page = total_pages entry_list = self._response_data[\"d\"][\"entryList\"] if len(entry_list) > 0:", "\"blogHref\": entry_data[\"originalUrl\"], \"blogViewers\": entry_data[\"viewsCount\"], \"blogCreateTime\": 
datetime_str_change_fmt( time_str=entry_data[\"createdAt\"], prev_fmt=\"%Y-%m-%dT%H:%M:%S.%fZ\", ), }", "{len(self._like_blogs_data)} 条个人点赞博客\") self.data_model.set_personal_like_blogs_data( data=self._like_blogs_data ) logger.info(\"获取个人点赞博客成功!\") # 任务末尾 self.parse_data_with_method(method=BaseSpiderParseMethodType.Finish) else:", "elif method == BaseSpiderParseMethodType.PersonalBlogs: self._parse_personal_blogs() self._parse_personal_like_blogs() elif method == BaseSpiderParseMethodType.Finish:", "= username self._login_password = password self._spider_name: str = f\"juejin:{self._login_username}\" self._login_cookies:", "List = [] self._like_blogs_data: List = [] self._like_blogs_total_page: int =", "f\"{self._blogs_url}{url_params}\" response = self.make_request(url=blogs_url, headers=self._common_headers) if response.content.decode() != \"\": self._response_data", "logger.info(f\"当前正在获取第{page_no + 1}页的数据!\") if page_no == 0: total_count = self._response_data[\"d\"][\"total\"]", "[] self._like_blogs_data: List = [] self._like_blogs_total_page: int = 0 super().__init__()", "( self._response_data is not None and self._response_data[\"m\"] == \"success\" ):", "from utils.exception_utils import LoginException, ParseDataException from spiders import BaseSpider, BaseSpiderParseMethodType,", "self._login_client_id = self._response_data[\"clientId\"] # 重要参数持久化 params: str = f\"?src=web&uid={self._login_uid}\" f\"&token={self._login_token}\"", "d for d in login_params if \"device_id\" in d ][0].replace(\"device_id=\",", "{\"password\": self._login_password} if phone_login is None and email_login is None:", "req_data.update(before=next_params) url_params: str = \"\" for index, data in enumerate(req_data.items()):", "0): like_blogs_url: str = f\"{self._like_blogs_url}/{self._login_uid}/like/entry?page={page_no}&pageSize=20\" self._common_headers.update( { \"X-Juejin-Client\": str(self._login_client_id), \"X-Juejin-Src\":", "str = f\"{self._blogs_url}{url_params}\" 
response = self.make_request(url=blogs_url, headers=self._common_headers) if response.content.decode() !=", "if params is None: return False test_user_url: str = f\"https://user-storage-api-ms.juejin.im/v1/getUserInfo{params}\"", "next_params is not None: req_data.update(before=next_params) url_params: str = \"\" for", ") def _parse_login_data(self): # 公共参数 self._login_token = self._response_data[\"token\"] self._login_uid =", "index == 0: url_params += f\"?{data[0]}={data[1]}\" else: url_params += f\"&{data[0]}={data[1]}\"", "= self.get_default_headers() test_response = self.make_request( url=test_user_url, headers=test_request_headers ) if (", "check_is_email_address(data=self._login_username) login_data: Dict = {\"password\": self._login_password} if phone_login is None", "logger.info(f\"当前掘金账号为: {self._login_username}, 状态: 已登录\") return True else: logger.error(f\"当前掘金账号登录状态: 已退出!\") return", "= task_id self._login_username = username self._login_password = password self._spider_name: str", "ParseDataException from spiders import BaseSpider, BaseSpiderParseMethodType, CookieUtils from utils.str_utils import", "= self._response_data[\"user\"][\"avatarLarge\"] followee = self._response_data[\"user\"][\"followeesCount\"] follower = self._response_data[\"user\"][\"followersCount\"] like_blogs =", "f\"juejin:{self._login_username}\" self._login_cookies: Optional[str] = None self._login_token: Optional[str] = None self._login_uid:", "headers=test_request_headers ) if ( test_response.status_code != 200 or check_is_json(test_response.content.decode()) is", "= self._response_data[\"user\"][\"followersCount\"] like_blogs = self._response_data[\"user\"][\"collectedEntriesCount\"] personal_data: Dict = { \"username\":", "} self._blogs_data.append(blog_data) next_page_variable = personal_blog[\"verifyCreatedAt\"] if self._response_data[\"d\"][\"total\"] > 20: time.sleep(0.5)", "\"limit\": \"20\", \"order\": \"createdAt\", } if next_params is not None:", 
"\"https://juejin.im/auth/type\" self._blogs_url = \"https://timeline-merger-ms.juejin.im/v1/get_entry_by_self\" self._like_blogs_url = \"https://user-like-wrapper-ms.juejin.im/v1/user\" self._task_id = task_id", "get_result.split(\"&\")[1:-1] self._login_uid = [d for d in login_params if \"uid\"", "email_login is None: raise ValueError(\"Your login username is illegal!\") if", "> 0: for entry_data in entry_list: if entry_data is None:", "= [] self._like_blogs_data: List = [] self._like_blogs_total_page: int = 0", "> 0: for personal_blog in entry_list: blog_create_time = datetime_str_change_fmt( time_str=personal_blog[\"createdAt\"],", "self._login_uid: Optional[str] = None self._login_client_id: Optional[str] = None self._response_data =", "self._like_blogs_total_page: int = 0 super().__init__() self._login_cookies = self.get_cookies(spider_name=self._spider_name) def _check_username(self)", "else: logger.debug(self._blogs_data) self.data_model.set_personal_blogs_data(data=self._blogs_data) logger.info(\"获取个人博客数据成功!\") else: logger.error(\"查询个人博客失败!\") self.update_task_status( task_id=self._task_id, data=str(PROCESS_STATUS_FAIL) )", "total_pages entry_list = self._response_data[\"d\"][\"entryList\"] if len(entry_list) > 0: for entry_data", "\"blogCreateTime\": blog_create_time, } self._blogs_data.append(blog_data) next_page_variable = personal_blog[\"verifyCreatedAt\"] if self._response_data[\"d\"][\"total\"] >", "if self._response_data[\"d\"][\"total\"] > 20: time.sleep(0.5) self._parse_personal_blogs(next_params=next_page_variable) else: logger.debug(self._blogs_data) self.data_model.set_personal_blogs_data(data=self._blogs_data) logger.info(\"获取个人博客数据成功!\")", "self._response_data[\"m\"] == \"ok\": next_page_variable = None entry_list = self._response_data[\"d\"][\"entrylist\"] if", "Dict = {\"password\": self._login_password} if phone_login is None and email_login", "for d in login_params if \"uid\" in d][ 0 ].replace(\"uid=\",", ") self.parse_data_with_method( 
method=BaseSpiderParseMethodType.LoginResult ) else: logger.error(\"登录失败!\") raise LoginException() else: get_result:", "time.sleep(0.5) self._parse_personal_blogs(next_params=next_page_variable) else: logger.debug(self._blogs_data) self.data_model.set_personal_blogs_data(data=self._blogs_data) logger.info(\"获取个人博客数据成功!\") else: logger.error(\"查询个人博客失败!\") self.update_task_status( task_id=self._task_id,", "None self._login_token: Optional[str] = None self._login_uid: Optional[str] = None self._login_client_id:", "List = [] self._like_blogs_total_page: int = 0 super().__init__() self._login_cookies =", "login_data.update(email=self._login_username) return f\"{self._main_url}/email\", login_data return None def parse_data_with_method(self, method: str):", "f\"{self._like_blogs_url}/{self._login_uid}/like/entry?page={page_no}&pageSize=20\" self._common_headers.update( { \"X-Juejin-Client\": str(self._login_client_id), \"X-Juejin-Src\": \"web\", \"X-Juejin-Token\": self._login_token, \"X-Juejin-Uid\":", "prev_fmt=\"%Y-%m-%dT%H:%M:%S.%fZ\", ), } self._like_blogs_data.append(blog_data) page_no += 1 if page_no <=", "str = self.get_data(spider_name=f\"{self._spider_name}:params\") if get_result is None: self.parse_data_with_method( method=BaseSpiderParseMethodType.LoginResult )", "like_blogs, } logger.debug(personal_data) self.data_model.set_personal_data(data=personal_data) self.parse_data_with_method(method=BaseSpiderParseMethodType.PersonalBlogs) def _parse_personal_blogs(self, next_params: Optional[str] =", "= self._response_data[\"d\"][\"total\"] total_pages = total_count // 20 rest_count = total_count", "params: str = f\"?src=web&uid={self._login_uid}\" f\"&token={self._login_token}\" f\"&device_id={self._login_client_id}\" f\"&current_uid={self._login_uid}\" self.set_data(spider_name=f\"{self._spider_name}:params\", data=params) #", "d in login_params if \"device_id\" in d ][0].replace(\"device_id=\", \"\") self.parse_data_with_method(", "total_count = 
self._response_data[\"d\"][\"total\"] total_pages = total_count // 20 rest_count =", "None def parse_data_with_method(self, method: str): if method == BaseSpiderParseMethodType.LoginResult: self._parse_login_data()", "f\"&{data[0]}={data[1]}\" blogs_url: str = f\"{self._blogs_url}{url_params}\" response = self.make_request(url=blogs_url, headers=self._common_headers) if", "len(entry_list) > 0: for entry_data in entry_list: if entry_data is", "# TODO 后面考虑多线程进行任务拆分,并发获取数据 time.sleep(0.5) self._parse_personal_like_blogs(page_no=page_no) else: # logger.debug(self._like_blogs_data) logger.debug(f\"获取到 {len(self._like_blogs_data)}", "raise ParseDataException() def _test_cookies(self, cookies: Optional[str] = None) -> bool:", "login_params if \"uid\" in d][ 0 ].replace(\"uid=\", \"\") self._login_token =", "def _parse_login_data(self): # 公共参数 self._login_token = self._response_data[\"token\"] self._login_uid = self._response_data[\"userId\"]", "Dict = self.get_default_headers() test_response = self.make_request( url=test_user_url, headers=test_request_headers ) if", "self._response_data[\"user\"][\"username\"] description = self._response_data[\"user\"][\"selfDescription\"] avatar_img = self._response_data[\"user\"][\"avatarLarge\"] followee = self._response_data[\"user\"][\"followeesCount\"]", "return False test_user_url: str = f\"https://user-storage-api-ms.juejin.im/v1/getUserInfo{params}\" test_request_headers: Dict = self.get_default_headers()", "self._login_client_id, \"token\": self._login_token, \"targetUid\": self._login_uid, \"type\": \"post\", \"limit\": \"20\", \"order\":", "= password self._spider_name: str = f\"juejin:{self._login_username}\" self._login_cookies: Optional[str] = None", "= test_response.json() if test_json_response[\"s\"] == 1: logger.info(f\"当前掘金账号为: {self._login_username}, 状态: 已登录\")", "\"\"\" phone_login = check_is_phone_number(data=self._login_username) email_login = check_is_email_address(data=self._login_username) login_data: Dict =", 
"None self._login_uid: Optional[str] = None self._login_client_id: Optional[str] = None self._response_data", "if phone_login is None and email_login is None: raise ValueError(\"Your", "= f\"{self._like_blogs_url}/{self._login_uid}/like/entry?page={page_no}&pageSize=20\" self._common_headers.update( { \"X-Juejin-Client\": str(self._login_client_id), \"X-Juejin-Src\": \"web\", \"X-Juejin-Token\": self._login_token,", "bool: params = self.get_data(spider_name=f\"{self._spider_name}:params\") if params is None: return False", "== BaseSpiderParseMethodType.PersonalBlogs: self._parse_personal_blogs() self._parse_personal_like_blogs() elif method == BaseSpiderParseMethodType.Finish: self.send_data() def", "[d for d in login_params if \"uid\" in d][ 0", "BaseSpiderParseMethodType.PersonalBlogs: self._parse_personal_blogs() self._parse_personal_like_blogs() elif method == BaseSpiderParseMethodType.Finish: self.send_data() def login(self):", "PROCESS_STATUS_FAIL from utils.time_utils import datetime_str_change_fmt from utils.exception_utils import LoginException, ParseDataException", "utils.str_utils import check_is_phone_number, check_is_email_address logger = LogManager(__name__).get_logger_and_add_handlers( formatter_template=5, log_level_int=LOG_LEVEL )", "is not None: login_data.update(email=self._login_username) return f\"{self._main_url}/email\", login_data return None def", "next_page_variable = personal_blog[\"verifyCreatedAt\"] if self._response_data[\"d\"][\"total\"] > 20: time.sleep(0.5) self._parse_personal_blogs(next_params=next_page_variable) else:", "int = 0 super().__init__() self._login_cookies = self.get_cookies(spider_name=self._spider_name) def _check_username(self) ->", "response.content.decode() != \"\": self._response_data = response.json() if ( self._response_data is", "\"\": logger.info(\"登录成功!\") self._response_data = response.json() self._login_cookies = CookieUtils( cookie_list=response.cookies.items() ).to_str()", "self._response_data = 
response.json() if self._response_data is not None and self._response_data[\"m\"]", "== \"ok\": next_page_variable = None entry_list = self._response_data[\"d\"][\"entrylist\"] if len(entry_list)", "description = self._response_data[\"user\"][\"selfDescription\"] avatar_img = self._response_data[\"user\"][\"avatarLarge\"] followee = self._response_data[\"user\"][\"followeesCount\"] follower", "None): req_data: dict = { \"src\": \"web\", \"uid\": self._login_uid, \"device_id\":", "f\"&device_id={self._login_client_id}\" f\"&current_uid={self._login_uid}\" self.set_data(spider_name=f\"{self._spider_name}:params\", data=params) # 个人数据 username = self._response_data[\"user\"][\"username\"] description", "logger.debug(personal_data) self.data_model.set_personal_data(data=personal_data) self.parse_data_with_method(method=BaseSpiderParseMethodType.PersonalBlogs) def _parse_personal_blogs(self, next_params: Optional[str] = None): req_data:", "if rest_count != 0: total_pages += 1 self._like_blogs_total_page = total_pages", "and self._response_data[\"m\"] == \"ok\": next_page_variable = None entry_list = self._response_data[\"d\"][\"entrylist\"]", "= personal_blog[\"verifyCreatedAt\"] if self._response_data[\"d\"][\"total\"] > 20: time.sleep(0.5) self._parse_personal_blogs(next_params=next_page_variable) else: logger.debug(self._blogs_data)", "\"blogTitle\": entry_data[\"title\"], \"blogHref\": entry_data[\"originalUrl\"], \"blogViewers\": entry_data[\"viewsCount\"], \"blogCreateTime\": datetime_str_change_fmt( time_str=entry_data[\"createdAt\"], prev_fmt=\"%Y-%m-%dT%H:%M:%S.%fZ\",", "phone_login is None and email_login is None: raise ValueError(\"Your login", "= self._response_data[\"user\"][\"username\"] description = self._response_data[\"user\"][\"selfDescription\"] avatar_img = self._response_data[\"user\"][\"avatarLarge\"] followee =", "1}页的数据!\") if page_no == 0: total_count = self._response_data[\"d\"][\"total\"] total_pages =", "if index == 0: url_params += 
f\"?{data[0]}={data[1]}\" else: url_params +=", "self._blogs_data.append(blog_data) next_page_variable = personal_blog[\"verifyCreatedAt\"] if self._response_data[\"d\"][\"total\"] > 20: time.sleep(0.5) self._parse_personal_blogs(next_params=next_page_variable)", "= { \"username\": username, \"description\": description, \"avatarImg\": avatar_img, \"followee\": followee,", "from utils.logger_utils import LogManager from utils.str_utils import check_is_json from config", "data in enumerate(req_data.items()): if index == 0: url_params += f\"?{data[0]}={data[1]}\"", "解析用户名 :return: 结果 \"\"\" phone_login = check_is_phone_number(data=self._login_username) email_login = check_is_email_address(data=self._login_username)", "def _check_username(self) -> Optional[Tuple[str, Dict]]: \"\"\" 解析用户名 :return: 结果 \"\"\"", "datetime_str_change_fmt( time_str=entry_data[\"createdAt\"], prev_fmt=\"%Y-%m-%dT%H:%M:%S.%fZ\", ), } self._like_blogs_data.append(blog_data) page_no += 1 if", "from config import LOG_LEVEL, PROCESS_STATUS_FAIL from utils.time_utils import datetime_str_change_fmt from", "条个人点赞博客\") self.data_model.set_personal_like_blogs_data( data=self._like_blogs_data ) logger.info(\"获取个人点赞博客成功!\") # 任务末尾 self.parse_data_with_method(method=BaseSpiderParseMethodType.Finish) else: logger.error(\"查询个人点赞博客失败!\")", "\"\") self.parse_data_with_method( method=BaseSpiderParseMethodType.PersonalBlogs ) except Exception as err: logger.error(f\"解析 Redis", "Dict = { \"username\": username, \"description\": description, \"avatarImg\": avatar_img, \"followee\":", "is None: continue blog_data: Dict = { \"blogId\": entry_data[\"objectId\"], \"blogTitle\":", "+= f\"?{data[0]}={data[1]}\" else: url_params += f\"&{data[0]}={data[1]}\" blogs_url: str = f\"{self._blogs_url}{url_params}\"", "response.json() if ( self._response_data is not None and self._response_data[\"m\"] ==", "False test_json_response = test_response.json() if test_json_response[\"s\"] == 1: logger.info(f\"当前掘金账号为: 
{self._login_username},", "} if next_params is not None: req_data.update(before=next_params) url_params: str =", "import LOG_LEVEL, PROCESS_STATUS_FAIL from utils.time_utils import datetime_str_change_fmt from utils.exception_utils import", "for entry_data in entry_list: if entry_data is None: continue blog_data:", "response.json() if self._response_data is not None and self._response_data[\"m\"] == \"ok\":", "utils.logger_utils import LogManager from utils.str_utils import check_is_json from config import", "d][ 0 ].replace(\"token=\", \"\") self._login_client_id = [ d for d", "import time from typing import Dict, List, Tuple, Optional from", "Exception as err: logger.error(f\"解析 Redis 返回数据失败! 错误原因: {err}\") self.parse_data_with_method( method=BaseSpiderParseMethodType.LoginResult", "= \"https://user-like-wrapper-ms.juejin.im/v1/user\" self._task_id = task_id self._login_username = username self._login_password =", "self._login_password = password self._spider_name: str = f\"juejin:{self._login_username}\" self._login_cookies: Optional[str] =", "= [ d for d in login_params if \"device_id\" in", "datetime_str_change_fmt( time_str=personal_blog[\"createdAt\"], prev_fmt=\"%Y-%m-%dT%H:%M:%S.%fZ\", ) blog_data: Dict = { \"blogId\": personal_blog[\"objectId\"],", "url_params += f\"&{data[0]}={data[1]}\" blogs_url: str = f\"{self._blogs_url}{url_params}\" response = self.make_request(url=blogs_url,", "logger.info(\"登录成功!\") self._response_data = response.json() self._login_cookies = CookieUtils( cookie_list=response.cookies.items() ).to_str() logger.debug(self._login_cookies)", "like_blogs = self._response_data[\"user\"][\"collectedEntriesCount\"] personal_data: Dict = { \"username\": username, \"description\":", "login_params = get_result.split(\"&\")[1:-1] self._login_uid = [d for d in login_params", "time.sleep(0.5) self._parse_personal_like_blogs(page_no=page_no) else: # logger.debug(self._like_blogs_data) logger.debug(f\"获取到 {len(self._like_blogs_data)} 条个人点赞博客\") 
self.data_model.set_personal_like_blogs_data( data=self._like_blogs_data", "= LogManager(__name__).get_logger_and_add_handlers( formatter_template=5, log_level_int=LOG_LEVEL ) class JuejinSpider(BaseSpider): def __init__(self, task_id:", "method=\"POST\", json=login_data, ) if response.content.decode() != \"\": logger.info(\"登录成功!\") self._response_data =", "personal_blog[\"title\"], \"blogHref\": personal_blog[\"originalUrl\"], \"blogViewers\": personal_blog[\"viewsCount\"], \"blogCreateTime\": blog_create_time, } self._blogs_data.append(blog_data) next_page_variable", "def _parse_personal_like_blogs(self, page_no: int = 0): like_blogs_url: str = f\"{self._like_blogs_url}/{self._login_uid}/like/entry?page={page_no}&pageSize=20\"", "if ( self._response_data is not None and self._response_data[\"m\"] == \"success\"", "entry_list: if entry_data is None: continue blog_data: Dict = {", "\"order\": \"createdAt\", } if next_params is not None: req_data.update(before=next_params) url_params:", "str(self._login_client_id), \"X-Juejin-Src\": \"web\", \"X-Juejin-Token\": self._login_token, \"X-Juejin-Uid\": self._login_uid, } ) response", "self._login_cookies: Optional[str] = None self._login_token: Optional[str] = None self._login_uid: Optional[str]", "LogManager(__name__).get_logger_and_add_handlers( formatter_template=5, log_level_int=LOG_LEVEL ) class JuejinSpider(BaseSpider): def __init__(self, task_id: str," ]
[ "[0.5071, 0.4865, 0.4409] CIFAR_STD = [0.1942, 0.1918, 0.1958] DATA_FILE =", "range(0, 23): if i <= 7: channel_optional.append([4, 8, 12, 16])", "0.4409] CIFAR_STD = [0.1942, 0.1918, 0.1958] DATA_FILE = './data/data76994/cifar-100-python.tar.gz' model.prepare(", "callbacks.VisualDL('vis_logs/ofa_resnet20')] model.fit( train_set, test_set, epochs=MAX_EPOCH, batch_size=BATCH_SIZE, save_dir='checkpoints', save_freq=100, shuffle=True, num_workers=4,", "paddle.optimizer.lr import CosineAnnealingDecay, MultiStepDecay, LinearWarmup import random from resnet20 import", "import Convert, supernet from paddleslim.nas.ofa import OFA, RunConfig, DistillConfig from", "0.4865, 0.4409] CIFAR_STD = [0.1942, 0.1918, 0.1958] DATA_FILE = './data/data76994/cifar-100-python.tar.gz'", "[LRSchedulerM(), callbacks.VisualDL('vis_logs/ofa_resnet20')] model.fit( train_set, test_set, epochs=MAX_EPOCH, batch_size=BATCH_SIZE, save_dir='checkpoints', save_freq=100, shuffle=True,", "# channel_list.append(random.choice([ 4, 8, 12, 16])) channel_list.append(16) elif 7 <", "channel_optional.append([12, 16]) elif 7 < i <= 14: channel_optional.append([4, 8,", "40, 44, 48, 52, 56, 60, 64]) # channel_optional.append([36, 40,", "0.1 WEIGHT_DECAY = 5e-4 MOMENTUM = 0.9 BATCH_SIZE = 128", "* import paddle # supernet trainning 基于paddleslim模型压缩包 # https://github.com/PaddlePaddle/PaddleSlim 欢迎大家多多star", "20, 24, 28, 32, 36, 40, 44, 48, 52, 56,", "16]) elif 7 < i <= 14: channel_optional.append([4, 8, 12,", "0.1918, 0.1958] DATA_FILE = './data/data76994/cifar-100-python.tar.gz' model.prepare( paddle.optimizer.Momentum( learning_rate=LinearWarmup( CosineAnnealingDecay(LR, MAX_EPOCH),", "= './data/data76994/cifar-100-python.tar.gz' model.prepare( paddle.optimizer.Momentum( learning_rate=LinearWarmup( CosineAnnealingDecay(LR, MAX_EPOCH), 2000, 0., LR),", "ResNet20(100, channel_list) net2.set_state_dict(paddle.load('./pretrained_model/resnet20.pdparams')) channel_optional = [] for i in range(0,", "channel_list) 
net2 = ResNet20(100, channel_list) net2.set_state_dict(paddle.load('./pretrained_model/resnet20.pdparams')) channel_optional = [] for", "20, 24, 28, 32]) # channel_optional.append([20, 24, 28, 32]) elif", "OFA, RunConfig, DistillConfig from paddleslim.nas.ofa.utils import utils channel_list = []", "distill_config=distill_config) ofa_net.set_task('channel') model = paddle.Model(ofa_net) MAX_EPOCH = 300 LR =", "64]) distill_config = DistillConfig(teacher_model=net2) sp_net_config = supernet(channel=channel_optional) sp_model = Convert(sp_net_config).convert(net)", "channel_optional.append([20, 24, 28, 32]) elif 14 < i <= 21:", "RandomRotation(15), ToArray(), Normalize(CIFAR_MEAN, CIFAR_STD), ]) val_transforms = Compose([ToArray(), Normalize(CIFAR_MEAN, CIFAR_STD)])", "[0.1942, 0.1918, 0.1958] DATA_FILE = './data/data76994/cifar-100-python.tar.gz' model.prepare( paddle.optimizer.Momentum( learning_rate=LinearWarmup( CosineAnnealingDecay(LR,", "HueTransform, BrightnessTransform, ContrastTransform, RandomCrop, Normalize, RandomRotation ) from paddle.vision.datasets import", "Convert, supernet from paddleslim.nas.ofa import OFA, RunConfig, DistillConfig from paddleslim.nas.ofa.utils", "from paddle.optimizer.lr import CosineAnnealingDecay, MultiStepDecay, LinearWarmup import random from resnet20", "44, 48, 52, 56,60, 64]) distill_config = DistillConfig(teacher_model=net2) sp_net_config =", "import ( ToTensor, RandomHorizontalFlip, RandomResizedCrop, SaturationTransform, Compose, HueTransform, BrightnessTransform, ContrastTransform,", "32, 36, 40, 44, 48, 52, 56, 60, 64]) #", "<= 14: channel_optional.append([4, 8, 12, 16, 20, 24, 28, 32])", "32, 36, 40, 44, 48, 52, 56,60, 64])) channel_list.append(64) net", "# https://github.com/PaddlePaddle/PaddleSlim 欢迎大家多多star from paddleslim.nas.ofa.convert_super import Convert, supernet from paddleslim.nas.ofa", "52, 56, 60, 64]) # channel_optional.append([36, 40, 44, 48, 52,", "resnet20 import * import paddle # supernet trainning 
基于paddleslim模型压缩包 #", "12, 16])) channel_list.append(16) elif 7 < i <= 13: #", "8, 12, 16, 20, 24, 28, 32])) channel_list.append(32) elif 13", "64]) # channel_optional.append([36, 40, 44, 48, 52, 56,60, 64]) distill_config", "ToTensor, RandomHorizontalFlip, RandomResizedCrop, SaturationTransform, Compose, HueTransform, BrightnessTransform, ContrastTransform, RandomCrop, Normalize,", "transform=transforms) test_set = Cifar100(DATA_FILE, mode='test', transform=val_transforms) callbacks = [LRSchedulerM(), callbacks.VisualDL('vis_logs/ofa_resnet20')]", "12, 16, 20, 24, 28, 32, 36, 40, 44, 48,", "= Cifar100(DATA_FILE, mode='test', transform=val_transforms) callbacks = [LRSchedulerM(), callbacks.VisualDL('vis_logs/ofa_resnet20')] model.fit( train_set,", "momentum=MOMENTUM, parameters=model.parameters(), weight_decay=WEIGHT_DECAY), CrossEntropyLoss(), paddle.metric.Accuracy(topk=(1, 5))) transforms = Compose([ RandomCrop(32,", "import Cifar100 from paddle.io import DataLoader from paddle.optimizer.lr import CosineAnnealingDecay,", "train_set = Cifar100(DATA_FILE, mode='train', transform=transforms) test_set = Cifar100(DATA_FILE, mode='test', transform=val_transforms)", "14 < i <= 21: channel_optional.append( [4, 8, 12, 16,", "MultiStepDecay, LinearWarmup import random from resnet20 import * import paddle", "13: # channel_list.append(random.choice([ 4, 8, 12, 16, 20, 24, 28,", "paddle.io import DataLoader from paddle.optimizer.lr import CosineAnnealingDecay, MultiStepDecay, LinearWarmup import", "[] for i in range(1, 21): if 0 < i", "SaturationTransform, Compose, HueTransform, BrightnessTransform, ContrastTransform, RandomCrop, Normalize, RandomRotation ) from", "DistillConfig(teacher_model=net2) sp_net_config = supernet(channel=channel_optional) sp_model = Convert(sp_net_config).convert(net) ofa_net = OFA(sp_model,", "44, 48, 52, 56,60, 64])) channel_list.append(64) net = ResNet20(100, channel_list)", "transforms = Compose([ RandomCrop(32, padding=4), 
RandomApply(BrightnessTransform(0.1)), RandomApply(ContrastTransform(0.1)), RandomHorizontalFlip(), RandomRotation(15), ToArray(),", "[4, 8, 12, 16, 20, 24, 28, 32, 36, 40,", "from paddleslim.nas.ofa.utils import utils channel_list = [] for i in", "32])) channel_list.append(32) elif 13 < i <= 19: # channel_list.append(random.choice([", "i <= 21: channel_optional.append( [4, 8, 12, 16, 20, 24,", "# channel_list.append(random.choice([ 4, 8, 12, 16, 20, 24, 28, 32]))", "= [0.5071, 0.4865, 0.4409] CIFAR_STD = [0.1942, 0.1918, 0.1958] DATA_FILE", "import CosineAnnealingDecay, MultiStepDecay, LinearWarmup import random from resnet20 import *", "i <= 13: # channel_list.append(random.choice([ 4, 8, 12, 16, 20,", "12, 16, 20, 24, 28, 32])) channel_list.append(32) elif 13 <", "channel_list.append(random.choice([ 4, 8, 12, 16, 20, 24, 28, 32, 36,", "7: # channel_list.append(random.choice([ 4, 8, 12, 16])) channel_list.append(16) elif 7", "= paddle.Model(ofa_net) MAX_EPOCH = 300 LR = 0.1 WEIGHT_DECAY =", "8, 12, 16, 20, 24, 28, 32, 36, 40, 44,", "4, 8, 12, 16])) channel_list.append(16) elif 7 < i <=", "40, 44, 48, 52, 56,60, 64]) else: channel_optional.append( [4, 8,", "300 LR = 0.1 WEIGHT_DECAY = 5e-4 MOMENTUM = 0.9", "40, 44, 48, 52, 56,60, 64])) channel_list.append(64) else: # channel_list.append(random.choice([", "<= 21: channel_optional.append( [4, 8, 12, 16, 20, 24, 28,", "24, 28, 32, 36, 40, 44, 48, 52, 56, 60,", "OFA(sp_model, distill_config=distill_config) ofa_net.set_task('channel') model = paddle.Model(ofa_net) MAX_EPOCH = 300 LR", "23): if i <= 7: channel_optional.append([4, 8, 12, 16]) #", "< i <= 7: # channel_list.append(random.choice([ 4, 8, 12, 16]))", "i <= 7: channel_optional.append([4, 8, 12, 16]) # channel_optional.append([12, 16])", "DataLoader from paddle.optimizer.lr import CosineAnnealingDecay, MultiStepDecay, LinearWarmup import random from", "= 128 CIFAR_MEAN = [0.5071, 0.4865, 0.4409] CIFAR_STD = [0.1942,", "36, 40, 44, 48, 52, 56,60, 64])) 
channel_list.append(64) else: #", "Compose, HueTransform, BrightnessTransform, ContrastTransform, RandomCrop, Normalize, RandomRotation ) from paddle.vision.datasets", "sp_model = Convert(sp_net_config).convert(net) ofa_net = OFA(sp_model, distill_config=distill_config) ofa_net.set_task('channel') model =", "channel_list.append(random.choice([ 4, 8, 12, 16, 20, 24, 28, 32])) channel_list.append(32)", "import * import paddle # supernet trainning 基于paddleslim模型压缩包 # https://github.com/PaddlePaddle/PaddleSlim", "12, 16, 20, 24, 28, 32]) # channel_optional.append([20, 24, 28,", "model.fit( train_set, test_set, epochs=MAX_EPOCH, batch_size=BATCH_SIZE, save_dir='checkpoints', save_freq=100, shuffle=True, num_workers=4, verbose=1,", "2000, 0., LR), momentum=MOMENTUM, parameters=model.parameters(), weight_decay=WEIGHT_DECAY), CrossEntropyLoss(), paddle.metric.Accuracy(topk=(1, 5))) transforms", "import paddle # supernet trainning 基于paddleslim模型压缩包 # https://github.com/PaddlePaddle/PaddleSlim 欢迎大家多多star from", "RandomCrop(32, padding=4), RandomApply(BrightnessTransform(0.1)), RandomApply(ContrastTransform(0.1)), RandomHorizontalFlip(), RandomRotation(15), ToArray(), Normalize(CIFAR_MEAN, CIFAR_STD), ])", "ResNet20(100, channel_list) net2 = ResNet20(100, channel_list) net2.set_state_dict(paddle.load('./pretrained_model/resnet20.pdparams')) channel_optional = []", "20, 24, 28, 32, 36, 40, 44, 48, 52, 56,60,", "Compose([ToArray(), Normalize(CIFAR_MEAN, CIFAR_STD)]) train_set = Cifar100(DATA_FILE, mode='train', transform=transforms) test_set =", "import utils channel_list = [] for i in range(1, 21):", "in range(1, 21): if 0 < i <= 7: #", "'./data/data76994/cifar-100-python.tar.gz' model.prepare( paddle.optimizer.Momentum( learning_rate=LinearWarmup( CosineAnnealingDecay(LR, MAX_EPOCH), 2000, 0., LR), momentum=MOMENTUM,", "ToArray(), Normalize(CIFAR_MEAN, CIFAR_STD), ]) val_transforms = Compose([ToArray(), Normalize(CIFAR_MEAN, CIFAR_STD)]) train_set", "# 
channel_optional.append([36, 40, 44, 48, 52, 56,60, 64]) else: channel_optional.append(", "channel_list = [] for i in range(1, 21): if 0", "= 0.1 WEIGHT_DECAY = 5e-4 MOMENTUM = 0.9 BATCH_SIZE =", "for i in range(0, 23): if i <= 7: channel_optional.append([4,", "sp_net_config = supernet(channel=channel_optional) sp_model = Convert(sp_net_config).convert(net) ofa_net = OFA(sp_model, distill_config=distill_config)", "from paddle.io import DataLoader from paddle.optimizer.lr import CosineAnnealingDecay, MultiStepDecay, LinearWarmup", ") from paddle.vision.datasets import Cifar100 from paddle.io import DataLoader from", "model.prepare( paddle.optimizer.Momentum( learning_rate=LinearWarmup( CosineAnnealingDecay(LR, MAX_EPOCH), 2000, 0., LR), momentum=MOMENTUM, parameters=model.parameters(),", "channel_optional.append([4, 8, 12, 16]) # channel_optional.append([12, 16]) elif 7 <", "7 < i <= 13: # channel_list.append(random.choice([ 4, 8, 12,", "# channel_optional.append([12, 16]) elif 7 < i <= 14: channel_optional.append([4,", "# supernet trainning 基于paddleslim模型压缩包 # https://github.com/PaddlePaddle/PaddleSlim 欢迎大家多多star from paddleslim.nas.ofa.convert_super import", "36, 40, 44, 48, 52, 56, 60, 64]) # channel_optional.append([36,", "= ResNet20(100, channel_list) net2.set_state_dict(paddle.load('./pretrained_model/resnet20.pdparams')) channel_optional = [] for i in", "i <= 7: # channel_list.append(random.choice([ 4, 8, 12, 16])) channel_list.append(16)", "Normalize(CIFAR_MEAN, CIFAR_STD)]) train_set = Cifar100(DATA_FILE, mode='train', transform=transforms) test_set = Cifar100(DATA_FILE,", "test_set = Cifar100(DATA_FILE, mode='test', transform=val_transforms) callbacks = [LRSchedulerM(), callbacks.VisualDL('vis_logs/ofa_resnet20')] model.fit(", "LR = 0.1 WEIGHT_DECAY = 5e-4 MOMENTUM = 0.9 BATCH_SIZE", "= [] for i in range(1, 21): if 0 <", "i in range(0, 23): if i <= 7: channel_optional.append([4, 8,", "MAX_EPOCH), 2000, 0., LR), momentum=MOMENTUM, 
parameters=model.parameters(), weight_decay=WEIGHT_DECAY), CrossEntropyLoss(), paddle.metric.Accuracy(topk=(1, 5)))", "channel_list.append(16) elif 7 < i <= 13: # channel_list.append(random.choice([ 4,", "48, 52, 56,60, 64]) else: channel_optional.append( [4, 8, 12, 16,", "LinearWarmup import random from resnet20 import * import paddle #", "= 0.9 BATCH_SIZE = 128 CIFAR_MEAN = [0.5071, 0.4865, 0.4409]", "128 CIFAR_MEAN = [0.5071, 0.4865, 0.4409] CIFAR_STD = [0.1942, 0.1918,", "Normalize(CIFAR_MEAN, CIFAR_STD), ]) val_transforms = Compose([ToArray(), Normalize(CIFAR_MEAN, CIFAR_STD)]) train_set =", "32]) # channel_optional.append([20, 24, 28, 32]) elif 14 < i", "# channel_optional.append([20, 24, 28, 32]) elif 14 < i <=", "RandomHorizontalFlip(), RandomRotation(15), ToArray(), Normalize(CIFAR_MEAN, CIFAR_STD), ]) val_transforms = Compose([ToArray(), Normalize(CIFAR_MEAN,", "https://github.com/PaddlePaddle/PaddleSlim 欢迎大家多多star from paddleslim.nas.ofa.convert_super import Convert, supernet from paddleslim.nas.ofa import", "12, 16]) # channel_optional.append([12, 16]) elif 7 < i <=", "Convert(sp_net_config).convert(net) ofa_net = OFA(sp_model, distill_config=distill_config) ofa_net.set_task('channel') model = paddle.Model(ofa_net) MAX_EPOCH", "= OFA(sp_model, distill_config=distill_config) ofa_net.set_task('channel') model = paddle.Model(ofa_net) MAX_EPOCH = 300", "i <= 14: channel_optional.append([4, 8, 12, 16, 20, 24, 28,", "56,60, 64]) distill_config = DistillConfig(teacher_model=net2) sp_net_config = supernet(channel=channel_optional) sp_model =", "channel_list.append(random.choice([ 4, 8, 12, 16])) channel_list.append(16) elif 7 < i", "elif 13 < i <= 19: # channel_list.append(random.choice([ 4, 8,", "= 5e-4 MOMENTUM = 0.9 BATCH_SIZE = 128 CIFAR_MEAN =", "52, 56,60, 64]) else: channel_optional.append( [4, 8, 12, 16, 20,", "< i <= 19: # channel_list.append(random.choice([ 4, 8, 12, 16,", "paddle.Model(ofa_net) MAX_EPOCH = 300 LR = 0.1 WEIGHT_DECAY = 5e-4", 
"Normalize, RandomRotation ) from paddle.vision.datasets import Cifar100 from paddle.io import", "13 < i <= 19: # channel_list.append(random.choice([ 4, 8, 12,", "48, 52, 56,60, 64]) distill_config = DistillConfig(teacher_model=net2) sp_net_config = supernet(channel=channel_optional)", "16, 20, 24, 28, 32, 36, 40, 44, 48, 52,", "ofa_net.set_task('channel') model = paddle.Model(ofa_net) MAX_EPOCH = 300 LR = 0.1", "LR), momentum=MOMENTUM, parameters=model.parameters(), weight_decay=WEIGHT_DECAY), CrossEntropyLoss(), paddle.metric.Accuracy(topk=(1, 5))) transforms = Compose([", "channel_list) net2.set_state_dict(paddle.load('./pretrained_model/resnet20.pdparams')) channel_optional = [] for i in range(0, 23):", "36, 40, 44, 48, 52, 56,60, 64])) channel_list.append(64) net =", "channel_list.append(64) net = ResNet20(100, channel_list) net2 = ResNet20(100, channel_list) net2.set_state_dict(paddle.load('./pretrained_model/resnet20.pdparams'))", "24, 28, 32]) elif 14 < i <= 21: channel_optional.append(", "test_set, epochs=MAX_EPOCH, batch_size=BATCH_SIZE, save_dir='checkpoints', save_freq=100, shuffle=True, num_workers=4, verbose=1, callbacks=callbacks, )", "16, 20, 24, 28, 32]) # channel_optional.append([20, 24, 28, 32])", "Cifar100(DATA_FILE, mode='train', transform=transforms) test_set = Cifar100(DATA_FILE, mode='test', transform=val_transforms) callbacks =", "for i in range(1, 21): if 0 < i <=", "52, 56,60, 64]) distill_config = DistillConfig(teacher_model=net2) sp_net_config = supernet(channel=channel_optional) sp_model", "19: # channel_list.append(random.choice([ 4, 8, 12, 16, 20, 24, 28,", "import random from resnet20 import * import paddle # supernet", "<= 19: # channel_list.append(random.choice([ 4, 8, 12, 16, 20, 24,", "RandomResizedCrop, SaturationTransform, Compose, HueTransform, BrightnessTransform, ContrastTransform, RandomCrop, Normalize, RandomRotation )", "net = ResNet20(100, channel_list) net2 = ResNet20(100, channel_list) 
net2.set_state_dict(paddle.load('./pretrained_model/resnet20.pdparams')) channel_optional", "RandomCrop, Normalize, RandomRotation ) from paddle.vision.datasets import Cifar100 from paddle.io", "= [] for i in range(0, 23): if i <=", "supernet trainning 基于paddleslim模型压缩包 # https://github.com/PaddlePaddle/PaddleSlim 欢迎大家多多star from paddleslim.nas.ofa.convert_super import Convert,", "20, 24, 28, 32])) channel_list.append(32) elif 13 < i <=", "32, 36, 40, 44, 48, 52, 56,60, 64])) channel_list.append(64) else:", "21: channel_optional.append( [4, 8, 12, 16, 20, 24, 28, 32,", "[] for i in range(0, 23): if i <= 7:", "weight_decay=WEIGHT_DECAY), CrossEntropyLoss(), paddle.metric.Accuracy(topk=(1, 5))) transforms = Compose([ RandomCrop(32, padding=4), RandomApply(BrightnessTransform(0.1)),", "# channel_optional.append([36, 40, 44, 48, 52, 56,60, 64]) distill_config =", "0.1958] DATA_FILE = './data/data76994/cifar-100-python.tar.gz' model.prepare( paddle.optimizer.Momentum( learning_rate=LinearWarmup( CosineAnnealingDecay(LR, MAX_EPOCH), 2000,", "random from resnet20 import * import paddle # supernet trainning", "64]) else: channel_optional.append( [4, 8, 12, 16, 20, 24, 28,", "DATA_FILE = './data/data76994/cifar-100-python.tar.gz' model.prepare( paddle.optimizer.Momentum( learning_rate=LinearWarmup( CosineAnnealingDecay(LR, MAX_EPOCH), 2000, 0.,", "CIFAR_STD), ]) val_transforms = Compose([ToArray(), Normalize(CIFAR_MEAN, CIFAR_STD)]) train_set = Cifar100(DATA_FILE,", "= ResNet20(100, channel_list) net2 = ResNet20(100, channel_list) net2.set_state_dict(paddle.load('./pretrained_model/resnet20.pdparams')) channel_optional =", "24, 28, 32]) # channel_optional.append([20, 24, 28, 32]) elif 14", "from paddleslim.nas.ofa import OFA, RunConfig, DistillConfig from paddleslim.nas.ofa.utils import utils", "4, 8, 12, 16, 20, 24, 28, 32, 36, 40,", "<= 13: # channel_list.append(random.choice([ 4, 8, 12, 16, 20, 24,", "paddle.vision.transforms import ( ToTensor, RandomHorizontalFlip, 
RandomResizedCrop, SaturationTransform, Compose, HueTransform, BrightnessTransform,", "Cifar100(DATA_FILE, mode='test', transform=val_transforms) callbacks = [LRSchedulerM(), callbacks.VisualDL('vis_logs/ofa_resnet20')] model.fit( train_set, test_set,", "28, 32]) elif 14 < i <= 21: channel_optional.append( [4,", "欢迎大家多多star from paddleslim.nas.ofa.convert_super import Convert, supernet from paddleslim.nas.ofa import OFA,", "from paddle.vision.transforms import ( ToTensor, RandomHorizontalFlip, RandomResizedCrop, SaturationTransform, Compose, HueTransform,", "56,60, 64]) else: channel_optional.append( [4, 8, 12, 16, 20, 24,", "RandomRotation ) from paddle.vision.datasets import Cifar100 from paddle.io import DataLoader", "padding=4), RandomApply(BrightnessTransform(0.1)), RandomApply(ContrastTransform(0.1)), RandomHorizontalFlip(), RandomRotation(15), ToArray(), Normalize(CIFAR_MEAN, CIFAR_STD), ]) val_transforms", "48, 52, 56, 60, 64]) # channel_optional.append([36, 40, 44, 48,", "channel_optional.append([36, 40, 44, 48, 52, 56,60, 64]) else: channel_optional.append( [4,", "= Cifar100(DATA_FILE, mode='train', transform=transforms) test_set = Cifar100(DATA_FILE, mode='test', transform=val_transforms) callbacks", "utils channel_list = [] for i in range(1, 21): if", "56,60, 64])) channel_list.append(64) else: # channel_list.append(random.choice([ 4, 8, 12, 16,", "RandomApply(ContrastTransform(0.1)), RandomHorizontalFlip(), RandomRotation(15), ToArray(), Normalize(CIFAR_MEAN, CIFAR_STD), ]) val_transforms = Compose([ToArray(),", "64]) # channel_optional.append([36, 40, 44, 48, 52, 56,60, 64]) else:", "24, 28, 32, 36, 40, 44, 48, 52, 56,60, 64]))", "5))) transforms = Compose([ RandomCrop(32, padding=4), RandomApply(BrightnessTransform(0.1)), RandomApply(ContrastTransform(0.1)), RandomHorizontalFlip(), RandomRotation(15),", "8, 12, 16])) channel_list.append(16) elif 7 < i <= 13:", "supernet from paddleslim.nas.ofa import OFA, RunConfig, DistillConfig from 
paddleslim.nas.ofa.utils import", "24, 28, 32])) channel_list.append(32) elif 13 < i <= 19:", "import OFA, RunConfig, DistillConfig from paddleslim.nas.ofa.utils import utils channel_list =", "paddle.optimizer.Momentum( learning_rate=LinearWarmup( CosineAnnealingDecay(LR, MAX_EPOCH), 2000, 0., LR), momentum=MOMENTUM, parameters=model.parameters(), weight_decay=WEIGHT_DECAY),", "from paddleslim.nas.ofa.convert_super import Convert, supernet from paddleslim.nas.ofa import OFA, RunConfig,", "44, 48, 52, 56,60, 64]) else: channel_optional.append( [4, 8, 12,", "supernet(channel=channel_optional) sp_model = Convert(sp_net_config).convert(net) ofa_net = OFA(sp_model, distill_config=distill_config) ofa_net.set_task('channel') model", "mode='train', transform=transforms) test_set = Cifar100(DATA_FILE, mode='test', transform=val_transforms) callbacks = [LRSchedulerM(),", "56,60, 64])) channel_list.append(64) net = ResNet20(100, channel_list) net2 = ResNet20(100,", "distill_config = DistillConfig(teacher_model=net2) sp_net_config = supernet(channel=channel_optional) sp_model = Convert(sp_net_config).convert(net) ofa_net", "56, 60, 64]) # channel_optional.append([36, 40, 44, 48, 52, 56,60,", "transform=val_transforms) callbacks = [LRSchedulerM(), callbacks.VisualDL('vis_logs/ofa_resnet20')] model.fit( train_set, test_set, epochs=MAX_EPOCH, batch_size=BATCH_SIZE,", "i in range(1, 21): if 0 < i <= 7:", "CosineAnnealingDecay(LR, MAX_EPOCH), 2000, 0., LR), momentum=MOMENTUM, parameters=model.parameters(), weight_decay=WEIGHT_DECAY), CrossEntropyLoss(), paddle.metric.Accuracy(topk=(1,", "paddle.vision.datasets import Cifar100 from paddle.io import DataLoader from paddle.optimizer.lr import", "CIFAR_STD = [0.1942, 0.1918, 0.1958] DATA_FILE = './data/data76994/cifar-100-python.tar.gz' model.prepare( paddle.optimizer.Momentum(", "RandomApply(BrightnessTransform(0.1)), RandomApply(ContrastTransform(0.1)), RandomHorizontalFlip(), RandomRotation(15), ToArray(), Normalize(CIFAR_MEAN, 
CIFAR_STD), ]) val_transforms =", "21): if 0 < i <= 7: # channel_list.append(random.choice([ 4,", "= DistillConfig(teacher_model=net2) sp_net_config = supernet(channel=channel_optional) sp_model = Convert(sp_net_config).convert(net) ofa_net =", "from resnet20 import * import paddle # supernet trainning 基于paddleslim模型压缩包", "trainning 基于paddleslim模型压缩包 # https://github.com/PaddlePaddle/PaddleSlim 欢迎大家多多star from paddleslim.nas.ofa.convert_super import Convert, supernet", "RunConfig, DistillConfig from paddleslim.nas.ofa.utils import utils channel_list = [] for", "val_transforms = Compose([ToArray(), Normalize(CIFAR_MEAN, CIFAR_STD)]) train_set = Cifar100(DATA_FILE, mode='train', transform=transforms)", "5e-4 MOMENTUM = 0.9 BATCH_SIZE = 128 CIFAR_MEAN = [0.5071,", "parameters=model.parameters(), weight_decay=WEIGHT_DECAY), CrossEntropyLoss(), paddle.metric.Accuracy(topk=(1, 5))) transforms = Compose([ RandomCrop(32, padding=4),", "channel_optional.append([36, 40, 44, 48, 52, 56,60, 64]) distill_config = DistillConfig(teacher_model=net2)", "paddle # supernet trainning 基于paddleslim模型压缩包 # https://github.com/PaddlePaddle/PaddleSlim 欢迎大家多多star from paddleslim.nas.ofa.convert_super", "RandomHorizontalFlip, RandomResizedCrop, SaturationTransform, Compose, HueTransform, BrightnessTransform, ContrastTransform, RandomCrop, Normalize, RandomRotation", "BrightnessTransform, ContrastTransform, RandomCrop, Normalize, RandomRotation ) from paddle.vision.datasets import Cifar100", "32]) elif 14 < i <= 21: channel_optional.append( [4, 8,", "<= 7: # channel_list.append(random.choice([ 4, 8, 12, 16])) channel_list.append(16) elif", "paddleslim.nas.ofa.utils import utils channel_list = [] for i in range(1,", "elif 7 < i <= 13: # channel_list.append(random.choice([ 4, 8,", "4, 8, 12, 16, 20, 24, 28, 32])) channel_list.append(32) elif", "44, 48, 52, 56, 60, 64]) # channel_optional.append([36, 40, 44,", "64])) channel_list.append(64) net = ResNet20(100, channel_list) net2 = ResNet20(100, 
channel_list)", "in range(0, 23): if i <= 7: channel_optional.append([4, 8, 12,", "channel_optional = [] for i in range(0, 23): if i", "import DataLoader from paddle.optimizer.lr import CosineAnnealingDecay, MultiStepDecay, LinearWarmup import random", "= [0.1942, 0.1918, 0.1958] DATA_FILE = './data/data76994/cifar-100-python.tar.gz' model.prepare( paddle.optimizer.Momentum( learning_rate=LinearWarmup(", "channel_list.append(64) else: # channel_list.append(random.choice([ 4, 8, 12, 16, 20, 24,", "7 < i <= 14: channel_optional.append([4, 8, 12, 16, 20,", "( ToTensor, RandomHorizontalFlip, RandomResizedCrop, SaturationTransform, Compose, HueTransform, BrightnessTransform, ContrastTransform, RandomCrop,", "if i <= 7: channel_optional.append([4, 8, 12, 16]) # channel_optional.append([12,", "channel_optional.append( [4, 8, 12, 16, 20, 24, 28, 32, 36,", "elif 14 < i <= 21: channel_optional.append( [4, 8, 12,", "48, 52, 56,60, 64])) channel_list.append(64) net = ResNet20(100, channel_list) net2", "28, 32]) # channel_optional.append([20, 24, 28, 32]) elif 14 <", "ofa_net = OFA(sp_model, distill_config=distill_config) ofa_net.set_task('channel') model = paddle.Model(ofa_net) MAX_EPOCH =", "= 300 LR = 0.1 WEIGHT_DECAY = 5e-4 MOMENTUM =", "CIFAR_MEAN = [0.5071, 0.4865, 0.4409] CIFAR_STD = [0.1942, 0.1918, 0.1958]", "40, 44, 48, 52, 56,60, 64]) distill_config = DistillConfig(teacher_model=net2) sp_net_config", "8, 12, 16, 20, 24, 28, 32]) # channel_optional.append([20, 24,", "channel_list.append(32) elif 13 < i <= 19: # channel_list.append(random.choice([ 4,", "0., LR), momentum=MOMENTUM, parameters=model.parameters(), weight_decay=WEIGHT_DECAY), CrossEntropyLoss(), paddle.metric.Accuracy(topk=(1, 5))) transforms =", "elif 7 < i <= 14: channel_optional.append([4, 8, 12, 16,", "MOMENTUM = 0.9 BATCH_SIZE = 128 CIFAR_MEAN = [0.5071, 0.4865,", "= Convert(sp_net_config).convert(net) ofa_net = OFA(sp_model, distill_config=distill_config) ofa_net.set_task('channel') model = 
paddle.Model(ofa_net)", "28, 32])) channel_list.append(32) elif 13 < i <= 19: #", "Compose([ RandomCrop(32, padding=4), RandomApply(BrightnessTransform(0.1)), RandomApply(ContrastTransform(0.1)), RandomHorizontalFlip(), RandomRotation(15), ToArray(), Normalize(CIFAR_MEAN, CIFAR_STD),", "# channel_list.append(random.choice([ 4, 8, 12, 16, 20, 24, 28, 32,", "52, 56,60, 64])) channel_list.append(64) else: # channel_list.append(random.choice([ 4, 8, 12,", "8, 12, 16]) # channel_optional.append([12, 16]) elif 7 < i", "40, 44, 48, 52, 56,60, 64])) channel_list.append(64) net = ResNet20(100,", "28, 32, 36, 40, 44, 48, 52, 56, 60, 64])", "train_set, test_set, epochs=MAX_EPOCH, batch_size=BATCH_SIZE, save_dir='checkpoints', save_freq=100, shuffle=True, num_workers=4, verbose=1, callbacks=callbacks,", "14: channel_optional.append([4, 8, 12, 16, 20, 24, 28, 32]) #", "= [LRSchedulerM(), callbacks.VisualDL('vis_logs/ofa_resnet20')] model.fit( train_set, test_set, epochs=MAX_EPOCH, batch_size=BATCH_SIZE, save_dir='checkpoints', save_freq=100,", "64])) channel_list.append(64) else: # channel_list.append(random.choice([ 4, 8, 12, 16, 20,", "CosineAnnealingDecay, MultiStepDecay, LinearWarmup import random from resnet20 import * import", "CrossEntropyLoss(), paddle.metric.Accuracy(topk=(1, 5))) transforms = Compose([ RandomCrop(32, padding=4), RandomApply(BrightnessTransform(0.1)), RandomApply(ContrastTransform(0.1)),", "learning_rate=LinearWarmup( CosineAnnealingDecay(LR, MAX_EPOCH), 2000, 0., LR), momentum=MOMENTUM, parameters=model.parameters(), weight_decay=WEIGHT_DECAY), CrossEntropyLoss(),", "7: channel_optional.append([4, 8, 12, 16]) # channel_optional.append([12, 16]) elif 7", "60, 64]) # channel_optional.append([36, 40, 44, 48, 52, 56,60, 64])", "paddleslim.nas.ofa.convert_super import Convert, supernet from paddleslim.nas.ofa import OFA, RunConfig, DistillConfig", "model = paddle.Model(ofa_net) MAX_EPOCH = 300 LR = 0.1 WEIGHT_DECAY", "WEIGHT_DECAY = 5e-4 MOMENTUM = 0.9 
BATCH_SIZE = 128 CIFAR_MEAN", "ContrastTransform, RandomCrop, Normalize, RandomRotation ) from paddle.vision.datasets import Cifar100 from", "0.9 BATCH_SIZE = 128 CIFAR_MEAN = [0.5071, 0.4865, 0.4409] CIFAR_STD", "< i <= 21: channel_optional.append( [4, 8, 12, 16, 20,", "i <= 19: # channel_list.append(random.choice([ 4, 8, 12, 16, 20,", "else: channel_optional.append( [4, 8, 12, 16, 20, 24, 28, 32,", "= supernet(channel=channel_optional) sp_model = Convert(sp_net_config).convert(net) ofa_net = OFA(sp_model, distill_config=distill_config) ofa_net.set_task('channel')", "else: # channel_list.append(random.choice([ 4, 8, 12, 16, 20, 24, 28,", "52, 56,60, 64])) channel_list.append(64) net = ResNet20(100, channel_list) net2 =", "]) val_transforms = Compose([ToArray(), Normalize(CIFAR_MEAN, CIFAR_STD)]) train_set = Cifar100(DATA_FILE, mode='train',", "0 < i <= 7: # channel_list.append(random.choice([ 4, 8, 12,", "paddleslim.nas.ofa import OFA, RunConfig, DistillConfig from paddleslim.nas.ofa.utils import utils channel_list", "= Compose([ RandomCrop(32, padding=4), RandomApply(BrightnessTransform(0.1)), RandomApply(ContrastTransform(0.1)), RandomHorizontalFlip(), RandomRotation(15), ToArray(), Normalize(CIFAR_MEAN,", "基于paddleslim模型压缩包 # https://github.com/PaddlePaddle/PaddleSlim 欢迎大家多多star from paddleslim.nas.ofa.convert_super import Convert, supernet from", "if 0 < i <= 7: # channel_list.append(random.choice([ 4, 8,", "net2 = ResNet20(100, channel_list) net2.set_state_dict(paddle.load('./pretrained_model/resnet20.pdparams')) channel_optional = [] for i", "MAX_EPOCH = 300 LR = 0.1 WEIGHT_DECAY = 5e-4 MOMENTUM", "16, 20, 24, 28, 32])) channel_list.append(32) elif 13 < i", "channel_optional.append([4, 8, 12, 16, 20, 24, 28, 32]) # channel_optional.append([20,", "DistillConfig from paddleslim.nas.ofa.utils import utils channel_list = [] for i", "< i <= 14: channel_optional.append([4, 8, 12, 16, 20, 24,", "CIFAR_STD)]) train_set = Cifar100(DATA_FILE, mode='train', 
transform=transforms) test_set = Cifar100(DATA_FILE, mode='test',", "44, 48, 52, 56,60, 64])) channel_list.append(64) else: # channel_list.append(random.choice([ 4,", "Cifar100 from paddle.io import DataLoader from paddle.optimizer.lr import CosineAnnealingDecay, MultiStepDecay,", "< i <= 13: # channel_list.append(random.choice([ 4, 8, 12, 16,", "28, 32, 36, 40, 44, 48, 52, 56,60, 64])) channel_list.append(64)", "mode='test', transform=val_transforms) callbacks = [LRSchedulerM(), callbacks.VisualDL('vis_logs/ofa_resnet20')] model.fit( train_set, test_set, epochs=MAX_EPOCH,", "paddle.metric.Accuracy(topk=(1, 5))) transforms = Compose([ RandomCrop(32, padding=4), RandomApply(BrightnessTransform(0.1)), RandomApply(ContrastTransform(0.1)), RandomHorizontalFlip(),", "= Compose([ToArray(), Normalize(CIFAR_MEAN, CIFAR_STD)]) train_set = Cifar100(DATA_FILE, mode='train', transform=transforms) test_set", "<= 7: channel_optional.append([4, 8, 12, 16]) # channel_optional.append([12, 16]) elif", "callbacks = [LRSchedulerM(), callbacks.VisualDL('vis_logs/ofa_resnet20')] model.fit( train_set, test_set, epochs=MAX_EPOCH, batch_size=BATCH_SIZE, save_dir='checkpoints',", "BATCH_SIZE = 128 CIFAR_MEAN = [0.5071, 0.4865, 0.4409] CIFAR_STD =", "from paddle.vision.datasets import Cifar100 from paddle.io import DataLoader from paddle.optimizer.lr", "net2.set_state_dict(paddle.load('./pretrained_model/resnet20.pdparams')) channel_optional = [] for i in range(0, 23): if", "range(1, 21): if 0 < i <= 7: # channel_list.append(random.choice([", "16])) channel_list.append(16) elif 7 < i <= 13: # channel_list.append(random.choice([", "48, 52, 56,60, 64])) channel_list.append(64) else: # channel_list.append(random.choice([ 4, 8,", "16]) # channel_optional.append([12, 16]) elif 7 < i <= 14:" ]
[ "TagScriptConverter ): \"\"\"Edit a slash tag's TagScript.\"\"\" await self.slashtag_edit(ctx, tag,", "embed = e.copy() embed.description = page embed.set_footer(text=f\"{index}/{len(pages)} | {len(tags)} {slash_tags}\")", "commands from redbot.core.utils.chat_formatting import box, humanize_list, inline, pagify from redbot.core.utils.menus", "await ctx.send(await tag.delete()) @slashtag_global.command(\"raw\") @copy_doc(slashtag_raw) async def slashtag_global_raw(self, ctx: commands.Context,", "substantial portions of the Software. THE SOFTWARE IS PROVIDED \"AS", "await self.bot.wait_for(\"message\", check=pred, timeout=timeout) except asyncio.TimeoutError: await self.delete_quietly(ask) raise await", "ctx.send(await tag.edit_description(description)) @slashtag_global_edit.command(\"arguments\", aliases=[\"options\"]) @copy_doc(slashtag_edit_arguments) async def slashtag_global_edit_arguments(self, ctx: commands.Context,", "command {command!r} on guild {ctx.guild!r}\") # exc info unneeded since", "no slash tags.\" if guild else \"There are no global", "TAG_RE = re.compile(r\"(?i)(\\[p\\])?\\b(slash\\s?)?tag'?s?\\b\") CHOICE_RE = re.compile(r\".{1,100}:.{1,100}\") CHOICE_LIMIT = 25 log", "link, is_global=False) @commands.mod_or_permissions(manage_guild=True) @slashtag.group(\"edit\", aliases=[\"e\"], invoke_without_command=True) async def slashtag_edit( self,", "ctx.guild.id if command_type == ApplicationCommandType.CHAT_INPUT: try: description = await self.send_and_query_response(", "(c) 2020-present phenom4n4n Permission is hereby granted, free of charge,", "ctx.send(\"Timed out, not deleting slash tags.\") if not pred.result: return", "): options: List[SlashOption] = [] guild_id = None if is_global", "tagscript, guild_id=guild_id, author_id=ctx.author.id, command=command, ) await ctx.send(await tag.initialize()) async def", "tag.edit_description(description)) @slashtag_global_edit.command(\"arguments\", aliases=[\"options\"]) @copy_doc(slashtag_edit_arguments) 
async def slashtag_global_edit_arguments(self, ctx: commands.Context, tag:", "is_global: bool, ): description = [ self.format_tagscript(tag) for tag in", "@copy_doc(slashtag_message) async def slashtag_global_message( self, ctx: commands.Context, tag_name: TagName(global_priority=True, check_regex=False),", "= SlashTag( self, tagscript, guild_id=guild_id, author_id=ctx.author.id, command=command, ) await ctx.send(await", "stored on this server.\"\"\" await tag.send_info(ctx) @slashtag.command(\"raw\") async def slashtag_raw(self,", "def slashtag_clear(self, ctx: commands.Context): \"\"\"Clear all slash tags for this", "self.delete_quietly(ask) await self.delete_quietly(message) return message.content async def get_choices(self, ctx: commands.Context)", "@copy_doc(slashtag_add) async def slashtag_global_add( self, ctx: commands.Context, tag_name: TagName(global_priority=True), *,", "tag: GlobalTagConverter, *, tagscript: TagScriptConverter ): await ctx.send(await tag.edit_tagscript(tagscript)) @slashtag_global_edit.command(\"tagscript\")", "async def slashtag(self, ctx: commands.Context): \"\"\" Slash Tag management with", "= ( \"Send the list of choice names and values", "a slash tag's TagScript.\"\"\" await self.slashtag_edit(ctx, tag, tagscript=tagscript) @slashtag_edit.command(\"name\") async", "not tags: message = ( \"This server has no slash", "await ctx.send(await tag.edit_tagscript(tagscript)) @slashtag_global_edit.command(\"tagscript\") @copy_doc(slashtag_edit_tagscript) async def slashtag_global_edit_tagscript( self, ctx:", "@commands.mod_or_permissions(manage_guild=True) @slashtag.command(\"user\") async def slashtag_user( self, ctx: commands.Context, tag_name: TagName(check_global=False,", "to add as choices to \" \"the tag. 
Choice names", "= list(pagify(description)) for index, page in enumerate(pages, 1): embed =", "parse `{choice_text}` to a choice as \" \"its name or", "argument optional, all following arguments must also be optional.*\", pred,", "choices=choices, ) @commands.mod_or_permissions(manage_guild=True) @slashtag.command(\"message\") async def slashtag_message( self, ctx: commands.Context,", "\"its name or value exceeded the 100 character limit.\", delete_after=15,", "with TagScript. [Slash tag usage guide](https://phen-cogs.readthedocs.io/en/latest/slashtags/slashtags.html) \"\"\" await self.create_slash_tag( ctx,", "list(pagify(description)) for index, page in enumerate(pages, 1): embed = e.copy()", "def copy_doc(original: Union[commands.Command, types.FunctionType]): def decorator(overriden: Union[commands.Command, types.FunctionType]): doc =", "usage` \"\"\" await self.show_slash_tag_usage(ctx, ctx.guild) @commands.is_owner() @slashtag.command(\"restore\", hidden=True) async def", "slash tags for this server.\"\"\" pred = MessagePredicate.yes_or_no(ctx) try: await", "= await self.send_and_query_response(ctx, query) choices = [] for choice_text in", "self.send_and_query_response( ctx, \"What should the tag description to be? 
(maximum", "def slashtag_global_edit_tagscript( self, ctx: commands.Context, tag: GlobalTagConverter, *, tagscript: TagScriptConverter", "commands.Context): pass @slashtag_global.command(\"add\") @copy_doc(slashtag_add) async def slashtag_global_add( self, ctx: commands.Context,", "types.FunctionType]): def decorator(overriden: Union[commands.Command, types.FunctionType]): doc = original.help if isinstance(original,", "def _sub(match: re.Match) -> str: if match.group(1): return \"[p]slashtag global\"", "SlashTag interaction testing cog is already {loaded}.\") await self.config.testing_enabled.set(target_state) if", "of choice names and values you would like to add", "DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,", "ctx.embed_color()) if is_global: slash_tags = \"global slash tags\" e.set_author(name=\"Global Slash", "its name and value \" \"weren't seperated by a `:`.\",", "tags.\") if not pred.result: return await ctx.send(\"Ok, not deleting slash", "SlashOption, SlashTag from ..testing.button_menus import menu as button_menu from ..utils", "commands use TagScriptEngine. 
[This site](https://phen-cogs.readthedocs.io/en/latest/index.html) has documentation on how to", "name in SlashOptionType.__members__.keys() if not name.startswith(\"SUB\") ] valid_option_types.append(\"choices\") option_query =", "f\"Valid option types: {humanize_list([inline(n) for n in valid_option_types])}\", \"(select `string`", "server.\"\"\" pred = MessagePredicate.yes_or_no(ctx) try: await self.send_and_query_response( ctx, \"Are you", "await ctx.send(f\"Reached max choices ({CHOICE_LIMIT}).\") break return choices async def", "tag.edit_single_option(ctx, argument) @commands.mod_or_permissions(manage_guild=True) @slashtag.command(\"remove\", aliases=[\"delete\", \"-\"]) async def slashtag_remove(self, ctx:", "is_global=False) @commands.mod_or_permissions(manage_guild=True) @slashtag.group(\"edit\", aliases=[\"e\"], invoke_without_command=True) async def slashtag_edit( self, ctx:", "repl def copy_doc(original: Union[commands.Command, types.FunctionType]): def decorator(overriden: Union[commands.Command, types.FunctionType]): doc", "\" \"the tag. Choice names and values should be seperated", "ctx), ) if option_type.lower() == \"choices\": choices = await self.get_choices(ctx)", "link: PastebinConverter, ): await self.create_slash_tag(ctx, tag_name, link, is_global=True) @slashtag_global.group(\"edit\", aliases=[\"e\"],", "persons to whom the Software is furnished to do so,", "guild {ctx.guild!r}\", exc_info=error ) text = ( \"Looks like I", "continue choice = ApplicationOptionChoice(*choice_text.split(\":\", 1)) choices.append(choice) if len(choices) >= CHOICE_LIMIT:", "need to worry about it. 
\"\"\" app_id = id or", "else: slash_tags = \"slash tags\" e.set_author(name=\"Stored Slash Tags\", icon_url=ctx.guild.icon_url) embeds", "argument: str ): await tag.edit_single_option(ctx, argument) @slashtag_global.command(\"remove\", aliases=[\"delete\", \"-\"]) @copy_doc(slashtag_remove)", "len(tagscript) > limit - 3: tagscript = tagscript[:limit] + \"...\"", "of the Software. THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT", "tag usage guide](https://phen-cogs.readthedocs.io/en/latest/slashtags/slashtags.html) \"\"\" await self.create_slash_tag( ctx, tag_name, tagscript, is_global=False,", "ctx: commands.Context): \"\"\" Slash Tag management with TagScript. These commands", "a choice as \" \"its name or value exceeded the", "ctx, \"Is this argument required? (Y/n)\\n*Keep in mind that if", "create_slash_tag( self, ctx: commands.Context, tag_name: str, tagscript: str, *, is_global:", "ctx: commands.Context): \"\"\" See this slash tag usage stats. **Example:**", "await self.http.remove_slash_command(self.eval_command) except discord.HTTPException: pass await self.config.eval_command.clear() self.eval_command = None", "GlobalTagConverter, *, description: str ): await ctx.send(await tag.edit_description(description)) @slashtag_global_edit.command(\"arguments\", aliases=[\"options\"])", "ApplicationCommandType = ApplicationCommandType.CHAT_INPUT, ): options: List[SlashOption] = [] guild_id =", "self.slashtag_global_edit(ctx, tag, tagscript=tagscript) @slashtag_global_edit.command(\"name\") @copy_doc(slashtag_edit_name) async def slashtag_global_edit_name( self, ctx:", "commands.Context, tag: GuildTagConverter, *, name: TagName(check_global=False) ): \"\"\"Edit a slash", "\"Is this argument required? 
(Y/n)\\n*Keep in mind that if you", "for tag in tags.copy().values()}) e = discord.Embed(title=\"Slash Tag Stats\", color=await", "len(title) tagscript = tag.tagscript if len(tagscript) > limit - 3:", "ctx.send(await tag.edit_tagscript(tagscript)) @slashtag_edit.command(\"tagscript\") async def slashtag_edit_tagscript( self, ctx: commands.Context, tag:", "False return SlashOption( name=name.lower(), description=description, option_type=option_type, required=required, choices=choices, ) @commands.mod_or_permissions(manage_guild=True)", "aliases=[\"++\"]) async def slashtag_pastebin( self, ctx: commands.Context, tag_name: TagName(check_global=False), *,", "= 60, ) -> str: if pred is None: pred", "in range(1, 11): try: option = await self.get_option(ctx, added_required=added_required) if", "commands.Context, options: List[SlashOption] ) -> List[SlashOption]: added_required = False for", "pred.result: return await ctx.send(\"Ok, not deleting slash tags.\") guild: discord.Guild", "async def slashtag_global_edit_name( self, ctx: commands.Context, tag: GlobalTagConverter, *, name:", "slashtagset_appid(self, ctx: commands.Context, id: int = None): \"\"\" Manually set", "you don't understand)\", ] option_type = await self.send_and_query_response( ctx, \"\\n\".join(option_query),", "*, added_required: bool = False ) -> SlashOption: name_desc =", "await self.send_and_query_response( ctx, \"Would you like to add another argument?", "ctx: commands.Context, tag_name: str, tagscript: str, *, is_global: bool =", "slash eval command.\"\"\" if not self.eval_command: return await ctx.send(\"The eval", "color=await ctx.embed_color()) embeds = [] for usage_data in chunks(counter.most_common(), 10):", "self.view_slash_tags(ctx, tags, is_global=False) async def show_slash_tag_usage(self, ctx: commands.Context, guild: discord.Guild", "self.eval_command = slasheval.id await ctx.send(\"`/eval` has been registered.\") @commands.check(dev_check) @slashtagset.command(\"rmeval\")", "name be and 
description be?\", \"The argument name and description", "== ApplicationCommandType.CHAT_INPUT: pred = MessagePredicate.yes_or_no(ctx) try: await self.send_and_query_response( ctx, \"Would", "return await ctx.send(\"There are no slash tags on this server.\")", ") try: await command.register() except discord.Forbidden as error: log.error( \"Failed", ") continue if not CHOICE_RE.match(choice_text): await ctx.send( f\"Failed to parse", "furnished to do so, subject to the following conditions: The", "def slashtagset(self, ctx: commands.Context): \"\"\"Manage SlashTags settings.\"\"\" @slashtagset.command(\"settings\") async def", "\" \"should be seperated by `|`. Example:\\n`dog:Doggo|cat:Catto`\" ) response =", "else original.__doc__ doc = TAG_RE.sub(_sub, doc) if isinstance(overriden, commands.Command): overriden._help_override", "tagscript[:limit] + \"...\" tagscript = tagscript.replace(\"\\n\", \" \") return f\"{title}{discord.utils.escape_markdown(tagscript)}\"", "slashtag_global_usage(self, ctx: commands.Context): await self.show_slash_tag_usage(ctx) @slashtag_global.command(\"restore\", hidden=True) @copy_doc(slashtag_restore) async def", "loaded = \"loaded\" if target_state else \"unloaded\" return await ctx.send(f\"The", "\"Send the list of choice names and values you would", "| {len(tags)} {slash_tags}\") embeds.append(embed) # await menu(ctx, embeds, DEFAULT_CONTROLS) await", "counter = Counter({tag.name: tag.uses for tag in tags.copy().values()}) e =", "legacy bots. 
If you don't know what this means, you", "\"Software\"), to deal in the Software without restriction, including without", "): \"\"\"Edit a slash tag's description.\"\"\" await ctx.send(await tag.edit_description(description)) @slashtag_edit.command(\"arguments\",", "slash eval command for debugging.\"\"\" if self.eval_command: return await ctx.send(\"An", "@commands.mod_or_permissions(manage_guild=True) @slashtag_global.command(\"user\") @copy_doc(slashtag_user) async def slashtag_global_user( self, ctx: commands.Context, tag_name:", "aliases=[\"option\"]) @copy_doc(slashtag_edit_argument) async def slashtag_global_edit_argument( self, ctx: commands.Context, tag: GuildTagConverter,", "ctx.send(await tag.edit_name(name)) @slashtag_global_edit.command(\"description\") @copy_doc(slashtag_edit_description) async def slashtag_global_edit_description( self, ctx: commands.Context,", "Add a user command tag with TagScript. [Slash tag usage", "async def slashtag_info(self, ctx: commands.Context, *, tag: TagConverter): \"\"\"Get info", "to use, copy, modify, merge, publish, distribute, sublicense, and/or sell", "choices.append(choice) if len(choices) >= CHOICE_LIMIT: await ctx.send(f\"Reached max choices ({CHOICE_LIMIT}).\")", "def slashtag_raw(self, ctx: commands.Context, *, tag: GuildTagConverter): \"\"\"Get a slash", "= False for i in range(1, 11): try: option =", "*, link: PastebinConverter, ): \"\"\" Add a slash tag with", "ctx: commands.Context, tag: GuildTagConverter, argument: str ): await tag.edit_single_option(ctx, argument)", "tag.initialize()) async def get_options( self, ctx: commands.Context, options: List[SlashOption] )", "slashtag_global_edit_description( self, ctx: commands.Context, tag: GlobalTagConverter, *, description: str ):", "def slashtag_global_add( self, ctx: commands.Context, tag_name: TagName(global_priority=True), *, tagscript: TagScriptConverter,", "ctx.guild) @commands.is_owner() @slashtag.command(\"clear\", hidden=True) async def 
slashtag_clear(self, ctx: commands.Context): \"\"\"Clear", "description = \"\\n\".join(description) e = discord.Embed(color=await ctx.embed_color()) if is_global: slash_tags", "self.create_slash_tag(ctx, tag_name, tagscript, is_global=False) async def create_slash_tag( self, ctx: commands.Context,", "settings.\"\"\" @slashtagset.command(\"settings\") async def slashtagset_settings(self, ctx: commands.Context): \"\"\"View SlashTags settings.\"\"\"", "import ( GlobalTagConverter, GuildTagConverter, PastebinConverter, TagConverter, TagName, TagScriptConverter, ) from", "files (the \"Software\"), to deal in the Software without restriction,", "f\"Eval command: {eval_command}\", f\"Test cog loaded: {testing_enabled}\", ] embed =", "def slashtag_global_edit_argument( self, ctx: commands.Context, tag: GuildTagConverter, argument: str ):", "is_global=True) @slashtag_global.command(\"usage\", aliases=[\"stats\"]) @copy_doc(slashtag_usage) async def slashtag_global_usage(self, ctx: commands.Context): await", "\"global \" name = match.group(0) repl += name if name.istitle():", "async def slashtag_global_raw(self, ctx: commands.Context, *, tag: GlobalTagConverter): await tag.send_raw_tagscript(ctx)", "the rights to use, copy, modify, merge, publish, distribute, sublicense,", "tag_name: TagName(check_global=False), *, tagscript: TagScriptConverter, ): \"\"\" Add a slash", "await self.show_slash_tag_usage(ctx, ctx.guild) @commands.is_owner() @slashtag.command(\"restore\", hidden=True) async def slashtag_restore(self, ctx:", "tagscript, is_global=True) @commands.mod_or_permissions(manage_guild=True) @slashtag_global.command(\"message\") @copy_doc(slashtag_message) async def slashtag_global_message( self, ctx:", "software and associated documentation files (the \"Software\"), to deal in", "asyncio.TimeoutError: await ctx.send(\"Query timed out, not adding arguments.\") else: if", "\"What should the argument name be and description be?\", \"The", "= 
SlashOptionType[option_type.upper()] if not added_required: pred = MessagePredicate.yes_or_no(ctx) await self.send_and_query_response(", "tag: GuildTagConverter): \"\"\"Delete a slash tag.\"\"\" await ctx.send(await tag.delete()) @slashtag.command(\"info\")", "MessagePredicate.yes_or_no(ctx) try: await self.send_and_query_response( ctx, \"Would you like to add", "for more information on slash tag arguments. \"\"\" await tag.edit_options(ctx)", "is hereby granted, free of charge, to any person obtaining", "the SlashTag interaction development test cog. \"\"\" target_state = (", "await self.view_slash_tags(ctx, tags, is_global=False) async def show_slash_tag_usage(self, ctx: commands.Context, guild:", "async def send_and_query_response( self, ctx: commands.Context, query: str, pred: MessagePredicate", "self, name=tag_name, description=description, guild_id=guild_id, options=options, type=command_type, ) try: await command.register()", "await tag.send_raw_tagscript(ctx) @classmethod def format_tagscript(cls, tag: SlashTag, limit: int =", "e = discord.Embed(color=await ctx.embed_color()) if is_global: slash_tags = \"global slash", "pred ) except asyncio.TimeoutError: await ctx.send(\"Query timed out, not adding", "index, page in enumerate(pages, 1): embed = e.copy() embed.description =", "to the following conditions: The above copyright notice and this", "return overriden return decorator class Commands(MixinMeta): @commands.guild_only() @commands.group(aliases=[\"st\"]) async def", "str, pred: MessagePredicate = None, *, timeout: int = 60,", "ctx: commands.Context, tag: GuildTagConverter, argument: str ): \"\"\"Edit a single", "doc else: overriden.__doc__ = doc return overriden return decorator class", "def slashtag_list(self, ctx: commands.Context): \"\"\"View stored slash tags.\"\"\" tags =", "permit persons to whom the Software is furnished to do", "icon_url=ctx.me.avatar_url) else: slash_tags = \"slash tags\" e.set_author(name=\"Stored Slash Tags\", 
icon_url=ctx.guild.icon_url)", "format_tagscript(cls, tag: SlashTag, limit: int = 60) -> str: title", "do so, subject to the following conditions: The above copyright", "self, ctx: commands.Context, *, added_required: bool = False ) ->", "guild else self.global_tag_cache if not tags: message = ( \"This", "aliases=[\"delete\", \"-\"]) async def slashtag_remove(self, ctx: commands.Context, *, tag: GuildTagConverter):", "any person obtaining a copy of this software and associated", "is already registered.\") slasheval = ApplicationCommand( self, name=\"eval\", description=\"SlashTags debugging", "the application ID for [botname] slash commands if it differs", "eval command is already registered.\") slasheval = ApplicationCommand( self, name=\"eval\",", "tag's description.\"\"\" await ctx.send(await tag.edit_description(description)) @slashtag_edit.command(\"arguments\", aliases=[\"options\"]) async def slashtag_edit_arguments(self,", "TagScript. [Slash tag usage guide](https://phen-cogs.readthedocs.io/en/latest/slashtags/slashtags.html) \"\"\" await self.create_slash_tag(ctx, tag_name, tagscript,", "WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT", "slash tag's name.\"\"\" await ctx.send(await tag.edit_name(name)) @slashtag_edit.command(\"description\") async def slashtag_edit_description(", "= MessagePredicate.yes_or_no(ctx) await self.send_and_query_response( ctx, \"Is this argument required? (Y/n)\\n*Keep", "= MessagePredicate.same_context(ctx) ask = await ctx.send(query) try: message = await", "be and description be?\", \"The argument name and description should", "f\"Application ID: **{self.application_id}**\", f\"Eval command: {eval_command}\", f\"Test cog loaded: {testing_enabled}\",", "the bot user ID. This only applies to legacy bots.", "= box(tabulate(usage_data, headers=(\"Tag\", \"Uses\")), \"prolog\") embed = e.copy() embed.description =", "with TagScript. These commands use TagScriptEngine. 
[This site](https://phen-cogs.readthedocs.io/en/latest/index.html) has documentation", "slashtag_global_edit( self, ctx: commands.Context, tag: GlobalTagConverter, *, tagscript: TagScriptConverter ):", "on this server? (Y/n)\", pred ) except asyncio.TimeoutError: return await", "content.\"\"\" await tag.send_raw_tagscript(ctx) @classmethod def format_tagscript(cls, tag: SlashTag, limit: int", "= [] option_type = SlashOptionType[option_type.upper()] if not added_required: pred =", "f\"Test cog loaded: {testing_enabled}\", ] embed = discord.Embed( color=0xC9C9C9, title=\"SlashTags", "including without limitation the rights to use, copy, modify, merge,", "logging import re import types from collections import Counter from", "copy import copy from typing import Dict, List, Union import", "break options.append(option) if i == 10: break pred = MessagePredicate.yes_or_no(ctx)", "be split by a `:`.\", \"Example: `member:A member of this", "CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS", "ctx.embed_color()) embeds = [] for usage_data in chunks(counter.most_common(), 10): usage_chart", "tagscript, is_global=False, command_type=ApplicationCommandType.MESSAGE ) @commands.mod_or_permissions(manage_guild=True) @slashtag.command(\"user\") async def slashtag_user( self,", "str ): \"\"\"Edit a slash tag's description.\"\"\" await ctx.send(await tag.edit_description(description))", "command. 
Only bot owners can use this.\", options=[ SlashOption(name=\"body\", description=\"Code", "copy from typing import Dict, List, Union import discord from", "def slashtagset_addeval(self, ctx: commands.Context): \"\"\"Add a slash eval command for", "without limitation the rights to use, copy, modify, merge, publish,", "tag_name: str, tagscript: str, *, is_global: bool = False, command_type:", "MIT License Copyright (c) 2020-present phenom4n4n Permission is hereby granted,", "slashtag_raw(self, ctx: commands.Context, *, tag: GuildTagConverter): \"\"\"Get a slash tag's", "{command!r} on guild {ctx.guild!r}\", exc_info=error ) text = ( \"Looks", "tag: GuildTagConverter, argument: str ): await tag.edit_single_option(ctx, argument) @slashtag_global.command(\"remove\", aliases=[\"delete\",", "def decorator(overriden: Union[commands.Command, types.FunctionType]): doc = original.help if isinstance(original, commands.Command)", "def slashtag_global_edit_name( self, ctx: commands.Context, tag: GlobalTagConverter, *, name: TagName(global_priority=True),", "self.config.application_id.set(app_id) self.application_id = app_id await ctx.send(f\"Application ID set to `{id}`.\")", "): await self.create_slash_tag( ctx, tag_name, tagscript, is_global=True, command_type=ApplicationCommandType.USER ) @slashtag_global.command(\"pastebin\",", "List[SlashOption] = [] guild_id = None if is_global else ctx.guild.id", "error: log.error( \"Failed to create command {command!r} on guild {ctx.guild!r}\",", "TagName, TagScriptConverter, ) from ..http import ApplicationOptionChoice, SlashOptionType from ..objects", "tags: Dict[int, SlashTag], *, is_global: bool, ): description = [", "str ): await ctx.send(await tag.edit_description(description)) @slashtag_global_edit.command(\"arguments\", aliases=[\"options\"]) @copy_doc(slashtag_edit_arguments) async def", "humanize_list, inline, pagify from redbot.core.utils.menus import DEFAULT_CONTROLS, menu from redbot.core.utils.predicates", "] name_pred = 
MessagePredicate.regex(ARGUMENT_NAME_DESCRIPTION, ctx) await self.send_and_query_response(ctx, \"\\n\".join(name_desc), name_pred) match", "if option_type.lower() == \"choices\": choices = await self.get_choices(ctx) option_type =", "is_global=False) async def show_slash_tag_usage(self, ctx: commands.Context, guild: discord.Guild = None):", "commands.Context, tag_name: str, tagscript: str, *, is_global: bool = False,", "applies to legacy bots. If you don't know what this", "= \"✅\" if self.testing_enabled else \"❎\" description = [ f\"Application", "use TagScriptEngine. [This site](https://phen-cogs.readthedocs.io/en/latest/index.html) has documentation on how to use", "await self.get_options(ctx, options) command = ApplicationCommand( self, name=tag_name, description=description, guild_id=guild_id,", "str, tagscript: str, *, is_global: bool = False, command_type: ApplicationCommandType", ") if target_state is self.testing_enabled: loaded = \"loaded\" if target_state", "ApplicationCommand, ApplicationCommandType, SlashOption, SlashTag from ..testing.button_menus import menu as button_menu", "self.show_slash_tag_usage(ctx) @slashtag_global.command(\"restore\", hidden=True) @copy_doc(slashtag_restore) async def slashtag_global_restore(self, ctx: commands.Context): await", "tagscript: TagScriptConverter ): \"\"\"Edit a slash tag's TagScript.\"\"\" await self.slashtag_edit(ctx,", "raw content.\"\"\" await tag.send_raw_tagscript(ctx) @classmethod def format_tagscript(cls, tag: SlashTag, limit:", "deal in the Software without restriction, including without limitation the", "] embed = discord.Embed( color=0xC9C9C9, title=\"SlashTags Settings\", description=\"\\n\".join(description) ) await", "command options is needed raise tag = SlashTag( self, tagscript,", "less than or equal to 100 characters.*\", ] name_pred =", "slashtag_edit_name( self, ctx: commands.Context, tag: GuildTagConverter, *, name: TagName(check_global=False) ):", "await ctx.send(\"Timed out, not deleting 
slash tags.\") if not pred.result:", "BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR", "\"\"\"View stored slash tags.\"\"\" tags = self.guild_tag_cache[ctx.guild.id] if not tags:", "break return choices async def get_option( self, ctx: commands.Context, *,", "commands.Context, tag: GuildTagConverter, *, description: str ): \"\"\"Edit a slash", "def slashtagset_rmeval(self, ctx: commands.Context): \"\"\"Remove the slash eval command.\"\"\" if", "characters.*\", ] name_pred = MessagePredicate.regex(ARGUMENT_NAME_DESCRIPTION, ctx) await self.send_and_query_response(ctx, \"\\n\".join(name_desc), name_pred)", "@slashtag.group(\"global\") @copy_doc(slashtag) async def slashtag_global(self, ctx: commands.Context): pass @slashtag_global.command(\"add\") @copy_doc(slashtag_add)", "in copy(self.guild_tag_cache[guild.id]).values(): tag.remove_from_cache() tag.command.remove_from_cache() del tag self.guild_tag_cache[guild.id].clear() await self.config.guild(guild).tags.clear() await", "test cog. \"\"\" target_state = ( true_or_false if true_or_false is", "except asyncio.TimeoutError: await ctx.send(\"Query timed out, not adding arguments.\") else:", "asyncio.TimeoutError: await self.delete_quietly(ask) raise await self.delete_quietly(ask) await self.delete_quietly(message) return message.content", "tag in copy(self.guild_tag_cache[guild.id]).values(): tag.remove_from_cache() tag.command.remove_from_cache() del tag self.guild_tag_cache[guild.id].clear() await self.config.guild(guild).tags.clear()", "eval command hasn't been registered.\") try: await self.http.remove_slash_command(self.eval_command) except discord.HTTPException:", "should the tag description to be? 
(maximum 100 characters)\", pred=MessagePredicate.length_less(101,", "slashtag_global_edit_argument( self, ctx: commands.Context, tag: GuildTagConverter, argument: str ): await", "don't understand)\", ] option_type = await self.send_and_query_response( ctx, \"\\n\".join(option_query), MessagePredicate.lower_contained_in(valid_option_types,", "self.create_slash_tag( ctx, tag_name, tagscript, is_global=True, command_type=ApplicationCommandType.USER ) @slashtag_global.command(\"pastebin\", aliases=[\"++\"]) @copy_doc(slashtag_pastebin)", "[Slash tag usage guide](https://phen-cogs.readthedocs.io/en/latest/slashtags/slashtags.html) \"\"\" await self.create_slash_tag( ctx, tag_name, tagscript,", "): await ctx.send(await tag.edit_tagscript(tagscript)) @slashtag_global_edit.command(\"tagscript\") @copy_doc(slashtag_edit_tagscript) async def slashtag_global_edit_tagscript( self,", "( \"Send the list of choice names and values you", "re import types from collections import Counter from copy import", "commands.Context): \"\"\" Slash Tag management with TagScript. These commands use", "name_pred) match = name_pred.result name, description = match.group(1), match.group(2) valid_option_types", "class Commands(MixinMeta): @commands.guild_only() @commands.group(aliases=[\"st\"]) async def slashtag(self, ctx: commands.Context): \"\"\"", "ctx.send(f\"The SlashTag interaction testing cog is already {loaded}.\") await self.config.testing_enabled.set(target_state)", "to create command {command!r} on guild {ctx.guild!r}\") # exc info", "name=tag_name, description=description, guild_id=guild_id, options=options, type=command_type, ) try: await command.register() except", "arguments. 
See [this documentation page](https://phen-cogs.readthedocs.io/en/latest/slashtags/slash_arguments.html) for more information on slash", "slashtag_user( self, ctx: commands.Context, tag_name: TagName(check_global=False, check_regex=False), *, tagscript: TagScriptConverter,", "to a choice as its name and value \" \"weren't", "like to add arguments to this tag? (Y/n)\", pred )", "- \" limit -= len(title) tagscript = tag.tagscript if len(tagscript)", "FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT", "3: tagscript = tagscript[:limit] + \"...\" tagscript = tagscript.replace(\"\\n\", \"", "commands.Context, guild: discord.Guild = None): tags = self.guild_tag_cache[guild.id] if guild", "slashtag_global_pastebin( self, ctx: commands.Context, tag_name: TagName(check_global=False), *, link: PastebinConverter, ):", "command_type=ApplicationCommandType.MESSAGE ) @commands.mod_or_permissions(manage_guild=True) @slashtag_global.command(\"user\") @copy_doc(slashtag_user) async def slashtag_global_user( self, ctx:", "command tag with TagScript. [Slash tag usage guide](https://phen-cogs.readthedocs.io/en/latest/slashtags/slashtags.html) \"\"\" await", "@slashtag_global_edit.command(\"tagscript\") @copy_doc(slashtag_edit_tagscript) async def slashtag_global_edit_tagscript( self, ctx: commands.Context, tag: GlobalTagConverter,", "TagScriptConverter ): \"\"\"Edit a slash tag.\"\"\" await ctx.send(await tag.edit_tagscript(tagscript)) @slashtag_edit.command(\"tagscript\")", "WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT", "add arguments to this tag? 
(Y/n)\", pred ) except asyncio.TimeoutError:", "@slashtag_global_edit.command(\"name\") @copy_doc(slashtag_edit_name) async def slashtag_global_edit_name( self, ctx: commands.Context, tag: GlobalTagConverter,", "TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE", "@slashtag_edit.command(\"tagscript\") async def slashtag_edit_tagscript( self, ctx: commands.Context, tag: GuildTagConverter, *,", "the argument name be and description be?\", \"The argument name", "for name in SlashOptionType.__members__.keys() if not name.startswith(\"SUB\") ] valid_option_types.append(\"choices\") option_query", "pred = MessagePredicate.yes_or_no(ctx) try: await self.send_and_query_response( ctx, \"Would you like", "async def slashtag_global_edit_argument( self, ctx: commands.Context, tag: GuildTagConverter, argument: str", "= \"Unloaded\" self.remove_test_cog() await ctx.send(f\"{loaded} the SlashTag interaction testing cog.\")", "valid_option_types = [ name.lower() for name in SlashOptionType.__members__.keys() if not", "@slashtag.command(\"restore\", hidden=True) async def slashtag_restore(self, ctx: commands.Context): \"\"\"Restore all slash", "TagConverter): \"\"\"Get info about a slash tag that is stored", "all slash tags from the database.\"\"\" await self.restore_tags(ctx, ctx.guild) @commands.is_owner()", "for i in range(1, 11): try: option = await self.get_option(ctx,", "\"with this invite link and try again: <https://discordapp.com/oauth2/authorize\" f\"?client_id={self.bot.user.id}&scope=bot%20applications.commands>\" )", "choice names and values you would like to add as", "as error: log.error( \"Failed to create command {command!r} on guild", "CHOICE_RE = re.compile(r\".{1,100}:.{1,100}\") CHOICE_LIMIT = 25 log = logging.getLogger(\"red.phenom4n4n.slashtags.commands\") def", "merge, publish, distribute, sublicense, and/or sell copies of the Software,", "pred=MessagePredicate.length_less(101, ctx), ) except asyncio.TimeoutError: return await 
ctx.send(\"Tag addition timed", "\"Failed to create command {command!r} on guild {ctx.guild!r}\", exc_info=error )", "if self.eval_command: return await ctx.send(\"An eval command is already registered.\")", "so, subject to the following conditions: The above copyright notice", "charge, to any person obtaining a copy of this software", "single slash tag's argument by name.\"\"\" await tag.edit_single_option(ctx, argument) @commands.mod_or_permissions(manage_guild=True)", "import commands from redbot.core.utils.chat_formatting import box, humanize_list, inline, pagify from", "embeds, DEFAULT_CONTROLS) await button_menu(ctx, embeds) @slashtag.command(\"list\") async def slashtag_list(self, ctx:", "required = False return SlashOption( name=name.lower(), description=description, option_type=option_type, required=required, choices=choices,", "check=pred, timeout=timeout) except asyncio.TimeoutError: await self.delete_quietly(ask) raise await self.delete_quietly(ask) await", "already registered.\") slasheval = ApplicationCommand( self, name=\"eval\", description=\"SlashTags debugging eval", "permission notice shall be included in all copies or substantial", "`string` if you don't understand)\", ] option_type = await self.send_and_query_response(", "tag usage stats. 
**Example:** `[p]slashtag usage` \"\"\" await self.show_slash_tag_usage(ctx, ctx.guild)", "exceed 32 characters and can only contain characters \" \"that", "= await self.send_and_query_response( ctx, \"What should the tag description to", "ctx: commands.Context, tag_name: TagName(global_priority=True, check_regex=False), *, tagscript: TagScriptConverter, ): await", "description=\"\\n\".join(description) ) await ctx.send(embed=embed) @slashtagset.command(\"appid\") async def slashtagset_appid(self, ctx: commands.Context,", "choices = [] option_type = SlashOptionType[option_type.upper()] if not added_required: pred", "slash tag.\"\"\" await ctx.send(await tag.edit_tagscript(tagscript)) @slashtag_edit.command(\"tagscript\") async def slashtag_edit_tagscript( self,", "self.get_choices(ctx) option_type = \"STRING\" else: choices = [] option_type =", "= original.help if isinstance(original, commands.Command) else original.__doc__ doc = TAG_RE.sub(_sub,", "# exc info unneeded since error handler should print it,", ") -> List[SlashOption]: added_required = False for i in range(1,", "is stored on this server.\"\"\" await tag.send_info(ctx) @slashtag.command(\"raw\") async def", "if not pred.result: return await ctx.send(\"Ok, not deleting slash tags.\")", "tagscript: TagScriptConverter ): await ctx.send(await tag.edit_tagscript(tagscript)) @slashtag_global_edit.command(\"tagscript\") @copy_doc(slashtag_edit_tagscript) async def", "@commands.mod_or_permissions(manage_guild=True) @slashtag_global.command(\"message\") @copy_doc(slashtag_message) async def slashtag_global_message( self, ctx: commands.Context, tag_name:", "@commands.check(dev_check) @slashtagset.command(\"rmeval\") async def slashtagset_rmeval(self, ctx: commands.Context): \"\"\"Remove the slash", "tagscript: TagScriptConverter, ): await self.create_slash_tag( ctx, tag_name, tagscript, is_global=True, command_type=ApplicationCommandType.USER", "ctx: commands.Context, guild: discord.Guild = None): tags = 
self.guild_tag_cache[guild.id] if", "value exceeded the 100 character limit.\", delete_after=15, ) continue choice", "needed raise tag = SlashTag( self, tagscript, guild_id=guild_id, author_id=ctx.author.id, command=command,", "additional arguments.\") break if pred.result is False: break return options", "int = None): \"\"\" Manually set the application ID for", "menu(ctx, embeds, DEFAULT_CONTROLS) @slashtag.command(\"usage\", aliases=[\"stats\"]) async def slashtag_usage(self, ctx: commands.Context):", "tag? (Y/n)\", pred ) except asyncio.TimeoutError: await ctx.send(\"Query timed out,", "name.istitle(): repl = repl.title() return repl def copy_doc(original: Union[commands.Command, types.FunctionType]):", "= logging.getLogger(\"red.phenom4n4n.slashtags.commands\") def _sub(match: re.Match) -> str: if match.group(1): return", "MessagePredicate.same_context(ctx) ask = await ctx.send(query) try: message = await self.bot.wait_for(\"message\",", "\"\"\" app_id = id or self.bot.user.id await self.config.application_id.set(app_id) self.application_id =", "\"\"\" @commands.mod_or_permissions(manage_guild=True) @slashtag.command(\"add\", aliases=[\"create\", \"+\"]) async def slashtag_add( self, ctx:", "@commands.mod_or_permissions(manage_guild=True) @slashtag.command(\"remove\", aliases=[\"delete\", \"-\"]) async def slashtag_remove(self, ctx: commands.Context, *,", "= \"global \" name = match.group(0) repl += name if", "tagscript: TagScriptConverter, ): await self.create_slash_tag( ctx, tag_name, tagscript, is_global=True, command_type=ApplicationCommandType.MESSAGE", "async def slashtagset_appid(self, ctx: commands.Context, id: int = None): \"\"\"", "owners can use this.\", options=[ SlashOption(name=\"body\", description=\"Code body to evaluate.\",", "if len(tagscript) > limit - 3: tagscript = tagscript[:limit] +", "return \"[p]slashtag global\" repl = \"global \" name = match.group(0)", "description = [ f\"Application ID: **{self.application_id}**\", f\"Eval command: 
{eval_command}\", f\"Test", "not name.startswith(\"SUB\") ] valid_option_types.append(\"choices\") option_query = [ \"What should the", "= \"global slash tags\" e.set_author(name=\"Global Slash Tags\", icon_url=ctx.me.avatar_url) else: slash_tags", "is not None else not await self.config.testing_enabled() ) if target_state", "@commands.is_owner() @slashtag.command(\"restore\", hidden=True) async def slashtag_restore(self, ctx: commands.Context): \"\"\"Restore all", "await command.register() except discord.Forbidden as error: log.error( \"Failed to create", "pred.result else: await ctx.send( \"This argument was automatically made optional", "I don't have permission to add Slash Commands here. Reinvite", "IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,", "pages = list(pagify(description)) for index, page in enumerate(pages, 1): embed", "= 60) -> str: title = f\"`{tag.type.get_prefix()}{tag.name}` - \" limit", "the slash eval command.\"\"\" if not self.eval_command: return await ctx.send(\"The", "Union import discord from redbot.core import commands from redbot.core.utils.chat_formatting import", "loaded = \"Loaded\" self.add_test_cog() else: loaded = \"Unloaded\" self.remove_test_cog() await", "\"\"\"Edit a slash tag's description.\"\"\" await ctx.send(await tag.edit_description(description)) @slashtag_edit.command(\"arguments\", aliases=[\"options\"])", "added_required: pred = MessagePredicate.yes_or_no(ctx) await self.send_and_query_response( ctx, \"Is this argument", "await self.config.eval_command.set(slasheval.id) self.eval_command = slasheval.id await ctx.send(\"`/eval` has been registered.\")", "*, timeout: int = 60, ) -> str: if pred", "tag, tagscript=tagscript) @slashtag_edit.command(\"name\") async def slashtag_edit_name( self, ctx: commands.Context, tag:", "name: TagName(global_priority=True), ): await ctx.send(await tag.edit_name(name)) @slashtag_global_edit.command(\"description\") @copy_doc(slashtag_edit_description) async def", 
"obtaining a copy of this software and associated documentation files", "as the previous one was optional.\", delete_after=15, ) required =", "return await ctx.send(message) counter = Counter({tag.name: tag.uses for tag in", "self, name=\"eval\", description=\"SlashTags debugging eval command. Only bot owners can", "async def get_option( self, ctx: commands.Context, *, added_required: bool =", "aliases=[\"delete\", \"-\"]) @copy_doc(slashtag_remove) async def slashtag_global_remove(self, ctx: commands.Context, *, tag:", "ctx.send(text) except Exception: log.error(\"Failed to create command {command!r} on guild", "OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR", "a copy of this software and associated documentation files (the", "Counter from copy import copy from typing import Dict, List,", "optional.\", delete_after=15, ) required = False return SlashOption( name=name.lower(), description=description,", "..objects import ApplicationCommand, ApplicationCommandType, SlashOption, SlashTag from ..testing.button_menus import menu", "await ctx.send(\"Tag addition timed out.\") else: description = \"\" if", "def slashtag_add( self, ctx: commands.Context, tag_name: TagName(check_global=False), *, tagscript: TagScriptConverter,", "\"\"\" Manually set the application ID for [botname] slash commands", "invoke_without_command=True) @copy_doc(slashtag_edit) async def slashtag_global_edit( self, ctx: commands.Context, tag: GlobalTagConverter,", "if you choose to make this argument optional, all following", "color=0xC9C9C9, title=\"SlashTags Settings\", description=\"\\n\".join(description) ) await ctx.send(embed=embed) @slashtagset.command(\"appid\") async def", "tagscript, is_global=False, command_type=ApplicationCommandType.USER ) @commands.mod_or_permissions(manage_guild=True) @slashtag.command(\"pastebin\", aliases=[\"++\"]) async def slashtag_pastebin(", "self.guild_tag_cache[guild.id] if guild else self.global_tag_cache if not tags: message =", "is 
self.testing_enabled: loaded = \"loaded\" if target_state else \"unloaded\" return", "slashtag_global_add( self, ctx: commands.Context, tag_name: TagName(global_priority=True), *, tagscript: TagScriptConverter, ):", "= self.guild_tag_cache[guild.id] if guild else self.global_tag_cache if not tags: message", "try: message = await self.bot.wait_for(\"message\", check=pred, timeout=timeout) except asyncio.TimeoutError: await", "GuildTagConverter, *, tagscript: TagScriptConverter ): \"\"\"Edit a slash tag.\"\"\" await", "menu from redbot.core.utils.predicates import MessagePredicate from tabulate import tabulate from", "author_id=ctx.author.id, command=command, ) await ctx.send(await tag.initialize()) async def get_options( self,", "is None: pred = MessagePredicate.same_context(ctx) ask = await ctx.send(query) try:", "log = logging.getLogger(\"red.phenom4n4n.slashtags.commands\") def _sub(match: re.Match) -> str: if match.group(1):", "a slash tag's raw content.\"\"\" await tag.send_raw_tagscript(ctx) @classmethod def format_tagscript(cls,", "embed = e.copy() embed.description = usage_chart embeds.append(embed) await menu(ctx, embeds,", "bool = None): \"\"\" Load or unload the SlashTag interaction", "= repl.title() return repl def copy_doc(original: Union[commands.Command, types.FunctionType]): def decorator(overriden:", "= \"slash tags\" e.set_author(name=\"Stored Slash Tags\", icon_url=ctx.guild.icon_url) embeds = []", ") response = await self.send_and_query_response(ctx, query) choices = [] for", "slashtag_restore(self, ctx: commands.Context): \"\"\"Restore all slash tags from the database.\"\"\"", "CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF", "ctx.send(await tag.initialize()) async def get_options( self, ctx: commands.Context, options: List[SlashOption]", "names and values should be seperated by `:`, and each", "tags.\") guild: discord.Guild = ctx.guild await self.http.put_guild_slash_commands(guild.id, []) for tag", "GlobalTagConverter, *, 
tagscript: TagScriptConverter ): await ctx.send(await tag.edit_tagscript(tagscript)) @slashtag_global_edit.command(\"tagscript\") @copy_doc(slashtag_edit_tagscript)", "\"\"\"Get info about a slash tag that is stored on", "str: if pred is None: pred = MessagePredicate.same_context(ctx) ask =", "the previous one was optional.\", delete_after=15, ) required = False", "tag.send_raw_tagscript(ctx) @classmethod def format_tagscript(cls, tag: SlashTag, limit: int = 60)", "def slashtagset_testing(self, ctx: commands.Context, true_or_false: bool = None): \"\"\" Load", "CHOICE_RE.match(choice_text): await ctx.send( f\"Failed to parse `{choice_text}` to a choice", "optional as the previous one was optional.\", delete_after=15, ) required", "\"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,", "values you would like to add as choices to \"", "slasheval.register() await self.config.eval_command.set(slasheval.id) self.eval_command = slasheval.id await ctx.send(\"`/eval` has been", "timeout: int = 60, ) -> str: if pred is", "tag.\"\"\" await ctx.send(await tag.delete()) @slashtag.command(\"info\") async def slashtag_info(self, ctx: commands.Context,", "str, *, is_global: bool = False, command_type: ApplicationCommandType = ApplicationCommandType.CHAT_INPUT,", "it. \"\"\" app_id = id or self.bot.user.id await self.config.application_id.set(app_id) self.application_id", "is furnished to do so, subject to the following conditions:", "\"\"\" Add a slash tag with TagScript. [Slash tag usage", "\"What should the tag description to be? (maximum 100 characters)\",", "to 100 characters.*\", ] name_pred = MessagePredicate.regex(ARGUMENT_NAME_DESCRIPTION, ctx) await self.send_and_query_response(ctx,", "tag arguments. 
\"\"\" await tag.edit_options(ctx) @slashtag_edit.command(\"argument\", aliases=[\"option\"]) async def slashtag_edit_argument(", "TagName(global_priority=True), ): await ctx.send(await tag.edit_name(name)) @slashtag_global_edit.command(\"description\") @copy_doc(slashtag_edit_description) async def slashtag_global_edit_description(", "usage_chart = box(tabulate(usage_data, headers=(\"Tag\", \"Uses\")), \"prolog\") embed = e.copy() embed.description", "`{choice_text}` to a choice as its name and value \"", "= [ name.lower() for name in SlashOptionType.__members__.keys() if not name.startswith(\"SUB\")", "a slash tag's description.\"\"\" await ctx.send(await tag.edit_description(description)) @slashtag_edit.command(\"arguments\", aliases=[\"options\"]) async", "a slash eval command for debugging.\"\"\" if self.eval_command: return await", "dev_check TAG_RE = re.compile(r\"(?i)(\\[p\\])?\\b(slash\\s?)?tag'?s?\\b\") CHOICE_RE = re.compile(r\".{1,100}:.{1,100}\") CHOICE_LIMIT = 25", "to be? (maximum 100 characters)\", pred=MessagePredicate.length_less(101, ctx), ) except asyncio.TimeoutError:", "slash tags from the database.\"\"\" await self.restore_tags(ctx, ctx.guild) @commands.is_owner() @slashtag.command(\"clear\",", "not await self.config.testing_enabled() ) if target_state is self.testing_enabled: loaded =", "GuildTagConverter): \"\"\" Edit a slash tag's arguments. See [this documentation", "SlashTag interaction development test cog. \"\"\" target_state = ( true_or_false", "def send_and_query_response( self, ctx: commands.Context, query: str, pred: MessagePredicate =", "means, you don't need to worry about it. 
\"\"\" app_id", "self, ctx: commands.Context, tag: GuildTagConverter, argument: str ): \"\"\"Edit a", "if \":\" not in choice_text: await ctx.send( f\"Failed to parse", "await self.send_and_query_response( ctx, \"Would you like to add arguments to", "], ) await slasheval.register() await self.config.eval_command.set(slasheval.id) self.eval_command = slasheval.id await", "(Y/n)\", pred ) except asyncio.TimeoutError: await ctx.send(\"Query timed out, not", "required? (Y/n)\\n*Keep in mind that if you choose to make", "if it differs from the bot user ID. This only", "are alphanumeric or '_' or '-'.\", \"The argument description must", "to add Slash Commands here. Reinvite me \" \"with this", "stats. **Example:** `[p]slashtag usage` \"\"\" await self.show_slash_tag_usage(ctx, ctx.guild) @commands.is_owner() @slashtag.command(\"restore\",", "tagscript: TagScriptConverter ): await self.slashtag_global_edit(ctx, tag, tagscript=tagscript) @slashtag_global_edit.command(\"name\") @copy_doc(slashtag_edit_name) async", "commands.Context): \"\"\"Add a slash eval command for debugging.\"\"\" if self.eval_command:", "async def slashtagset_settings(self, ctx: commands.Context): \"\"\"View SlashTags settings.\"\"\" eval_command =", "def slashtag_message( self, ctx: commands.Context, tag_name: TagName(check_global=False, check_regex=False), *, tagscript:", "TagScriptConverter, ): \"\"\" Add a slash tag with TagScript. 
[Slash", "page embed.set_footer(text=f\"{index}/{len(pages)} | {len(tags)} {slash_tags}\") embeds.append(embed) # await menu(ctx, embeds,", "cog is already {loaded}.\") await self.config.testing_enabled.set(target_state) if target_state: loaded =", "characters)\", pred=MessagePredicate.length_less(101, ctx), ) except asyncio.TimeoutError: return await ctx.send(\"Tag addition", "del tag self.guild_tag_cache[guild.id].clear() await self.config.guild(guild).tags.clear() await ctx.send(\"Tags deleted.\") @commands.is_owner() @slashtag.group(\"global\")", "TagName(global_priority=True), *, tagscript: TagScriptConverter, ): await self.create_slash_tag(ctx, tag_name, tagscript, is_global=True)", "AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT", "slash tag with a Pastebin link. \"\"\" await self.create_slash_tag(ctx, tag_name,", "if target_state: loaded = \"Loaded\" self.add_test_cog() else: loaded = \"Unloaded\"", "async def slashtag_add( self, ctx: commands.Context, tag_name: TagName(check_global=False), *, tagscript:", "should the argument type be?\", f\"Valid option types: {humanize_list([inline(n) for", "await tag.edit_options(ctx) @slashtag_global_edit.command(\"argument\", aliases=[\"option\"]) @copy_doc(slashtag_edit_argument) async def slashtag_global_edit_argument( self, ctx:", "parse `{choice_text}` to a choice as its name and value", "eval_command = f\"✅ (**{self.eval_command}**)\" if self.eval_command else \"❎\" testing_enabled =", "description be?\", \"The argument name and description should be split", "slasheval.id await ctx.send(\"`/eval` has been registered.\") @commands.check(dev_check) @slashtagset.command(\"rmeval\") async def", "addition timed out.\") else: description = \"\" if command_type ==", "): \"\"\"Edit a slash tag.\"\"\" await ctx.send(await tag.edit_tagscript(tagscript)) @slashtag_edit.command(\"tagscript\") async", "== ApplicationCommandType.CHAT_INPUT: try: description = await self.send_and_query_response( ctx, \"What should", 
"OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT", "not added_required: pred = MessagePredicate.yes_or_no(ctx) await self.send_and_query_response( ctx, \"Is this", "\"\"\"Get a slash tag's raw content.\"\"\" await tag.send_raw_tagscript(ctx) @classmethod def", "= [] guild_id = None if is_global else ctx.guild.id if", "async def slashtag_global_restore(self, ctx: commands.Context): await self.restore_tags(ctx, None) @commands.is_owner() @commands.group(aliases=[\"slashset\"])", "bool, ): description = [ self.format_tagscript(tag) for tag in sorted(tags.values(),", "def slashtag_restore(self, ctx: commands.Context): \"\"\"Restore all slash tags from the", "command for debugging.\"\"\" if self.eval_command: return await ctx.send(\"An eval command", "11): try: option = await self.get_option(ctx, added_required=added_required) if not option.required:", "self.application_id = app_id await ctx.send(f\"Application ID set to `{id}`.\") @commands.check(dev_check)", "range(1, 11): try: option = await self.get_option(ctx, added_required=added_required) if not", "commands.Context) -> List[ApplicationOptionChoice]: query = ( \"Send the list of", "self, ctx: commands.Context, tag: GlobalTagConverter, *, description: str ): await", "to \" \"the tag. Choice names and values should be", "= self.global_tag_cache if not tags: return await ctx.send(\"There are no", "to add arguments to this tag? 
(Y/n)\", pred ) except", "self.config.testing_enabled.set(target_state) if target_state: loaded = \"Loaded\" self.add_test_cog() else: loaded =", "= await self.get_option(ctx, added_required=added_required) if not option.required: added_required = True", "else \"❎\" description = [ f\"Application ID: **{self.application_id}**\", f\"Eval command:", "it, however info on the command options is needed raise", "if not tags: return await ctx.send(\"There are no slash tags", "been registered.\") try: await self.http.remove_slash_command(self.eval_command) except discord.HTTPException: pass await self.config.eval_command.clear()", "added_required: bool = False ) -> SlashOption: name_desc = [", "tag in tags.copy().values()}) e = discord.Embed(title=\"Slash Tag Stats\", color=await ctx.embed_color())", "ctx.send(\"There are no global slash tags.\") await self.view_slash_tags(ctx, tags, is_global=True)", "= None): tags = self.guild_tag_cache[guild.id] if guild else self.global_tag_cache if", "n in valid_option_types])}\", \"(select `string` if you don't understand)\", ]", "= False, command_type: ApplicationCommandType = ApplicationCommandType.CHAT_INPUT, ): options: List[SlashOption] =", "it differs from the bot user ID. This only applies", "tag.edit_name(name)) @slashtag_edit.command(\"description\") async def slashtag_edit_description( self, ctx: commands.Context, tag: GuildTagConverter,", "TagScript. These commands use TagScriptEngine. 
[This site](https://phen-cogs.readthedocs.io/en/latest/index.html) has documentation on", "OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR", "self, ctx: commands.Context, tag_name: TagName(global_priority=True, check_regex=False), *, tagscript: TagScriptConverter, ):", "timed out.\", delete_after=15) break options.append(option) if i == 10: break", "[Slash tag usage guide](https://phen-cogs.readthedocs.io/en/latest/slashtags/slashtags.html) \"\"\" await self.create_slash_tag(ctx, tag_name, tagscript, is_global=False)", "asyncio.TimeoutError: return await ctx.send(\"Tag addition timed out.\") else: description =", "stored slash tags.\"\"\" tags = self.guild_tag_cache[ctx.guild.id] if not tags: return", "ctx: commands.Context, *, tag: GlobalTagConverter): await ctx.send(await tag.delete()) @slashtag_global.command(\"raw\") @copy_doc(slashtag_raw)", "PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS", "\"\"\" Slash Tag management with TagScript. These commands use TagScriptEngine.", "USE OR OTHER DEALINGS IN THE SOFTWARE. 
\"\"\" import asyncio", "`{choice_text}` to a choice as \" \"its name or value", "limit -= len(title) tagscript = tag.tagscript if len(tagscript) > limit", "tags.\" ) return await ctx.send(message) counter = Counter({tag.name: tag.uses for", "async def slashtag_global_usage(self, ctx: commands.Context): await self.show_slash_tag_usage(ctx) @slashtag_global.command(\"restore\", hidden=True) @copy_doc(slashtag_restore)", "decorator class Commands(MixinMeta): @commands.guild_only() @commands.group(aliases=[\"st\"]) async def slashtag(self, ctx: commands.Context):", "self.config.guild(guild).tags.clear() await ctx.send(\"Tags deleted.\") @commands.is_owner() @slashtag.group(\"global\") @copy_doc(slashtag) async def slashtag_global(self,", "self, ctx: commands.Context, tag: GuildTagConverter, argument: str ): await tag.edit_single_option(ctx,", "command is already registered.\") slasheval = ApplicationCommand( self, name=\"eval\", description=\"SlashTags", ") await ctx.send(embed=embed) @slashtagset.command(\"appid\") async def slashtagset_appid(self, ctx: commands.Context, id:", "commands.Context, tag_name: TagName(check_global=False, check_regex=False), *, tagscript: TagScriptConverter, ): \"\"\" Add", "commands.Context, *, tag: GuildTagConverter): \"\"\"Get a slash tag's raw content.\"\"\"", "tag: GlobalTagConverter): await tag.edit_options(ctx) @slashtag_global_edit.command(\"argument\", aliases=[\"option\"]) @copy_doc(slashtag_edit_argument) async def slashtag_global_edit_argument(", "slash commands if it differs from the bot user ID.", "as \" \"its name or value exceeded the 100 character", "me \" \"with this invite link and try again: <https://discordapp.com/oauth2/authorize\"", "*, tagscript: TagScriptConverter ): await ctx.send(await tag.edit_tagscript(tagscript)) @slashtag_global_edit.command(\"tagscript\") @copy_doc(slashtag_edit_tagscript) async", "check_regex=False), *, tagscript: TagScriptConverter, ): \"\"\" Add a message command", 
"guide](https://phen-cogs.readthedocs.io/en/latest/slashtags/slashtags.html) \"\"\" await self.create_slash_tag(ctx, tag_name, tagscript, is_global=False) async def create_slash_tag(", "await tag.send_raw_tagscript(ctx) @slashtag_global.command(\"list\") @copy_doc(slashtag_list) async def slashtag_global_list(self, ctx: commands.Context): tags", "\"STRING\" else: choices = [] option_type = SlashOptionType[option_type.upper()] if not", "true_or_false if true_or_false is not None else not await self.config.testing_enabled()", "ctx: commands.Context, true_or_false: bool = None): \"\"\" Load or unload", "settings.\"\"\" eval_command = f\"✅ (**{self.eval_command}**)\" if self.eval_command else \"❎\" testing_enabled", "-= len(title) tagscript = tag.tagscript if len(tagscript) > limit -", "@slashtagset.command(\"appid\") async def slashtagset_appid(self, ctx: commands.Context, id: int = None):", "ctx.send(\"Query timed out, not adding arguments.\") else: if pred.result is", "[] pages = list(pagify(description)) for index, page in enumerate(pages, 1):", "should be split by a `:`.\", \"Example: `member:A member of", "can only contain characters \" \"that are alphanumeric or '_'", "return await ctx.send(\"Timed out, not deleting slash tags.\") if not", "\"\"\"View SlashTags settings.\"\"\" eval_command = f\"✅ (**{self.eval_command}**)\" if self.eval_command else", "10): usage_chart = box(tabulate(usage_data, headers=(\"Tag\", \"Uses\")), \"prolog\") embed = e.copy()", "self.guild_tag_cache[guild.id].clear() await self.config.guild(guild).tags.clear() await ctx.send(\"Tags deleted.\") @commands.is_owner() @slashtag.group(\"global\") @copy_doc(slashtag) async", "tags.\"\"\" tags = self.guild_tag_cache[ctx.guild.id] if not tags: return await ctx.send(\"There", "guild else \"There are no global slash tags.\" ) return", "characters \" \"that are alphanumeric or '_' or '-'.\", \"The", "command {command!r} on guild {ctx.guild!r}\", exc_info=error ) text = (", "types: 
{humanize_list([inline(n) for n in valid_option_types])}\", \"(select `string` if you", "ctx: commands.Context, tags: Dict[int, SlashTag], *, is_global: bool, ): description", "log.error( \"Failed to create command {command!r} on guild {ctx.guild!r}\", exc_info=error", "`[p]slashtag usage` \"\"\" await self.show_slash_tag_usage(ctx, ctx.guild) @commands.is_owner() @slashtag.command(\"restore\", hidden=True) async", "@slashtag.group(\"edit\", aliases=[\"e\"], invoke_without_command=True) async def slashtag_edit( self, ctx: commands.Context, tag:", "ctx.send(message) counter = Counter({tag.name: tag.uses for tag in tags.copy().values()}) e", "self.create_slash_tag( ctx, tag_name, tagscript, is_global=False, command_type=ApplicationCommandType.MESSAGE ) @commands.mod_or_permissions(manage_guild=True) @slashtag.command(\"user\") async", "self.slashtag_edit(ctx, tag, tagscript=tagscript) @slashtag_edit.command(\"name\") async def slashtag_edit_name( self, ctx: commands.Context,", "self.send_and_query_response( ctx, \"Are you sure you want to delete all", "tag.delete()) @slashtag_global.command(\"raw\") @copy_doc(slashtag_raw) async def slashtag_global_raw(self, ctx: commands.Context, *, tag:", "permission to add Slash Commands here. 
Reinvite me \" \"with", "this argument timed out.\", delete_after=15) break options.append(option) if i ==", "and values should be seperated by `:`, and each choice", "name.startswith(\"SUB\") ] valid_option_types.append(\"choices\") option_query = [ \"What should the argument", "usage guide](https://phen-cogs.readthedocs.io/en/latest/slashtags/slashtags.html) \"\"\" await self.create_slash_tag( ctx, tag_name, tagscript, is_global=False, command_type=ApplicationCommandType.MESSAGE", "to parse `{choice_text}` to a choice as \" \"its name", "await tag.send_info(ctx) @slashtag.command(\"raw\") async def slashtag_raw(self, ctx: commands.Context, *, tag:", "= await self.bot.wait_for(\"message\", check=pred, timeout=timeout) except asyncio.TimeoutError: await self.delete_quietly(ask) raise", "\"unloaded\" return await ctx.send(f\"The SlashTag interaction testing cog is already", "self.create_slash_tag( ctx, tag_name, tagscript, is_global=True, command_type=ApplicationCommandType.MESSAGE ) @commands.mod_or_permissions(manage_guild=True) @slashtag_global.command(\"user\") @copy_doc(slashtag_user)", "name.lower() for name in SlashOptionType.__members__.keys() if not name.startswith(\"SUB\") ] valid_option_types.append(\"choices\")", "await ctx.send(message) counter = Counter({tag.name: tag.uses for tag in tags.copy().values()})", "@copy_doc(slashtag_remove) async def slashtag_global_remove(self, ctx: commands.Context, *, tag: GlobalTagConverter): await", "i == 10: break pred = MessagePredicate.yes_or_no(ctx) try: await self.send_and_query_response(", "ctx), ) except asyncio.TimeoutError: return await ctx.send(\"Tag addition timed out.\")", "return repl def copy_doc(original: Union[commands.Command, types.FunctionType]): def decorator(overriden: Union[commands.Command, types.FunctionType]):", "pred = MessagePredicate.yes_or_no(ctx) try: await self.send_and_query_response( ctx, \"Are you sure", "THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
\"\"\" import", "slash tag's description.\"\"\" await ctx.send(await tag.edit_description(description)) @slashtag_edit.command(\"arguments\", aliases=[\"options\"]) async def", "SlashOption(name=\"body\", description=\"Code body to evaluate.\", required=True) ], ) await slasheval.register()", "be?\", \"The argument name and description should be split by", "= None await ctx.send(\"`/eval` has been deleted.\") @slashtagset.command(\"testing\") async def", "redbot.core.utils.chat_formatting import box, humanize_list, inline, pagify from redbot.core.utils.menus import DEFAULT_CONTROLS,", "self.restore_tags(ctx, ctx.guild) @commands.is_owner() @slashtag.command(\"clear\", hidden=True) async def slashtag_clear(self, ctx: commands.Context):", "*, tag: GlobalTagConverter): await tag.send_raw_tagscript(ctx) @slashtag_global.command(\"list\") @copy_doc(slashtag_list) async def slashtag_global_list(self,", "GlobalTagConverter, *, tagscript: TagScriptConverter ): await self.slashtag_global_edit(ctx, tag, tagscript=tagscript) @slashtag_global_edit.command(\"name\")", "TagScriptConverter ): await self.slashtag_global_edit(ctx, tag, tagscript=tagscript) @slashtag_global_edit.command(\"name\") @copy_doc(slashtag_edit_name) async def", ") @commands.mod_or_permissions(manage_guild=True) @slashtag.command(\"user\") async def slashtag_user( self, ctx: commands.Context, tag_name:", "name=name.lower(), description=description, option_type=option_type, required=required, choices=choices, ) @commands.mod_or_permissions(manage_guild=True) @slashtag.command(\"message\") async def", "[this documentation page](https://phen-cogs.readthedocs.io/en/latest/slashtags/slash_arguments.html) for more information on slash tag arguments.", "option.required: added_required = True except asyncio.TimeoutError: await ctx.send(\"Adding this argument", "await self.send_and_query_response( ctx, \"\\n\".join(option_query), MessagePredicate.lower_contained_in(valid_option_types, ctx), ) if option_type.lower() 
==", ") text = ( \"Looks like I don't have permission", "handler should print it, however info on the command options", "no global slash tags.\") await self.view_slash_tags(ctx, tags, is_global=True) @slashtag_global.command(\"usage\", aliases=[\"stats\"])", "by `:`, and each choice \" \"should be seperated by", "to add another argument? (Y/n)\", pred ) except asyncio.TimeoutError: await", "portions of the Software. THE SOFTWARE IS PROVIDED \"AS IS\",", "log.error(\"Failed to create command {command!r} on guild {ctx.guild!r}\") # exc", "on the command options is needed raise tag = SlashTag(", "a Pastebin link. \"\"\" await self.create_slash_tag(ctx, tag_name, link, is_global=False) @commands.mod_or_permissions(manage_guild=True)", "@copy_doc(slashtag_raw) async def slashtag_global_raw(self, ctx: commands.Context, *, tag: GlobalTagConverter): await", "self.eval_command = None await ctx.send(\"`/eval` has been deleted.\") @slashtagset.command(\"testing\") async", "tag_name: TagName(global_priority=True), *, tagscript: TagScriptConverter, ): await self.create_slash_tag(ctx, tag_name, tagscript,", "previous one was optional.\", delete_after=15, ) required = False return", "@slashtag.command(\"list\") async def slashtag_list(self, ctx: commands.Context): \"\"\"View stored slash tags.\"\"\"", "await self.send_and_query_response( ctx, \"Is this argument required? 
(Y/n)\\n*Keep in mind", "self.send_and_query_response( ctx, \"\\n\".join(option_query), MessagePredicate.lower_contained_in(valid_option_types, ctx), ) if option_type.lower() == \"choices\":", "description: str ): await ctx.send(await tag.edit_description(description)) @slashtag_global_edit.command(\"arguments\", aliases=[\"options\"]) @copy_doc(slashtag_edit_arguments) async", "modify, merge, publish, distribute, sublicense, and/or sell copies of the", "= usage_chart embeds.append(embed) await menu(ctx, embeds, DEFAULT_CONTROLS) @slashtag.command(\"usage\", aliases=[\"stats\"]) async", "headers=(\"Tag\", \"Uses\")), \"prolog\") embed = e.copy() embed.description = usage_chart embeds.append(embed)", "match.group(1), match.group(2) valid_option_types = [ name.lower() for name in SlashOptionType.__members__.keys()", "await self.show_slash_tag_usage(ctx) @slashtag_global.command(\"restore\", hidden=True) @copy_doc(slashtag_restore) async def slashtag_global_restore(self, ctx: commands.Context):", "arguments.\") break if pred.result is False: break return options async", "OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH", "@copy_doc(slashtag) async def slashtag_global(self, ctx: commands.Context): pass @slashtag_global.command(\"add\") @copy_doc(slashtag_add) async", "ctx: commands.Context, tag_name: TagName(global_priority=True), *, tagscript: TagScriptConverter, ): await self.create_slash_tag(ctx,", "from redbot.core.utils.menus import DEFAULT_CONTROLS, menu from redbot.core.utils.predicates import MessagePredicate from", "ctx: commands.Context): \"\"\"Manage SlashTags settings.\"\"\" @slashtagset.command(\"settings\") async def slashtagset_settings(self, ctx:", "if pred.result is False: break return options async def send_and_query_response(", "if you don't understand)\", ] option_type = await self.send_and_query_response( ctx,", "def show_slash_tag_usage(self, ctx: commands.Context, guild: discord.Guild = None): tags =", "@slashtag_global.command(\"user\") 
@copy_doc(slashtag_user) async def slashtag_global_user( self, ctx: commands.Context, tag_name: TagName(global_priority=True,", "tag: GuildTagConverter): \"\"\"Get a slash tag's raw content.\"\"\" await tag.send_raw_tagscript(ctx)", "return choices async def get_option( self, ctx: commands.Context, *, added_required:", "slasheval = ApplicationCommand( self, name=\"eval\", description=\"SlashTags debugging eval command. Only", "re.Match) -> str: if match.group(1): return \"[p]slashtag global\" repl =", "like to add as choices to \" \"the tag. Choice", "commands.Context): tags = self.global_tag_cache if not tags: return await ctx.send(\"There", "slash_tags = \"global slash tags\" e.set_author(name=\"Global Slash Tags\", icon_url=ctx.me.avatar_url) else:", "slashtagset(self, ctx: commands.Context): \"\"\"Manage SlashTags settings.\"\"\" @slashtagset.command(\"settings\") async def slashtagset_settings(self,", "self.create_slash_tag(ctx, tag_name, link, is_global=False) @commands.mod_or_permissions(manage_guild=True) @slashtag.group(\"edit\", aliases=[\"e\"], invoke_without_command=True) async def", "[] guild_id = None if is_global else ctx.guild.id if command_type", "commands.Context, query: str, pred: MessagePredicate = None, *, timeout: int", "in SlashOptionType.__members__.keys() if not name.startswith(\"SUB\") ] valid_option_types.append(\"choices\") option_query = [", "*, tag: GuildTagConverter): \"\"\"Get a slash tag's raw content.\"\"\" await", "CHOICE_LIMIT = 25 log = logging.getLogger(\"red.phenom4n4n.slashtags.commands\") def _sub(match: re.Match) ->", "is_global else ctx.guild.id if command_type == ApplicationCommandType.CHAT_INPUT: try: description =", "about it. \"\"\" app_id = id or self.bot.user.id await self.config.application_id.set(app_id)", "by a `:`.\", \"Example: `member:A member of this server.`\\n\", \"*Slash", "`|`. 
Example:\\n`dog:Doggo|cat:Catto`\" ) response = await self.send_and_query_response(ctx, query) choices =", "async def slashtag_global_user( self, ctx: commands.Context, tag_name: TagName(global_priority=True, check_regex=False), *,", "for tag in sorted(tags.values(), key=lambda t: t.name) ] description =", "self.eval_command: return await ctx.send(\"An eval command is already registered.\") slasheval", "sell copies of the Software, and to permit persons to", "ctx.send(await tag.edit_description(description)) @slashtag_edit.command(\"arguments\", aliases=[\"options\"]) async def slashtag_edit_arguments(self, ctx: commands.Context, tag:", ") @commands.mod_or_permissions(manage_guild=True) @slashtag.command(\"pastebin\", aliases=[\"++\"]) async def slashtag_pastebin( self, ctx: commands.Context,", "if target_state is self.testing_enabled: loaded = \"loaded\" if target_state else", "and each choice \" \"should be seperated by `|`. Example:\\n`dog:Doggo|cat:Catto`\"", "async def create_slash_tag( self, ctx: commands.Context, tag_name: str, tagscript: str,", "..abc import MixinMeta from ..converters import ( GlobalTagConverter, GuildTagConverter, PastebinConverter,", "@slashtag_global.command(\"list\") @copy_doc(slashtag_list) async def slashtag_global_list(self, ctx: commands.Context): tags = self.global_tag_cache", "ctx.send(\"Tag addition timed out.\") else: description = \"\" if command_type", "tag: GlobalTagConverter, *, name: TagName(global_priority=True), ): await ctx.send(await tag.edit_name(name)) @slashtag_global_edit.command(\"description\")", "@slashtag_global.command(\"message\") @copy_doc(slashtag_message) async def slashtag_global_message( self, ctx: commands.Context, tag_name: TagName(global_priority=True,", "by `|`. 
Example:\\n`dog:Doggo|cat:Catto`\" ) response = await self.send_and_query_response(ctx, query) choices", "TagName(global_priority=True, check_regex=False), *, tagscript: TagScriptConverter, ): await self.create_slash_tag( ctx, tag_name,", "copy, modify, merge, publish, distribute, sublicense, and/or sell copies of", "= Counter({tag.name: tag.uses for tag in tags.copy().values()}) e = discord.Embed(title=\"Slash", "return message.content async def get_choices(self, ctx: commands.Context) -> List[ApplicationOptionChoice]: query", "target_state is self.testing_enabled: loaded = \"loaded\" if target_state else \"unloaded\"", "] option_type = await self.send_and_query_response( ctx, \"\\n\".join(option_query), MessagePredicate.lower_contained_in(valid_option_types, ctx), )", "options: List[SlashOption] = [] guild_id = None if is_global else", "in choice_text: await ctx.send( f\"Failed to parse `{choice_text}` to a", "or substantial portions of the Software. THE SOFTWARE IS PROVIDED", "EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR", "option types: {humanize_list([inline(n) for n in valid_option_types])}\", \"(select `string` if", "): \"\"\" Add a message command tag with TagScript. 
[Slash", "tag's argument by name.\"\"\" await tag.edit_single_option(ctx, argument) @commands.mod_or_permissions(manage_guild=True) @slashtag.command(\"remove\", aliases=[\"delete\",", "\" limit -= len(title) tagscript = tag.tagscript if len(tagscript) >", "and values you would like to add as choices to", "EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES", "publish, distribute, sublicense, and/or sell copies of the Software, and", "seperated by `:`, and each choice \" \"should be seperated", "commands.Context): \"\"\"Restore all slash tags from the database.\"\"\" await self.restore_tags(ctx,", "self, ctx: commands.Context, tag_name: TagName(check_global=False), *, link: PastebinConverter, ): \"\"\"", "TAG_RE.sub(_sub, doc) if isinstance(overriden, commands.Command): overriden._help_override = doc else: overriden.__doc__", "you don't know what this means, you don't need to", "SlashOptionType.__members__.keys() if not name.startswith(\"SUB\") ] valid_option_types.append(\"choices\") option_query = [ \"What", "bool = False, command_type: ApplicationCommandType = ApplicationCommandType.CHAT_INPUT, ): options: List[SlashOption]", "= id or self.bot.user.id await self.config.application_id.set(app_id) self.application_id = app_id await", "required=required, choices=choices, ) @commands.mod_or_permissions(manage_guild=True) @slashtag.command(\"message\") async def slashtag_message( self, ctx:", "invoke_without_command=True) async def slashtag_edit( self, ctx: commands.Context, tag: GuildTagConverter, *,", "is_global=False, command_type=ApplicationCommandType.USER ) @commands.mod_or_permissions(manage_guild=True) @slashtag.command(\"pastebin\", aliases=[\"++\"]) async def slashtag_pastebin( self,", "@copy_doc(slashtag_pastebin) async def slashtag_global_pastebin( self, ctx: commands.Context, tag_name: TagName(check_global=False), *,", "be included in all copies or substantial portions of the", "async def slashtag_global_pastebin( self, ctx: commands.Context, 
tag_name: TagName(check_global=False), *, link:", "slash tag's arguments. See [this documentation page](https://phen-cogs.readthedocs.io/en/latest/slashtags/slash_arguments.html) for more information", "@slashtag_global.command(\"pastebin\", aliases=[\"++\"]) @copy_doc(slashtag_pastebin) async def slashtag_global_pastebin( self, ctx: commands.Context, tag_name:", "inline, pagify from redbot.core.utils.menus import DEFAULT_CONTROLS, menu from redbot.core.utils.predicates import", "sure you want to delete all slash tags on this", "Add a slash tag with TagScript. [Slash tag usage guide](https://phen-cogs.readthedocs.io/en/latest/slashtags/slashtags.html)", "you don't need to worry about it. \"\"\" app_id =", "only contain characters \" \"that are alphanumeric or '_' or", "ctx: commands.Context, query: str, pred: MessagePredicate = None, *, timeout:", "ctx: commands.Context) -> List[ApplicationOptionChoice]: query = ( \"Send the list", "print it, however info on the command options is needed", "query: str, pred: MessagePredicate = None, *, timeout: int =", "self, ctx: commands.Context, tag: GuildTagConverter, *, name: TagName(check_global=False) ): \"\"\"Edit", "ApplicationOptionChoice, SlashOptionType from ..objects import ApplicationCommand, ApplicationCommandType, SlashOption, SlashTag from", ") except asyncio.TimeoutError: await ctx.send(\"Query timed out, not adding arguments.\")", "= await ctx.send(query) try: message = await self.bot.wait_for(\"message\", check=pred, timeout=timeout)", "@slashtagset.command(\"rmeval\") async def slashtagset_rmeval(self, ctx: commands.Context): \"\"\"Remove the slash eval", "except asyncio.TimeoutError: return await ctx.send(\"Tag addition timed out.\") else: description", "aliases=[\"++\"]) @copy_doc(slashtag_pastebin) async def slashtag_global_pastebin( self, ctx: commands.Context, tag_name: TagName(check_global=False),", "ApplicationCommand( self, name=tag_name, description=description, guild_id=guild_id, options=options, 
type=command_type, ) try: await", "ctx.send(await tag.edit_name(name)) @slashtag_edit.command(\"description\") async def slashtag_edit_description( self, ctx: commands.Context, tag:", "self.bot.user.id await self.config.application_id.set(app_id) self.application_id = app_id await ctx.send(f\"Application ID set", "by a `:`.\", delete_after=15, ) continue if not CHOICE_RE.match(choice_text): await", "the argument type be?\", f\"Valid option types: {humanize_list([inline(n) for n", "must be less than or equal to 100 characters.*\", ]", "ctx.send(\"Ok, not deleting slash tags.\") guild: discord.Guild = ctx.guild await", "\"What should the argument type be?\", f\"Valid option types: {humanize_list([inline(n)", "import re import types from collections import Counter from copy", "for debugging.\"\"\" if self.eval_command: return await ctx.send(\"An eval command is", "deleted.\") @slashtagset.command(\"testing\") async def slashtagset_testing(self, ctx: commands.Context, true_or_false: bool =", "+ \"...\" tagscript = tagscript.replace(\"\\n\", \" \") return f\"{title}{discord.utils.escape_markdown(tagscript)}\" async", "commands.Context): \"\"\"View SlashTags settings.\"\"\" eval_command = f\"✅ (**{self.eval_command}**)\" if self.eval_command", "async def slashtag_edit_tagscript( self, ctx: commands.Context, tag: GuildTagConverter, *, tagscript:", "def format_tagscript(cls, tag: SlashTag, limit: int = 60) -> str:", "to delete all slash tags on this server? 
(Y/n)\", pred", "\"\"\" Load or unload the SlashTag interaction development test cog.", "or '_' or '-'.\", \"The argument description must be less", "): await self.create_slash_tag( ctx, tag_name, tagscript, is_global=True, command_type=ApplicationCommandType.MESSAGE ) @commands.mod_or_permissions(manage_guild=True)", "in enumerate(pages, 1): embed = e.copy() embed.description = page embed.set_footer(text=f\"{index}/{len(pages)}", "@slashtag_edit.command(\"arguments\", aliases=[\"options\"]) async def slashtag_edit_arguments(self, ctx: commands.Context, tag: GuildTagConverter): \"\"\"", "= tagscript[:limit] + \"...\" tagscript = tagscript.replace(\"\\n\", \" \") return", "command hasn't been registered.\") try: await self.http.remove_slash_command(self.eval_command) except discord.HTTPException: pass", "..testing.button_menus import menu as button_menu from ..utils import ARGUMENT_NAME_DESCRIPTION, chunks,", "conditions: The above copyright notice and this permission notice shall", "the Software without restriction, including without limitation the rights to", "THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND", "not CHOICE_RE.match(choice_text): await ctx.send( f\"Failed to parse `{choice_text}` to a", "tag.send_info(ctx) @slashtag.command(\"raw\") async def slashtag_raw(self, ctx: commands.Context, *, tag: GuildTagConverter):", "tag self.guild_tag_cache[guild.id].clear() await self.config.guild(guild).tags.clear() await ctx.send(\"Tags deleted.\") @commands.is_owner() @slashtag.group(\"global\") @copy_doc(slashtag)", "sorted(tags.values(), key=lambda t: t.name) ] description = \"\\n\".join(description) e =", "show_slash_tag_usage(self, ctx: commands.Context, guild: discord.Guild = None): tags = self.guild_tag_cache[guild.id]", "from typing import Dict, List, Union import discord from redbot.core", "choices ({CHOICE_LIMIT}).\") break return choices async def get_option( self, ctx:", "t: t.name) ] description = \"\\n\".join(description) e = 
discord.Embed(color=await ctx.embed_color())", "tagscript.replace(\"\\n\", \" \") return f\"{title}{discord.utils.escape_markdown(tagscript)}\" async def view_slash_tags( self, ctx:", "if self.testing_enabled else \"❎\" description = [ f\"Application ID: **{self.application_id}**\",", "match.group(2) valid_option_types = [ name.lower() for name in SlashOptionType.__members__.keys() if", "WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN", "you choose to make this argument optional, all following arguments", "page in enumerate(pages, 1): embed = e.copy() embed.description = page", "are no global slash tags.\" ) return await ctx.send(message) counter", "await menu(ctx, embeds, DEFAULT_CONTROLS) await button_menu(ctx, embeds) @slashtag.command(\"list\") async def", "def slashtag_edit( self, ctx: commands.Context, tag: GuildTagConverter, *, tagscript: TagScriptConverter", "a slash tag.\"\"\" await ctx.send(await tag.edit_tagscript(tagscript)) @slashtag_edit.command(\"tagscript\") async def slashtag_edit_tagscript(", "slashtag_clear(self, ctx: commands.Context): \"\"\"Clear all slash tags for this server.\"\"\"", "\"\"\"Add a slash eval command for debugging.\"\"\" if self.eval_command: return", "tag_name, tagscript, is_global=False) async def create_slash_tag( self, ctx: commands.Context, tag_name:", "phenom4n4n Permission is hereby granted, free of charge, to any", "THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,", "get_option( self, ctx: commands.Context, *, added_required: bool = False )", "type=command_type, ) try: await command.register() except discord.Forbidden as error: log.error(", "): \"\"\"Edit a single slash tag's argument by name.\"\"\" await", "async def slashtag_remove(self, ctx: commands.Context, *, tag: GuildTagConverter): \"\"\"Delete a", "else self.global_tag_cache if not tags: message = ( \"This server", "else \"❎\" testing_enabled = \"✅\" if self.testing_enabled else \"❎\" description", "tagscript = tag.tagscript if len(tagscript) > limit 
- 3: tagscript", "def slashtagset_settings(self, ctx: commands.Context): \"\"\"View SlashTags settings.\"\"\" eval_command = f\"✅", "tag's arguments. See [this documentation page](https://phen-cogs.readthedocs.io/en/latest/slashtags/slash_arguments.html) for more information on", "tags = self.global_tag_cache if not tags: return await ctx.send(\"There are", "def create_slash_tag( self, ctx: commands.Context, tag_name: str, tagscript: str, *,", "await ctx.send(embed=embed) @slashtagset.command(\"appid\") async def slashtagset_appid(self, ctx: commands.Context, id: int", "OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR", "async def slashtag_global_edit_tagscript( self, ctx: commands.Context, tag: GlobalTagConverter, *, tagscript:", "self.config.testing_enabled() ) if target_state is self.testing_enabled: loaded = \"loaded\" if", "= ApplicationCommand( self, name=tag_name, description=description, guild_id=guild_id, options=options, type=command_type, ) try:", "): await self.create_slash_tag(ctx, tag_name, tagscript, is_global=True) @commands.mod_or_permissions(manage_guild=True) @slashtag_global.command(\"message\") @copy_doc(slashtag_message) async", "command.\"\"\" if not self.eval_command: return await ctx.send(\"The eval command hasn't", "commands.Command): overriden._help_override = doc else: overriden.__doc__ = doc return overriden", "def slashtag_user( self, ctx: commands.Context, tag_name: TagName(check_global=False, check_regex=False), *, tagscript:", "@classmethod def format_tagscript(cls, tag: SlashTag, limit: int = 60) ->", "[ \"What should the argument type be?\", f\"Valid option types:", "async def slashtag_clear(self, ctx: commands.Context): \"\"\"Clear all slash tags for", "\"that are alphanumeric or '_' or '-'.\", \"The argument description", "async def slashtag_list(self, ctx: commands.Context): \"\"\"View stored slash tags.\"\"\" tags", "GuildTagConverter, *, name: TagName(check_global=False) ): \"\"\"Edit a slash tag's name.\"\"\"", "def 
slashtag_global_edit_arguments(self, ctx: commands.Context, tag: GlobalTagConverter): await tag.edit_options(ctx) @slashtag_global_edit.command(\"argument\", aliases=[\"option\"])", "description = [ self.format_tagscript(tag) for tag in sorted(tags.values(), key=lambda t:", "SlashOptionType[option_type.upper()] if not added_required: pred = MessagePredicate.yes_or_no(ctx) await self.send_and_query_response( ctx,", "server.\"\"\" await tag.send_info(ctx) @slashtag.command(\"raw\") async def slashtag_raw(self, ctx: commands.Context, *,", "message = ( \"This server has no slash tags.\" if", "commands.Context, *, tag: GuildTagConverter): \"\"\"Delete a slash tag.\"\"\" await ctx.send(await", "\"\"\"Edit a slash tag's name.\"\"\" await ctx.send(await tag.edit_name(name)) @slashtag_edit.command(\"description\") async", "Dict[int, SlashTag], *, is_global: bool, ): description = [ self.format_tagscript(tag)", "except asyncio.TimeoutError: await self.delete_quietly(ask) raise await self.delete_quietly(ask) await self.delete_quietly(message) return", "@commands.mod_or_permissions(manage_guild=True) @slashtag.group(\"edit\", aliases=[\"e\"], invoke_without_command=True) async def slashtag_edit( self, ctx: commands.Context,", "overriden.__doc__ = doc return overriden return decorator class Commands(MixinMeta): @commands.guild_only()", "to permit persons to whom the Software is furnished to", "this.\", options=[ SlashOption(name=\"body\", description=\"Code body to evaluate.\", required=True) ], )", "to use TagScript blocks. 
\"\"\" @commands.mod_or_permissions(manage_guild=True) @slashtag.command(\"add\", aliases=[\"create\", \"+\"]) async", "\"slash tags\" e.set_author(name=\"Stored Slash Tags\", icon_url=ctx.guild.icon_url) embeds = [] pages", "be?\", f\"Valid option types: {humanize_list([inline(n) for n in valid_option_types])}\", \"(select", "MessagePredicate = None, *, timeout: int = 60, ) ->", "collections import Counter from copy import copy from typing import", "ctx.send( f\"Failed to parse `{choice_text}` to a choice as \"", "tag.edit_single_option(ctx, argument) @slashtag_global.command(\"remove\", aliases=[\"delete\", \"-\"]) @copy_doc(slashtag_remove) async def slashtag_global_remove(self, ctx:", "not None else not await self.config.testing_enabled() ) if target_state is", "100 characters.*\", ] name_pred = MessagePredicate.regex(ARGUMENT_NAME_DESCRIPTION, ctx) await self.send_and_query_response(ctx, \"\\n\".join(name_desc),", "from ..converters import ( GlobalTagConverter, GuildTagConverter, PastebinConverter, TagConverter, TagName, TagScriptConverter,", "added_required = True except asyncio.TimeoutError: await ctx.send(\"Adding this argument timed", "documentation page](https://phen-cogs.readthedocs.io/en/latest/slashtags/slash_arguments.html) for more information on slash tag arguments. 
\"\"\"", "[ f\"Application ID: **{self.application_id}**\", f\"Eval command: {eval_command}\", f\"Test cog loaded:", "( \"Looks like I don't have permission to add Slash", "made optional as the previous one was optional.\", delete_after=15, )", "SlashTag, limit: int = 60) -> str: title = f\"`{tag.type.get_prefix()}{tag.name}`", "to parse `{choice_text}` to a choice as its name and", "this invite link and try again: <https://discordapp.com/oauth2/authorize\" f\"?client_id={self.bot.user.id}&scope=bot%20applications.commands>\" ) return", "target_state else \"unloaded\" return await ctx.send(f\"The SlashTag interaction testing cog", "this server.\"\"\" pred = MessagePredicate.yes_or_no(ctx) try: await self.send_and_query_response( ctx, \"Are", "PastebinConverter, ): await self.create_slash_tag(ctx, tag_name, link, is_global=True) @slashtag_global.group(\"edit\", aliases=[\"e\"], invoke_without_command=True)", "\"\"\"Edit a slash tag.\"\"\" await ctx.send(await tag.edit_tagscript(tagscript)) @slashtag_edit.command(\"tagscript\") async def", "ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO", "usage stats. **Example:** `[p]slashtag usage` \"\"\" await self.show_slash_tag_usage(ctx, ctx.guild) @commands.is_owner()", "target_state = ( true_or_false if true_or_false is not None else", "names may not exceed 32 characters and can only contain", "choose to make this argument optional, all following arguments must", "\"\"\" Add a user command tag with TagScript. [Slash tag", "server.\") await self.view_slash_tags(ctx, tags, is_global=False) async def show_slash_tag_usage(self, ctx: commands.Context,", "TagScriptConverter, ): \"\"\" Add a message command tag with TagScript.", "return f\"{title}{discord.utils.escape_markdown(tagscript)}\" async def view_slash_tags( self, ctx: commands.Context, tags: Dict[int,", "commands.Context, tag: GuildTagConverter): \"\"\" Edit a slash tag's arguments. 
See", "argument by name.\"\"\" await tag.edit_single_option(ctx, argument) @commands.mod_or_permissions(manage_guild=True) @slashtag.command(\"remove\", aliases=[\"delete\", \"-\"])", "@slashtag_global.command(\"usage\", aliases=[\"stats\"]) @copy_doc(slashtag_usage) async def slashtag_global_usage(self, ctx: commands.Context): await self.show_slash_tag_usage(ctx)", "deleted.\") @commands.is_owner() @slashtag.group(\"global\") @copy_doc(slashtag) async def slashtag_global(self, ctx: commands.Context): pass", "= self.guild_tag_cache[ctx.guild.id] if not tags: return await ctx.send(\"There are no", "\"-\"]) @copy_doc(slashtag_remove) async def slashtag_global_remove(self, ctx: commands.Context, *, tag: GlobalTagConverter):", "link. \"\"\" await self.create_slash_tag(ctx, tag_name, link, is_global=False) @commands.mod_or_permissions(manage_guild=True) @slashtag.group(\"edit\", aliases=[\"e\"],", "to whom the Software is furnished to do so, subject", "equal to 100 characters.*\", ] name_pred = MessagePredicate.regex(ARGUMENT_NAME_DESCRIPTION, ctx) await", "await ctx.send(\"There are no slash tags on this server.\") await", "\"Would you like to add arguments to this tag? (Y/n)\",", "argument) @slashtag_global.command(\"remove\", aliases=[\"delete\", \"-\"]) @copy_doc(slashtag_remove) async def slashtag_global_remove(self, ctx: commands.Context,", "tags = self.guild_tag_cache[ctx.guild.id] if not tags: return await ctx.send(\"There are", "how to use TagScript blocks. 
\"\"\" @commands.mod_or_permissions(manage_guild=True) @slashtag.command(\"add\", aliases=[\"create\", \"+\"])", "be seperated by `:`, and each choice \" \"should be", "await self.delete_quietly(ask) raise await self.delete_quietly(ask) await self.delete_quietly(message) return message.content async", "slashtag_add( self, ctx: commands.Context, tag_name: TagName(check_global=False), *, tagscript: TagScriptConverter, ):", "registered.\") try: await self.http.remove_slash_command(self.eval_command) except discord.HTTPException: pass await self.config.eval_command.clear() self.eval_command", "\"\\n\".join(name_desc), name_pred) match = name_pred.result name, description = match.group(1), match.group(2)", "link and try again: <https://discordapp.com/oauth2/authorize\" f\"?client_id={self.bot.user.id}&scope=bot%20applications.commands>\" ) return await ctx.send(text)", "tag: GlobalTagConverter, *, description: str ): await ctx.send(await tag.edit_description(description)) @slashtag_global_edit.command(\"arguments\",", "= None, *, timeout: int = 60, ) -> str:", "bots. If you don't know what this means, you don't", "is needed raise tag = SlashTag( self, tagscript, guild_id=guild_id, author_id=ctx.author.id,", "of the Software, and to permit persons to whom the", "+= name if name.istitle(): repl = repl.title() return repl def", "server has no slash tags.\" if guild else \"There are", "commands.Context, id: int = None): \"\"\" Manually set the application", "be seperated by `|`. Example:\\n`dog:Doggo|cat:Catto`\" ) response = await self.send_and_query_response(ctx,", "self, ctx: commands.Context, tag_name: TagName(check_global=False, check_regex=False), *, tagscript: TagScriptConverter, ):", "argument description must be less than or equal to 100", "usage_chart embeds.append(embed) await menu(ctx, embeds, DEFAULT_CONTROLS) @slashtag.command(\"usage\", aliases=[\"stats\"]) async def", "another argument? 
(Y/n)\", pred ) except asyncio.TimeoutError: await ctx.send(\"Query timed", "overriden._help_override = doc else: overriden.__doc__ = doc return overriden return", "\"\"\"Edit a single slash tag's argument by name.\"\"\" await tag.edit_single_option(ctx,", "id: int = None): \"\"\" Manually set the application ID", "MessagePredicate from tabulate import tabulate from ..abc import MixinMeta from", "GuildTagConverter, PastebinConverter, TagConverter, TagName, TagScriptConverter, ) from ..http import ApplicationOptionChoice,", "async def slashtag_global(self, ctx: commands.Context): pass @slashtag_global.command(\"add\") @copy_doc(slashtag_add) async def", "(the \"Software\"), to deal in the Software without restriction, including", "choice as its name and value \" \"weren't seperated by", "GuildTagConverter, argument: str ): \"\"\"Edit a single slash tag's argument", "been deleted.\") @slashtagset.command(\"testing\") async def slashtagset_testing(self, ctx: commands.Context, true_or_false: bool", "is_global=True, command_type=ApplicationCommandType.USER ) @slashtag_global.command(\"pastebin\", aliases=[\"++\"]) @copy_doc(slashtag_pastebin) async def slashtag_global_pastebin( self,", "@copy_doc(slashtag_edit_description) async def slashtag_global_edit_description( self, ctx: commands.Context, tag: GlobalTagConverter, *,", "a message command tag with TagScript. 
[Slash tag usage guide](https://phen-cogs.readthedocs.io/en/latest/slashtags/slashtags.html)", "PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR", "= ctx.guild await self.http.put_guild_slash_commands(guild.id, []) for tag in copy(self.guild_tag_cache[guild.id]).values(): tag.remove_from_cache()", "break pred = MessagePredicate.yes_or_no(ctx) try: await self.send_and_query_response( ctx, \"Would you", "'_' or '-'.\", \"The argument description must be less than", "if isinstance(overriden, commands.Command): overriden._help_override = doc else: overriden.__doc__ = doc", "\"...\" tagscript = tagscript.replace(\"\\n\", \" \") return f\"{title}{discord.utils.escape_markdown(tagscript)}\" async def", "except asyncio.TimeoutError: await ctx.send(\"Adding this argument timed out.\", delete_after=15) break", "create command {command!r} on guild {ctx.guild!r}\") # exc info unneeded", "don't know what this means, you don't need to worry", "response = await self.send_and_query_response(ctx, query) choices = [] for choice_text", "out.\", delete_after=15) break options.append(option) if i == 10: break pred", "in the Software without restriction, including without limitation the rights", "Stats\", color=await ctx.embed_color()) embeds = [] for usage_data in chunks(counter.most_common(),", "argument name be and description be?\", \"The argument name and", "and description should be split by a `:`.\", \"Example: `member:A", "ctx, \"Would you like to add another argument? 
(Y/n)\", pred", "\"Are you sure you want to delete all slash tags", "slash tags\" e.set_author(name=\"Global Slash Tags\", icon_url=ctx.me.avatar_url) else: slash_tags = \"slash", "value \" \"weren't seperated by a `:`.\", delete_after=15, ) continue", "command_type=ApplicationCommandType.USER ) @slashtag_global.command(\"pastebin\", aliases=[\"++\"]) @copy_doc(slashtag_pastebin) async def slashtag_global_pastebin( self, ctx:", "self.http.remove_slash_command(self.eval_command) except discord.HTTPException: pass await self.config.eval_command.clear() self.eval_command = None await", "): description = [ self.format_tagscript(tag) for tag in sorted(tags.values(), key=lambda", "is False: break return options async def send_and_query_response( self, ctx:", "eval command. Only bot owners can use this.\", options=[ SlashOption(name=\"body\",", "slashtag_remove(self, ctx: commands.Context, *, tag: GuildTagConverter): \"\"\"Delete a slash tag.\"\"\"", ") continue choice = ApplicationOptionChoice(*choice_text.split(\":\", 1)) choices.append(choice) if len(choices) >=", "tag_name, link, is_global=True) @slashtag_global.group(\"edit\", aliases=[\"e\"], invoke_without_command=True) @copy_doc(slashtag_edit) async def slashtag_global_edit(", "hidden=True) async def slashtag_clear(self, ctx: commands.Context): \"\"\"Clear all slash tags", "await ctx.send(f\"The SlashTag interaction testing cog is already {loaded}.\") await", "None if is_global else ctx.guild.id if command_type == ApplicationCommandType.CHAT_INPUT: try:", "= None): \"\"\" Load or unload the SlashTag interaction development", "HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,", "argument: str ): \"\"\"Edit a single slash tag's argument by", "`:`.\", \"Example: `member:A member of this server.`\\n\", \"*Slash argument names", "[botname] slash commands if it differs from the bot user", "OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
\"\"\"", "( GlobalTagConverter, GuildTagConverter, PastebinConverter, TagConverter, TagName, TagScriptConverter, ) from ..http", "been registered.\") @commands.check(dev_check) @slashtagset.command(\"rmeval\") async def slashtagset_rmeval(self, ctx: commands.Context): \"\"\"Remove", "1)) choices.append(choice) if len(choices) >= CHOICE_LIMIT: await ctx.send(f\"Reached max choices", "ctx: commands.Context): pass @slashtag_global.command(\"add\") @copy_doc(slashtag_add) async def slashtag_global_add( self, ctx:", "pred is None: pred = MessagePredicate.same_context(ctx) ask = await ctx.send(query)", "get_choices(self, ctx: commands.Context) -> List[ApplicationOptionChoice]: query = ( \"Send the", "from redbot.core.utils.predicates import MessagePredicate from tabulate import tabulate from ..abc", "if not tags: message = ( \"This server has no", "app_id await ctx.send(f\"Application ID set to `{id}`.\") @commands.check(dev_check) @slashtagset.command(\"addeval\") async", "f\"Failed to parse `{choice_text}` to a choice as its name", "): await self.create_slash_tag(ctx, tag_name, link, is_global=True) @slashtag_global.group(\"edit\", aliases=[\"e\"], invoke_without_command=True) @copy_doc(slashtag_edit)", "choice_text: await ctx.send( f\"Failed to parse `{choice_text}` to a choice", "don't need to worry about it. \"\"\" app_id = id", "ApplicationCommandType.CHAT_INPUT: pred = MessagePredicate.yes_or_no(ctx) try: await self.send_and_query_response( ctx, \"Would you", "TagScriptConverter, ): await self.create_slash_tag(ctx, tag_name, tagscript, is_global=True) @commands.mod_or_permissions(manage_guild=True) @slashtag_global.command(\"message\") @copy_doc(slashtag_message)", "TagName(check_global=False), *, link: PastebinConverter, ): \"\"\" Add a slash tag", "commands.Context): \"\"\" See this slash tag usage stats. 
**Example:** `[p]slashtag", "@commands.is_owner() @commands.group(aliases=[\"slashset\"]) async def slashtagset(self, ctx: commands.Context): \"\"\"Manage SlashTags settings.\"\"\"", "ctx.send(await tag.edit_tagscript(tagscript)) @slashtag_global_edit.command(\"tagscript\") @copy_doc(slashtag_edit_tagscript) async def slashtag_global_edit_tagscript( self, ctx: commands.Context,", "on this server.\"\"\" await tag.send_info(ctx) @slashtag.command(\"raw\") async def slashtag_raw(self, ctx:", "self.config.eval_command.set(slasheval.id) self.eval_command = slasheval.id await ctx.send(\"`/eval` has been registered.\") @commands.check(dev_check)", "embeds.append(embed) await menu(ctx, embeds, DEFAULT_CONTROLS) @slashtag.command(\"usage\", aliases=[\"stats\"]) async def slashtag_usage(self,", "def slashtag(self, ctx: commands.Context): \"\"\" Slash Tag management with TagScript.", "List[SlashOption]: added_required = False for i in range(1, 11): try:", "usage_data in chunks(counter.most_common(), 10): usage_chart = box(tabulate(usage_data, headers=(\"Tag\", \"Uses\")), \"prolog\")", "from tabulate import tabulate from ..abc import MixinMeta from ..converters", "embeds.append(embed) # await menu(ctx, embeds, DEFAULT_CONTROLS) await button_menu(ctx, embeds) @slashtag.command(\"list\")", "self.create_slash_tag(ctx, tag_name, link, is_global=True) @slashtag_global.group(\"edit\", aliases=[\"e\"], invoke_without_command=True) @copy_doc(slashtag_edit) async def", "LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN", "in mind that if you choose to make this argument", "def slashtag_edit_name( self, ctx: commands.Context, tag: GuildTagConverter, *, name: TagName(check_global=False)", "delete_after=15) break options.append(option) if i == 10: break pred =", "asyncio.TimeoutError: return await ctx.send(\"Timed out, not deleting slash tags.\") if", "for [botname] slash commands if it differs from the bot", "ctx, tag_name, tagscript, is_global=False, 
command_type=ApplicationCommandType.USER ) @commands.mod_or_permissions(manage_guild=True) @slashtag.command(\"pastebin\", aliases=[\"++\"]) async", "OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE", "pass @slashtag_global.command(\"add\") @copy_doc(slashtag_add) async def slashtag_global_add( self, ctx: commands.Context, tag_name:", "( \"This server has no slash tags.\" if guild else", "don't have permission to add Slash Commands here. Reinvite me", "await ctx.send(\"Ok, not deleting slash tags.\") guild: discord.Guild = ctx.guild", "= app_id await ctx.send(f\"Application ID set to `{id}`.\") @commands.check(dev_check) @slashtagset.command(\"addeval\")", "commands.Context, *, tag: GlobalTagConverter): await ctx.send(await tag.delete()) @slashtag_global.command(\"raw\") @copy_doc(slashtag_raw) async", "to evaluate.\", required=True) ], ) await slasheval.register() await self.config.eval_command.set(slasheval.id) self.eval_command", "f\"{title}{discord.utils.escape_markdown(tagscript)}\" async def view_slash_tags( self, ctx: commands.Context, tags: Dict[int, SlashTag],", "= TAG_RE.sub(_sub, doc) if isinstance(overriden, commands.Command): overriden._help_override = doc else:", "\"✅\" if self.testing_enabled else \"❎\" description = [ f\"Application ID:", "want to delete all slash tags on this server? (Y/n)\",", "tag.edit_tagscript(tagscript)) @slashtag_edit.command(\"tagscript\") async def slashtag_edit_tagscript( self, ctx: commands.Context, tag: GuildTagConverter,", "are no global slash tags.\") await self.view_slash_tags(ctx, tags, is_global=True) @slashtag_global.command(\"usage\",", "*, link: PastebinConverter, ): await self.create_slash_tag(ctx, tag_name, link, is_global=True) @slashtag_global.group(\"edit\",", "the tag description to be? 
(maximum 100 characters)\", pred=MessagePredicate.length_less(101, ctx),", "self.testing_enabled else \"❎\" description = [ f\"Application ID: **{self.application_id}**\", f\"Eval", "to create command {command!r} on guild {ctx.guild!r}\", exc_info=error ) text", "tags from the database.\"\"\" await self.restore_tags(ctx, ctx.guild) @commands.is_owner() @slashtag.command(\"clear\", hidden=True)", "ctx.send(f\"Reached max choices ({CHOICE_LIMIT}).\") break return choices async def get_option(", "try: await self.send_and_query_response( ctx, \"Are you sure you want to", "tag: GuildTagConverter, *, description: str ): \"\"\"Edit a slash tag's", "self.config.eval_command.clear() self.eval_command = None await ctx.send(\"`/eval` has been deleted.\") @slashtagset.command(\"testing\")", "slashtag_edit_argument( self, ctx: commands.Context, tag: GuildTagConverter, argument: str ): \"\"\"Edit", "SlashTag from ..testing.button_menus import menu as button_menu from ..utils import", "is_global=False, command_type=ApplicationCommandType.MESSAGE ) @commands.mod_or_permissions(manage_guild=True) @slashtag.command(\"user\") async def slashtag_user( self, ctx:", "\" \") return f\"{title}{discord.utils.escape_markdown(tagscript)}\" async def view_slash_tags( self, ctx: commands.Context,", "above copyright notice and this permission notice shall be included", "tags.\") await self.view_slash_tags(ctx, tags, is_global=True) @slashtag_global.command(\"usage\", aliases=[\"stats\"]) @copy_doc(slashtag_usage) async def", "aliases=[\"e\"], invoke_without_command=True) async def slashtag_edit( self, ctx: commands.Context, tag: GuildTagConverter,", "def slashtag_global_list(self, ctx: commands.Context): tags = self.global_tag_cache if not tags:", "if pred is None: pred = MessagePredicate.same_context(ctx) ask = await", "slash tag's raw content.\"\"\" await tag.send_raw_tagscript(ctx) @classmethod def format_tagscript(cls, tag:", "worry about it. 
\"\"\" app_id = id or self.bot.user.id await", "slashtag_global(self, ctx: commands.Context): pass @slashtag_global.command(\"add\") @copy_doc(slashtag_add) async def slashtag_global_add( self,", "@commands.is_owner() @slashtag.command(\"clear\", hidden=True) async def slashtag_clear(self, ctx: commands.Context): \"\"\"Clear all", "Copyright (c) 2020-present phenom4n4n Permission is hereby granted, free of", "bool = False ) -> SlashOption: name_desc = [ \"What", "{testing_enabled}\", ] embed = discord.Embed( color=0xC9C9C9, title=\"SlashTags Settings\", description=\"\\n\".join(description) )", "pagify from redbot.core.utils.menus import DEFAULT_CONTROLS, menu from redbot.core.utils.predicates import MessagePredicate", "@copy_doc(slashtag_edit) async def slashtag_global_edit( self, ctx: commands.Context, tag: GlobalTagConverter, *,", "ctx.guild) @commands.is_owner() @slashtag.command(\"restore\", hidden=True) async def slashtag_restore(self, ctx: commands.Context): \"\"\"Restore", "discord.Embed(color=await ctx.embed_color()) if is_global: slash_tags = \"global slash tags\" e.set_author(name=\"Global", "tag.edit_description(description)) @slashtag_edit.command(\"arguments\", aliases=[\"options\"]) async def slashtag_edit_arguments(self, ctx: commands.Context, tag: GuildTagConverter):", "..http import ApplicationOptionChoice, SlashOptionType from ..objects import ApplicationCommand, ApplicationCommandType, SlashOption,", "= \"loaded\" if target_state else \"unloaded\" return await ctx.send(f\"The SlashTag", "def slashtag_global_pastebin( self, ctx: commands.Context, tag_name: TagName(check_global=False), *, link: PastebinConverter,", "to any person obtaining a copy of this software and", "debugging.\"\"\" if self.eval_command: return await ctx.send(\"An eval command is already", "*, tagscript: TagScriptConverter ): \"\"\"Edit a slash tag.\"\"\" await ctx.send(await", "if true_or_false is not None else not await self.config.testing_enabled() )", "is True: await 
self.get_options(ctx, options) command = ApplicationCommand( self, name=tag_name,", "from redbot.core.utils.chat_formatting import box, humanize_list, inline, pagify from redbot.core.utils.menus import", "set to `{id}`.\") @commands.check(dev_check) @slashtagset.command(\"addeval\") async def slashtagset_addeval(self, ctx: commands.Context):", "person obtaining a copy of this software and associated documentation", ") -> str: if pred is None: pred = MessagePredicate.same_context(ctx)", "*, tagscript: TagScriptConverter, ): \"\"\" Add a slash tag with", "await tag.edit_options(ctx) @slashtag_edit.command(\"argument\", aliases=[\"option\"]) async def slashtag_edit_argument( self, ctx: commands.Context,", "await self.http.put_guild_slash_commands(guild.id, []) for tag in copy(self.guild_tag_cache[guild.id]).values(): tag.remove_from_cache() tag.command.remove_from_cache() del", "tagscript=tagscript) @slashtag_edit.command(\"name\") async def slashtag_edit_name( self, ctx: commands.Context, tag: GuildTagConverter,", "should the argument name be and description be?\", \"The argument", "menu(ctx, embeds, DEFAULT_CONTROLS) await button_menu(ctx, embeds) @slashtag.command(\"list\") async def slashtag_list(self,", "= \"Loaded\" self.add_test_cog() else: loaded = \"Unloaded\" self.remove_test_cog() await ctx.send(f\"{loaded}", "and this permission notice shall be included in all copies", "description.\"\"\" await ctx.send(await tag.edit_description(description)) @slashtag_edit.command(\"arguments\", aliases=[\"options\"]) async def slashtag_edit_arguments(self, ctx:", "tabulate from ..abc import MixinMeta from ..converters import ( GlobalTagConverter,", "if is_global else ctx.guild.id if command_type == ApplicationCommandType.CHAT_INPUT: try: description", ") except asyncio.TimeoutError: return await ctx.send(\"Timed out, not deleting slash", "following arguments must also be optional.*\", pred, ) required =", "as choices to \" \"the tag. 
Choice names and values", "id or self.bot.user.id await self.config.application_id.set(app_id) self.application_id = app_id await ctx.send(f\"Application", "from ..utils import ARGUMENT_NAME_DESCRIPTION, chunks, dev_check TAG_RE = re.compile(r\"(?i)(\\[p\\])?\\b(slash\\s?)?tag'?s?\\b\") CHOICE_RE", "return await ctx.send(\"Tag addition timed out.\") else: description = \"\"", "1): embed = e.copy() embed.description = page embed.set_footer(text=f\"{index}/{len(pages)} | {len(tags)}", "*, name: TagName(check_global=False) ): \"\"\"Edit a slash tag's name.\"\"\" await", "commands.Context, true_or_false: bool = None): \"\"\" Load or unload the", "from ..abc import MixinMeta from ..converters import ( GlobalTagConverter, GuildTagConverter,", "ctx: commands.Context, tag_name: TagName(check_global=False), *, link: PastebinConverter, ): await self.create_slash_tag(ctx,", "{ctx.guild!r}\") # exc info unneeded since error handler should print", "command=command, ) await ctx.send(await tag.initialize()) async def get_options( self, ctx:", "\"Looks like I don't have permission to add Slash Commands", "`member:A member of this server.`\\n\", \"*Slash argument names may not", "that if you choose to make this argument optional, all", "\"prolog\") embed = e.copy() embed.description = usage_chart embeds.append(embed) await menu(ctx,", "commands.Context, tag_name: TagName(global_priority=True), *, tagscript: TagScriptConverter, ): await self.create_slash_tag(ctx, tag_name,", "List, Union import discord from redbot.core import commands from redbot.core.utils.chat_formatting", "\"\"\"Clear all slash tags for this server.\"\"\" pred = MessagePredicate.yes_or_no(ctx)", "async def slashtag_restore(self, ctx: commands.Context): \"\"\"Restore all slash tags from", "tag = SlashTag( self, tagscript, guild_id=guild_id, author_id=ctx.author.id, command=command, ) await", "Commands(MixinMeta): @commands.guild_only() @commands.group(aliases=[\"st\"]) async def slashtag(self, ctx: 
commands.Context): \"\"\" Slash", "= False return SlashOption( name=name.lower(), description=description, option_type=option_type, required=required, choices=choices, )", "error handler should print it, however info on the command", "seperated by `|`. Example:\\n`dog:Doggo|cat:Catto`\" ) response = await self.send_and_query_response(ctx, query)", "documentation files (the \"Software\"), to deal in the Software without", "chunks, dev_check TAG_RE = re.compile(r\"(?i)(\\[p\\])?\\b(slash\\s?)?tag'?s?\\b\") CHOICE_RE = re.compile(r\".{1,100}:.{1,100}\") CHOICE_LIMIT =", "this server? (Y/n)\", pred ) except asyncio.TimeoutError: return await ctx.send(\"Timed", "types.FunctionType]): doc = original.help if isinstance(original, commands.Command) else original.__doc__ doc", "try: await self.http.remove_slash_command(self.eval_command) except discord.HTTPException: pass await self.config.eval_command.clear() self.eval_command =", "except discord.Forbidden as error: log.error( \"Failed to create command {command!r}", "be? (maximum 100 characters)\", pred=MessagePredicate.length_less(101, ctx), ) except asyncio.TimeoutError: return", "without restriction, including without limitation the rights to use, copy,", "*, tag: TagConverter): \"\"\"Get info about a slash tag that", "TagScriptEngine. [This site](https://phen-cogs.readthedocs.io/en/latest/index.html) has documentation on how to use TagScript", "tag with TagScript. 
[Slash tag usage guide](https://phen-cogs.readthedocs.io/en/latest/slashtags/slashtags.html) \"\"\" await self.create_slash_tag(ctx,", "TagScriptConverter, ): await self.create_slash_tag( ctx, tag_name, tagscript, is_global=True, command_type=ApplicationCommandType.USER )", "overriden return decorator class Commands(MixinMeta): @commands.guild_only() @commands.group(aliases=[\"st\"]) async def slashtag(self,", "tag.uses for tag in tags.copy().values()}) e = discord.Embed(title=\"Slash Tag Stats\",", "if match.group(1): return \"[p]slashtag global\" repl = \"global \" name", "COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER", "slash_tags = \"slash tags\" e.set_author(name=\"Stored Slash Tags\", icon_url=ctx.guild.icon_url) embeds =", "ApplicationOptionChoice(*choice_text.split(\":\", 1)) choices.append(choice) if len(choices) >= CHOICE_LIMIT: await ctx.send(f\"Reached max", "SlashTag( self, tagscript, guild_id=guild_id, author_id=ctx.author.id, command=command, ) await ctx.send(await tag.initialize())", "async def slashtagset_testing(self, ctx: commands.Context, true_or_false: bool = None): \"\"\"", "guide](https://phen-cogs.readthedocs.io/en/latest/slashtags/slashtags.html) \"\"\" await self.create_slash_tag( ctx, tag_name, tagscript, is_global=False, command_type=ApplicationCommandType.USER )", "discord.Guild = ctx.guild await self.http.put_guild_slash_commands(guild.id, []) for tag in copy(self.guild_tag_cache[guild.id]).values():", "[] for choice_text in response.split(\"|\"): if \":\" not in choice_text:", "choices async def get_option( self, ctx: commands.Context, *, added_required: bool", "character limit.\", delete_after=15, ) continue choice = ApplicationOptionChoice(*choice_text.split(\":\", 1)) choices.append(choice)", "= f\"✅ (**{self.eval_command}**)\" if self.eval_command else \"❎\" testing_enabled = \"✅\"", "@slashtag.command(\"pastebin\", aliases=[\"++\"]) async def slashtag_pastebin( self, ctx: commands.Context, tag_name: 
TagName(check_global=False),", "this tag? (Y/n)\", pred ) except asyncio.TimeoutError: await ctx.send(\"Query timed", "info about a slash tag that is stored on this", "use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies", "choices to \" \"the tag. Choice names and values should", "Edit a slash tag's arguments. See [this documentation page](https://phen-cogs.readthedocs.io/en/latest/slashtags/slash_arguments.html) for", "\"\"\" Add a message command tag with TagScript. [Slash tag", "granted, free of charge, to any person obtaining a copy", "e.set_author(name=\"Stored Slash Tags\", icon_url=ctx.guild.icon_url) embeds = [] pages = list(pagify(description))", "in tags.copy().values()}) e = discord.Embed(title=\"Slash Tag Stats\", color=await ctx.embed_color()) embeds", "with TagScript. [Slash tag usage guide](https://phen-cogs.readthedocs.io/en/latest/slashtags/slashtags.html) \"\"\" await self.create_slash_tag(ctx, tag_name,", "command_type == ApplicationCommandType.CHAT_INPUT: try: description = await self.send_and_query_response( ctx, \"What", "a slash tag with TagScript. 
[Slash tag usage guide](https://phen-cogs.readthedocs.io/en/latest/slashtags/slashtags.html) \"\"\"", "await tag.edit_single_option(ctx, argument) @slashtag_global.command(\"remove\", aliases=[\"delete\", \"-\"]) @copy_doc(slashtag_remove) async def slashtag_global_remove(self,", "f\"Failed to parse `{choice_text}` to a choice as \" \"its", "@slashtag_global.command(\"remove\", aliases=[\"delete\", \"-\"]) @copy_doc(slashtag_remove) async def slashtag_global_remove(self, ctx: commands.Context, *,", "ask = await ctx.send(query) try: message = await self.bot.wait_for(\"message\", check=pred,", "\"\"\" await self.create_slash_tag( ctx, tag_name, tagscript, is_global=False, command_type=ApplicationCommandType.USER ) @commands.mod_or_permissions(manage_guild=True)", "async def get_options( self, ctx: commands.Context, options: List[SlashOption] ) ->", "ctx: commands.Context): \"\"\"Restore all slash tags from the database.\"\"\" await", "raise tag = SlashTag( self, tagscript, guild_id=guild_id, author_id=ctx.author.id, command=command, )", "has been registered.\") @commands.check(dev_check) @slashtagset.command(\"rmeval\") async def slashtagset_rmeval(self, ctx: commands.Context):", "argument) @commands.mod_or_permissions(manage_guild=True) @slashtag.command(\"remove\", aliases=[\"delete\", \"-\"]) async def slashtag_remove(self, ctx: commands.Context,", "f\"`{tag.type.get_prefix()}{tag.name}` - \" limit -= len(title) tagscript = tag.tagscript if", "registered.\") @commands.check(dev_check) @slashtagset.command(\"rmeval\") async def slashtagset_rmeval(self, ctx: commands.Context): \"\"\"Remove the", "= ApplicationCommand( self, name=\"eval\", description=\"SlashTags debugging eval command. 
Only bot", "description: str ): \"\"\"Edit a slash tag's description.\"\"\" await ctx.send(await", "description=description, option_type=option_type, required=required, choices=choices, ) @commands.mod_or_permissions(manage_guild=True) @slashtag.command(\"message\") async def slashtag_message(", "a choice as its name and value \" \"weren't seperated", "Counter({tag.name: tag.uses for tag in tags.copy().values()}) e = discord.Embed(title=\"Slash Tag", "name_pred.result name, description = match.group(1), match.group(2) valid_option_types = [ name.lower()", "await ctx.send(await tag.initialize()) async def get_options( self, ctx: commands.Context, options:", "*, tagscript: TagScriptConverter ): \"\"\"Edit a slash tag's TagScript.\"\"\" await", "slash tag that is stored on this server.\"\"\" await tag.send_info(ctx)", "description = \"\" if command_type == ApplicationCommandType.CHAT_INPUT: pred = MessagePredicate.yes_or_no(ctx)", "exc info unneeded since error handler should print it, however", "tag with a Pastebin link. 
\"\"\" await self.create_slash_tag(ctx, tag_name, link,", "split by a `:`.\", \"Example: `member:A member of this server.`\\n\",", "valid_option_types])}\", \"(select `string` if you don't understand)\", ] option_type =", "{eval_command}\", f\"Test cog loaded: {testing_enabled}\", ] embed = discord.Embed( color=0xC9C9C9,", "\"The argument name and description should be split by a", "\" \"weren't seperated by a `:`.\", delete_after=15, ) continue if", "ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION", "`{id}`.\") @commands.check(dev_check) @slashtagset.command(\"addeval\") async def slashtagset_addeval(self, ctx: commands.Context): \"\"\"Add a", "GuildTagConverter, *, description: str ): \"\"\"Edit a slash tag's description.\"\"\"", "you want to delete all slash tags on this server?", "AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES", "doc) if isinstance(overriden, commands.Command): overriden._help_override = doc else: overriden.__doc__ =", "arguments. \"\"\" await tag.edit_options(ctx) @slashtag_edit.command(\"argument\", aliases=[\"option\"]) async def slashtag_edit_argument( self,", "ID. This only applies to legacy bots. 
If you don't", "global\" repl = \"global \" name = match.group(0) repl +=", "await ctx.send(\"Tags deleted.\") @commands.is_owner() @slashtag.group(\"global\") @copy_doc(slashtag) async def slashtag_global(self, ctx:", "self.add_test_cog() else: loaded = \"Unloaded\" self.remove_test_cog() await ctx.send(f\"{loaded} the SlashTag", "global slash tags.\") await self.view_slash_tags(ctx, tags, is_global=True) @slashtag_global.command(\"usage\", aliases=[\"stats\"]) @copy_doc(slashtag_usage)", "= [ self.format_tagscript(tag) for tag in sorted(tags.values(), key=lambda t: t.name)", "return await ctx.send(\"Ok, not deleting slash tags.\") guild: discord.Guild =", "self, tagscript, guild_id=guild_id, author_id=ctx.author.id, command=command, ) await ctx.send(await tag.initialize()) async", "await ctx.send( f\"Failed to parse `{choice_text}` to a choice as", "choice as \" \"its name or value exceeded the 100", "self, ctx: commands.Context, query: str, pred: MessagePredicate = None, *,", "on this server.\") await self.view_slash_tags(ctx, tags, is_global=False) async def show_slash_tag_usage(self,", "to `{id}`.\") @commands.check(dev_check) @slashtagset.command(\"addeval\") async def slashtagset_addeval(self, ctx: commands.Context): \"\"\"Add", "try: await command.register() except discord.Forbidden as error: log.error( \"Failed to", "discord.Embed(title=\"Slash Tag Stats\", color=await ctx.embed_color()) embeds = [] for usage_data", "notice shall be included in all copies or substantial portions", "ctx: commands.Context): \"\"\"Clear all slash tags for this server.\"\"\" pred", "= match.group(1), match.group(2) valid_option_types = [ name.lower() for name in", "async def get_choices(self, ctx: commands.Context) -> List[ApplicationOptionChoice]: query = (", "): \"\"\" Add a slash tag with TagScript. 
[Slash tag", "@copy_doc(slashtag_restore) async def slashtag_global_restore(self, ctx: commands.Context): await self.restore_tags(ctx, None) @commands.is_owner()", "box, humanize_list, inline, pagify from redbot.core.utils.menus import DEFAULT_CONTROLS, menu from", "slash tag's argument by name.\"\"\" await tag.edit_single_option(ctx, argument) @commands.mod_or_permissions(manage_guild=True) @slashtag.command(\"remove\",", "or value exceeded the 100 character limit.\", delete_after=15, ) continue", "added_required = False for i in range(1, 11): try: option", "guild_id=guild_id, options=options, type=command_type, ) try: await command.register() except discord.Forbidden as", "aliases=[\"e\"], invoke_without_command=True) @copy_doc(slashtag_edit) async def slashtag_global_edit( self, ctx: commands.Context, tag:", "this argument required? (Y/n)\\n*Keep in mind that if you choose", "usage guide](https://phen-cogs.readthedocs.io/en/latest/slashtags/slashtags.html) \"\"\" await self.create_slash_tag( ctx, tag_name, tagscript, is_global=False, command_type=ApplicationCommandType.USER", "Union[commands.Command, types.FunctionType]): def decorator(overriden: Union[commands.Command, types.FunctionType]): doc = original.help if", "commands.Context, tag: GlobalTagConverter, *, tagscript: TagScriptConverter ): await ctx.send(await tag.edit_tagscript(tagscript))", "import menu as button_menu from ..utils import ARGUMENT_NAME_DESCRIPTION, chunks, dev_check", "embed = discord.Embed( color=0xC9C9C9, title=\"SlashTags Settings\", description=\"\\n\".join(description) ) await ctx.send(embed=embed)", "await self.config.eval_command.clear() self.eval_command = None await ctx.send(\"`/eval` has been deleted.\")", "= [ \"What should the argument type be?\", f\"Valid option", "return SlashOption( name=name.lower(), description=description, option_type=option_type, required=required, choices=choices, ) @commands.mod_or_permissions(manage_guild=True) @slashtag.command(\"message\")", 
"guild_id=guild_id, author_id=ctx.author.id, command=command, ) await ctx.send(await tag.initialize()) async def get_options(", "tagscript: TagScriptConverter ): \"\"\"Edit a slash tag.\"\"\" await ctx.send(await tag.edit_tagscript(tagscript))", "slashtag_edit_arguments(self, ctx: commands.Context, tag: GuildTagConverter): \"\"\" Edit a slash tag's", "slashtag_global_remove(self, ctx: commands.Context, *, tag: GlobalTagConverter): await ctx.send(await tag.delete()) @slashtag_global.command(\"raw\")", "MessagePredicate.yes_or_no(ctx) await self.send_and_query_response( ctx, \"Is this argument required? (Y/n)\\n*Keep in", "asyncio.TimeoutError: await ctx.send(\"Adding this argument timed out.\", delete_after=15) break options.append(option)", "to a choice as \" \"its name or value exceeded", "tag_name, link, is_global=False) @commands.mod_or_permissions(manage_guild=True) @slashtag.group(\"edit\", aliases=[\"e\"], invoke_without_command=True) async def slashtag_edit(", "be optional.*\", pred, ) required = pred.result else: await ctx.send(", "if not CHOICE_RE.match(choice_text): await ctx.send( f\"Failed to parse `{choice_text}` to", "copies of the Software, and to permit persons to whom", "def slashtag_edit_tagscript( self, ctx: commands.Context, tag: GuildTagConverter, *, tagscript: TagScriptConverter", "to deal in the Software without restriction, including without limitation", "*, tagscript: TagScriptConverter ): await self.slashtag_global_edit(ctx, tag, tagscript=tagscript) @slashtag_global_edit.command(\"name\") @copy_doc(slashtag_edit_name)", "tags\" e.set_author(name=\"Global Slash Tags\", icon_url=ctx.me.avatar_url) else: slash_tags = \"slash tags\"", "await self.get_choices(ctx) option_type = \"STRING\" else: choices = [] option_type", "commands if it differs from the bot user ID. 
This", "\"There are no global slash tags.\" ) return await ctx.send(message)", "else: if pred.result is True: await self.get_options(ctx, options) command =", "ID set to `{id}`.\") @commands.check(dev_check) @slashtagset.command(\"addeval\") async def slashtagset_addeval(self, ctx:", "is_global: bool = False, command_type: ApplicationCommandType = ApplicationCommandType.CHAT_INPUT, ): options:", "): await ctx.send(await tag.edit_name(name)) @slashtag_global_edit.command(\"description\") @copy_doc(slashtag_edit_description) async def slashtag_global_edit_description( self,", "= [] pages = list(pagify(description)) for index, page in enumerate(pages,", "on guild {ctx.guild!r}\", exc_info=error ) text = ( \"Looks like", "License Copyright (c) 2020-present phenom4n4n Permission is hereby granted, free", "= MessagePredicate.yes_or_no(ctx) try: await self.send_and_query_response( ctx, \"Would you like to", "): \"\"\" Add a user command tag with TagScript. [Slash", "None else not await self.config.testing_enabled() ) if target_state is self.testing_enabled:", "name if name.istitle(): repl = repl.title() return repl def copy_doc(original:", "all slash tags for this server.\"\"\" pred = MessagePredicate.yes_or_no(ctx) try:", "await ctx.send(\"The eval command hasn't been registered.\") try: await self.http.remove_slash_command(self.eval_command)", "self.testing_enabled: loaded = \"loaded\" if target_state else \"unloaded\" return await", "limitation the rights to use, copy, modify, merge, publish, distribute,", "subject to the following conditions: The above copyright notice and", "== 10: break pred = MessagePredicate.yes_or_no(ctx) try: await self.send_and_query_response( ctx,", "Tags\", icon_url=ctx.me.avatar_url) else: slash_tags = \"slash tags\" e.set_author(name=\"Stored Slash Tags\",", "TagName(check_global=False) ): \"\"\"Edit a slash tag's name.\"\"\" await ctx.send(await tag.edit_name(name))", "database.\"\"\" await self.restore_tags(ctx, ctx.guild) 
@commands.is_owner() @slashtag.command(\"clear\", hidden=True) async def slashtag_clear(self,", "SlashOption( name=name.lower(), description=description, option_type=option_type, required=required, choices=choices, ) @commands.mod_or_permissions(manage_guild=True) @slashtag.command(\"message\") async", "copy(self.guild_tag_cache[guild.id]).values(): tag.remove_from_cache() tag.command.remove_from_cache() del tag self.guild_tag_cache[guild.id].clear() await self.config.guild(guild).tags.clear() await ctx.send(\"Tags", "aliases=[\"stats\"]) async def slashtag_usage(self, ctx: commands.Context): \"\"\" See this slash", "GlobalTagConverter): await tag.edit_options(ctx) @slashtag_global_edit.command(\"argument\", aliases=[\"option\"]) @copy_doc(slashtag_edit_argument) async def slashtag_global_edit_argument( self,", "*, tagscript: TagScriptConverter, ): \"\"\" Add a message command tag", "\"\"\"Remove the slash eval command.\"\"\" if not self.eval_command: return await", "command_type: ApplicationCommandType = ApplicationCommandType.CHAT_INPUT, ): options: List[SlashOption] = [] guild_id", "@slashtag_global.command(\"add\") @copy_doc(slashtag_add) async def slashtag_global_add( self, ctx: commands.Context, tag_name: TagName(global_priority=True),", "cog. \"\"\" target_state = ( true_or_false if true_or_false is not", "if not tags: return await ctx.send(\"There are no global slash", "bot user ID. This only applies to legacy bots. 
If", "loaded = \"Unloaded\" self.remove_test_cog() await ctx.send(f\"{loaded} the SlashTag interaction testing", "= re.compile(r\"(?i)(\\[p\\])?\\b(slash\\s?)?tag'?s?\\b\") CHOICE_RE = re.compile(r\".{1,100}:.{1,100}\") CHOICE_LIMIT = 25 log =", "NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A", "contain characters \" \"that are alphanumeric or '_' or '-'.\",", "MessagePredicate.lower_contained_in(valid_option_types, ctx), ) if option_type.lower() == \"choices\": choices = await", "that is stored on this server.\"\"\" await tag.send_info(ctx) @slashtag.command(\"raw\") async", "Software. THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF", "slashtag_message( self, ctx: commands.Context, tag_name: TagName(check_global=False, check_regex=False), *, tagscript: TagScriptConverter,", "not tags: return await ctx.send(\"There are no slash tags on", "@slashtag_global_edit.command(\"arguments\", aliases=[\"options\"]) @copy_doc(slashtag_edit_arguments) async def slashtag_global_edit_arguments(self, ctx: commands.Context, tag: GlobalTagConverter):", "out.\") else: description = \"\" if command_type == ApplicationCommandType.CHAT_INPUT: pred", "one was optional.\", delete_after=15, ) required = False return SlashOption(", "\"\"\"Restore all slash tags from the database.\"\"\" await self.restore_tags(ctx, ctx.guild)", "\"\"\" await self.create_slash_tag(ctx, tag_name, link, is_global=False) @commands.mod_or_permissions(manage_guild=True) @slashtag.group(\"edit\", aliases=[\"e\"], invoke_without_command=True)", "pred ) except asyncio.TimeoutError: return await ctx.send(\"Timed out, not deleting", "ctx: commands.Context): await self.show_slash_tag_usage(ctx) @slashtag_global.command(\"restore\", hidden=True) @copy_doc(slashtag_restore) async def slashtag_global_restore(self,", "tag.edit_options(ctx) @slashtag_global_edit.command(\"argument\", aliases=[\"option\"]) @copy_doc(slashtag_edit_argument) async def slashtag_global_edit_argument( self, ctx: 
commands.Context,", "await ctx.send(\"Query timed out, not adding additional arguments.\") break if", "await self.send_and_query_response( ctx, \"What should the tag description to be?", "name_desc = [ \"What should the argument name be and", "re.compile(r\"(?i)(\\[p\\])?\\b(slash\\s?)?tag'?s?\\b\") CHOICE_RE = re.compile(r\".{1,100}:.{1,100}\") CHOICE_LIMIT = 25 log = logging.getLogger(\"red.phenom4n4n.slashtags.commands\")", "@commands.guild_only() @commands.group(aliases=[\"st\"]) async def slashtag(self, ctx: commands.Context): \"\"\" Slash Tag", "await self.create_slash_tag( ctx, tag_name, tagscript, is_global=False, command_type=ApplicationCommandType.USER ) @commands.mod_or_permissions(manage_guild=True) @slashtag.command(\"pastebin\",", ") @slashtag_global.command(\"pastebin\", aliases=[\"++\"]) @copy_doc(slashtag_pastebin) async def slashtag_global_pastebin( self, ctx: commands.Context,", "f\"✅ (**{self.eval_command}**)\" if self.eval_command else \"❎\" testing_enabled = \"✅\" if", "-> str: if match.group(1): return \"[p]slashtag global\" repl = \"global", "\"(select `string` if you don't understand)\", ] option_type = await", "you sure you want to delete all slash tags on", "ctx: commands.Context, tag_name: TagName(check_global=False), *, link: PastebinConverter, ): \"\"\" Add", "self.delete_quietly(message) return message.content async def get_choices(self, ctx: commands.Context) -> List[ApplicationOptionChoice]:", "{command!r} on guild {ctx.guild!r}\") # exc info unneeded since error", ") required = False return SlashOption( name=name.lower(), description=description, option_type=option_type, required=required,", "aliases=[\"stats\"]) @copy_doc(slashtag_usage) async def slashtag_global_usage(self, ctx: commands.Context): await self.show_slash_tag_usage(ctx) @slashtag_global.command(\"restore\",", "= 25 log = logging.getLogger(\"red.phenom4n4n.slashtags.commands\") def _sub(match: re.Match) -> str:", "self.format_tagscript(tag) for tag in 
sorted(tags.values(), key=lambda t: t.name) ] description", "a `:`.\", delete_after=15, ) continue if not CHOICE_RE.match(choice_text): await ctx.send(", "registered.\") slasheval = ApplicationCommand( self, name=\"eval\", description=\"SlashTags debugging eval command.", "IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS", "asyncio import logging import re import types from collections import", "self.eval_command: return await ctx.send(\"The eval command hasn't been registered.\") try:", "self, ctx: commands.Context, tag: GlobalTagConverter, *, name: TagName(global_priority=True), ): await", "use TagScript blocks. \"\"\" @commands.mod_or_permissions(manage_guild=True) @slashtag.command(\"add\", aliases=[\"create\", \"+\"]) async def", "if name.istitle(): repl = repl.title() return repl def copy_doc(original: Union[commands.Command,", "): await ctx.send(await tag.edit_description(description)) @slashtag_global_edit.command(\"arguments\", aliases=[\"options\"]) @copy_doc(slashtag_edit_arguments) async def slashtag_global_edit_arguments(self,", "return await ctx.send(\"There are no global slash tags.\") await self.view_slash_tags(ctx,", "@slashtag_global.command(\"restore\", hidden=True) @copy_doc(slashtag_restore) async def slashtag_global_restore(self, ctx: commands.Context): await self.restore_tags(ctx,", "*, tag: GlobalTagConverter): await ctx.send(await tag.delete()) @slashtag_global.command(\"raw\") @copy_doc(slashtag_raw) async def", "else: loaded = \"Unloaded\" self.remove_test_cog() await ctx.send(f\"{loaded} the SlashTag interaction", "100 character limit.\", delete_after=15, ) continue choice = ApplicationOptionChoice(*choice_text.split(\":\", 1))", "key=lambda t: t.name) ] description = \"\\n\".join(description) e = discord.Embed(color=await", "app_id = id or self.bot.user.id await self.config.application_id.set(app_id) self.application_id = app_id", "tag.send_raw_tagscript(ctx) @slashtag_global.command(\"list\") @copy_doc(slashtag_list) async 
def slashtag_global_list(self, ctx: commands.Context): tags =", "delete_after=15, ) required = False return SlashOption( name=name.lower(), description=description, option_type=option_type,", "try: await self.send_and_query_response( ctx, \"Would you like to add arguments", "@copy_doc(slashtag_usage) async def slashtag_global_usage(self, ctx: commands.Context): await self.show_slash_tag_usage(ctx) @slashtag_global.command(\"restore\", hidden=True)", "and can only contain characters \" \"that are alphanumeric or", "= True except asyncio.TimeoutError: await ctx.send(\"Adding this argument timed out.\",", "def slashtag_global_raw(self, ctx: commands.Context, *, tag: GlobalTagConverter): await tag.send_raw_tagscript(ctx) @slashtag_global.command(\"list\")", "def slashtag_global_edit( self, ctx: commands.Context, tag: GlobalTagConverter, *, tagscript: TagScriptConverter", "await ctx.send(text) except Exception: log.error(\"Failed to create command {command!r} on", "description=\"Code body to evaluate.\", required=True) ], ) await slasheval.register() await", "documentation on how to use TagScript blocks. 
\"\"\" @commands.mod_or_permissions(manage_guild=True) @slashtag.command(\"add\",", "choices = [] for choice_text in response.split(\"|\"): if \":\" not", "else not await self.config.testing_enabled() ) if target_state is self.testing_enabled: loaded", "ctx.send(await tag.delete()) @slashtag_global.command(\"raw\") @copy_doc(slashtag_raw) async def slashtag_global_raw(self, ctx: commands.Context, *,", "else ctx.guild.id if command_type == ApplicationCommandType.CHAT_INPUT: try: description = await", "async def slashtag_edit_name( self, ctx: commands.Context, tag: GuildTagConverter, *, name:", "def slashtag_global_usage(self, ctx: commands.Context): await self.show_slash_tag_usage(ctx) @slashtag_global.command(\"restore\", hidden=True) @copy_doc(slashtag_restore) async", "the following conditions: The above copyright notice and this permission", "= ( \"This server has no slash tags.\" if guild", ") @commands.mod_or_permissions(manage_guild=True) @slashtag.command(\"message\") async def slashtag_message( self, ctx: commands.Context, tag_name:", "{slash_tags}\") embeds.append(embed) # await menu(ctx, embeds, DEFAULT_CONTROLS) await button_menu(ctx, embeds)", "ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF", "link, is_global=True) @slashtag_global.group(\"edit\", aliases=[\"e\"], invoke_without_command=True) @copy_doc(slashtag_edit) async def slashtag_global_edit( self,", "\"\"\" await self.create_slash_tag(ctx, tag_name, tagscript, is_global=False) async def create_slash_tag( self,", "await ctx.send(query) try: message = await self.bot.wait_for(\"message\", check=pred, timeout=timeout) except", "DEFAULT_CONTROLS, menu from redbot.core.utils.predicates import MessagePredicate from tabulate import tabulate", "description must be less than or equal to 100 characters.*\",", "options=[ SlashOption(name=\"body\", description=\"Code body to evaluate.\", required=True) ], ) await", "commands.Context, *, added_required: bool = False ) -> SlashOption: name_desc", 
"async def slashtagset_addeval(self, ctx: commands.Context): \"\"\"Add a slash eval command", "import ApplicationOptionChoice, SlashOptionType from ..objects import ApplicationCommand, ApplicationCommandType, SlashOption, SlashTag", "tag.delete()) @slashtag.command(\"info\") async def slashtag_info(self, ctx: commands.Context, *, tag: TagConverter):", "-> str: title = f\"`{tag.type.get_prefix()}{tag.name}` - \" limit -= len(title)", "await ctx.send( \"This argument was automatically made optional as the", "Load or unload the SlashTag interaction development test cog. \"\"\"", "command.register() except discord.Forbidden as error: log.error( \"Failed to create command", "> limit - 3: tagscript = tagscript[:limit] + \"...\" tagscript", "\"\"\" await self.create_slash_tag( ctx, tag_name, tagscript, is_global=False, command_type=ApplicationCommandType.MESSAGE ) @commands.mod_or_permissions(manage_guild=True)", "tag_name, tagscript, is_global=True) @commands.mod_or_permissions(manage_guild=True) @slashtag_global.command(\"message\") @copy_doc(slashtag_message) async def slashtag_global_message( self,", "slashtagset_settings(self, ctx: commands.Context): \"\"\"View SlashTags settings.\"\"\" eval_command = f\"✅ (**{self.eval_command}**)\"", "commands.Context, tag_name: TagName(check_global=False), *, link: PastebinConverter, ): await self.create_slash_tag(ctx, tag_name,", "like to add another argument? (Y/n)\", pred ) except asyncio.TimeoutError:", "this server.\") await self.view_slash_tags(ctx, tags, is_global=False) async def show_slash_tag_usage(self, ctx:", "THE SOFTWARE. 
\"\"\" import asyncio import logging import re import", "if not option.required: added_required = True except asyncio.TimeoutError: await ctx.send(\"Adding", "ctx.send( \"This argument was automatically made optional as the previous", "isinstance(original, commands.Command) else original.__doc__ doc = TAG_RE.sub(_sub, doc) if isinstance(overriden,", "self.delete_quietly(ask) raise await self.delete_quietly(ask) await self.delete_quietly(message) return message.content async def", "icon_url=ctx.guild.icon_url) embeds = [] pages = list(pagify(description)) for index, page", "if guild else \"There are no global slash tags.\" )", "await self.get_option(ctx, added_required=added_required) if not option.required: added_required = True except", "on slash tag arguments. \"\"\" await tag.edit_options(ctx) @slashtag_edit.command(\"argument\", aliases=[\"option\"]) async", "async def slashtag_edit_argument( self, ctx: commands.Context, tag: GuildTagConverter, argument: str", "ctx.send(f\"Application ID set to `{id}`.\") @commands.check(dev_check) @slashtagset.command(\"addeval\") async def slashtagset_addeval(self,", "await self.send_and_query_response(ctx, query) choices = [] for choice_text in response.split(\"|\"):", "command_type == ApplicationCommandType.CHAT_INPUT: pred = MessagePredicate.yes_or_no(ctx) try: await self.send_and_query_response( ctx,", "redbot.core import commands from redbot.core.utils.chat_formatting import box, humanize_list, inline, pagify", "is_global=False) async def create_slash_tag( self, ctx: commands.Context, tag_name: str, tagscript:", "= discord.Embed(title=\"Slash Tag Stats\", color=await ctx.embed_color()) embeds = [] for", "GuildTagConverter): \"\"\"Get a slash tag's raw content.\"\"\" await tag.send_raw_tagscript(ctx) @classmethod", "user ID. This only applies to legacy bots. If you", "options: List[SlashOption] ) -> List[SlashOption]: added_required = False for i", "Tag management with TagScript. These commands use TagScriptEngine. 
[This site](https://phen-cogs.readthedocs.io/en/latest/index.html)", "ApplicationCommandType, SlashOption, SlashTag from ..testing.button_menus import menu as button_menu from", "redbot.core.utils.predicates import MessagePredicate from tabulate import tabulate from ..abc import", "int = 60, ) -> str: if pred is None:", "for this server.\"\"\" pred = MessagePredicate.yes_or_no(ctx) try: await self.send_and_query_response( ctx,", "= ApplicationOptionChoice(*choice_text.split(\":\", 1)) choices.append(choice) if len(choices) >= CHOICE_LIMIT: await ctx.send(f\"Reached", "self.view_slash_tags(ctx, tags, is_global=True) @slashtag_global.command(\"usage\", aliases=[\"stats\"]) @copy_doc(slashtag_usage) async def slashtag_global_usage(self, ctx:", "try: description = await self.send_and_query_response( ctx, \"What should the tag", "tag.tagscript if len(tagscript) > limit - 3: tagscript = tagscript[:limit]", "= e.copy() embed.description = page embed.set_footer(text=f\"{index}/{len(pages)} | {len(tags)} {slash_tags}\") embeds.append(embed)", "option_type = \"STRING\" else: choices = [] option_type = SlashOptionType[option_type.upper()]", "self.guild_tag_cache[ctx.guild.id] if not tags: return await ctx.send(\"There are no slash", "slashtag_edit_tagscript( self, ctx: commands.Context, tag: GuildTagConverter, *, tagscript: TagScriptConverter ):", "commands.Context, tag_name: TagName(check_global=False), *, link: PastebinConverter, ): \"\"\" Add a", "THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE", "= match.group(0) repl += name if name.istitle(): repl = repl.title()", "tags for this server.\"\"\" pred = MessagePredicate.yes_or_no(ctx) try: await self.send_and_query_response(", "else: description = \"\" if command_type == ApplicationCommandType.CHAT_INPUT: pred =", "command_type=ApplicationCommandType.USER ) @commands.mod_or_permissions(manage_guild=True) @slashtag.command(\"pastebin\", aliases=[\"++\"]) async def slashtag_pastebin( self, ctx:", "await ctx.send(\"An eval 
command is already registered.\") slasheval = ApplicationCommand(", "tagscript: str, *, is_global: bool = False, command_type: ApplicationCommandType =", "if not self.eval_command: return await ctx.send(\"The eval command hasn't been", "be less than or equal to 100 characters.*\", ] name_pred", "List[SlashOption] ) -> List[SlashOption]: added_required = False for i in", "FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL", "<https://discordapp.com/oauth2/authorize\" f\"?client_id={self.bot.user.id}&scope=bot%20applications.commands>\" ) return await ctx.send(text) except Exception: log.error(\"Failed to", "make this argument optional, all following arguments must also be", "@slashtag_edit.command(\"description\") async def slashtag_edit_description( self, ctx: commands.Context, tag: GuildTagConverter, *,", "slash tags on this server? (Y/n)\", pred ) except asyncio.TimeoutError:", "@slashtag_global_edit.command(\"description\") @copy_doc(slashtag_edit_description) async def slashtag_global_edit_description( self, ctx: commands.Context, tag: GlobalTagConverter,", "This only applies to legacy bots. If you don't know", "name_pred = MessagePredicate.regex(ARGUMENT_NAME_DESCRIPTION, ctx) await self.send_and_query_response(ctx, \"\\n\".join(name_desc), name_pred) match =", "ctx: commands.Context, *, tag: GlobalTagConverter): await tag.send_raw_tagscript(ctx) @slashtag_global.command(\"list\") @copy_doc(slashtag_list) async", "slash tags.\"\"\" tags = self.guild_tag_cache[ctx.guild.id] if not tags: return await", "tag: GlobalTagConverter): await ctx.send(await tag.delete()) @slashtag_global.command(\"raw\") @copy_doc(slashtag_raw) async def slashtag_global_raw(self,", "unload the SlashTag interaction development test cog. 
\"\"\" target_state =", "from ..http import ApplicationOptionChoice, SlashOptionType from ..objects import ApplicationCommand, ApplicationCommandType,", "IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER", "ctx, \"\\n\".join(option_query), MessagePredicate.lower_contained_in(valid_option_types, ctx), ) if option_type.lower() == \"choices\": choices", "def slashtag_edit_description( self, ctx: commands.Context, tag: GuildTagConverter, *, description: str", "from the database.\"\"\" await self.restore_tags(ctx, ctx.guild) @commands.is_owner() @slashtag.command(\"clear\", hidden=True) async", "GlobalTagConverter, *, name: TagName(global_priority=True), ): await ctx.send(await tag.edit_name(name)) @slashtag_global_edit.command(\"description\") @copy_doc(slashtag_edit_description)", "MessagePredicate.regex(ARGUMENT_NAME_DESCRIPTION, ctx) await self.send_and_query_response(ctx, \"\\n\".join(name_desc), name_pred) match = name_pred.result name,", "also be optional.*\", pred, ) required = pred.result else: await", "again: <https://discordapp.com/oauth2/authorize\" f\"?client_id={self.bot.user.id}&scope=bot%20applications.commands>\" ) return await ctx.send(text) except Exception: log.error(\"Failed", "else: await ctx.send( \"This argument was automatically made optional as", "@commands.mod_or_permissions(manage_guild=True) @slashtag.command(\"pastebin\", aliases=[\"++\"]) async def slashtag_pastebin( self, ctx: commands.Context, tag_name:", "await menu(ctx, embeds, DEFAULT_CONTROLS) @slashtag.command(\"usage\", aliases=[\"stats\"]) async def slashtag_usage(self, ctx:", "..utils import ARGUMENT_NAME_DESCRIPTION, chunks, dev_check TAG_RE = re.compile(r\"(?i)(\\[p\\])?\\b(slash\\s?)?tag'?s?\\b\") CHOICE_RE =", "tag usage guide](https://phen-cogs.readthedocs.io/en/latest/slashtags/slashtags.html) \"\"\" await self.create_slash_tag(ctx, tag_name, tagscript, is_global=False) async", "has documentation on how to use TagScript blocks. 
\"\"\" @commands.mod_or_permissions(manage_guild=True)", "ctx: commands.Context, tag: GuildTagConverter, *, tagscript: TagScriptConverter ): \"\"\"Edit a", "application ID for [botname] slash commands if it differs from", "required=True) ], ) await slasheval.register() await self.config.eval_command.set(slasheval.id) self.eval_command = slasheval.id", "on how to use TagScript blocks. \"\"\" @commands.mod_or_permissions(manage_guild=True) @slashtag.command(\"add\", aliases=[\"create\",", "if isinstance(original, commands.Command) else original.__doc__ doc = TAG_RE.sub(_sub, doc) if", "\":\" not in choice_text: await ctx.send( f\"Failed to parse `{choice_text}`", "tag, tagscript=tagscript) @slashtag_global_edit.command(\"name\") @copy_doc(slashtag_edit_name) async def slashtag_global_edit_name( self, ctx: commands.Context,", "self.send_and_query_response(ctx, \"\\n\".join(name_desc), name_pred) match = name_pred.result name, description = match.group(1),", "debugging eval command. Only bot owners can use this.\", options=[", "break return options async def send_and_query_response( self, ctx: commands.Context, query:", "return await ctx.send(text) except Exception: log.error(\"Failed to create command {command!r}", "await self.slashtag_edit(ctx, tag, tagscript=tagscript) @slashtag_edit.command(\"name\") async def slashtag_edit_name( self, ctx:", "async def slashtag_global_remove(self, ctx: commands.Context, *, tag: GlobalTagConverter): await ctx.send(await", "if i == 10: break pred = MessagePredicate.yes_or_no(ctx) try: await", "doc = original.help if isinstance(original, commands.Command) else original.__doc__ doc =", "(maximum 100 characters)\", pred=MessagePredicate.length_less(101, ctx), ) except asyncio.TimeoutError: return await", "def get_choices(self, ctx: commands.Context) -> List[ApplicationOptionChoice]: query = ( \"Send", "ctx: commands.Context, *, added_required: bool = False ) -> SlashOption:", "await self.create_slash_tag(ctx, tag_name, tagscript, 
is_global=True) @commands.mod_or_permissions(manage_guild=True) @slashtag_global.command(\"message\") @copy_doc(slashtag_message) async def", "await ctx.send(\"Query timed out, not adding arguments.\") else: if pred.result", "pass await self.config.eval_command.clear() self.eval_command = None await ctx.send(\"`/eval` has been", "asyncio.TimeoutError: await ctx.send(\"Query timed out, not adding additional arguments.\") break", "chunks(counter.most_common(), 10): usage_chart = box(tabulate(usage_data, headers=(\"Tag\", \"Uses\")), \"prolog\") embed =", "typing import Dict, List, Union import discord from redbot.core import", "description should be split by a `:`.\", \"Example: `member:A member", "@slashtag_global.command(\"raw\") @copy_doc(slashtag_raw) async def slashtag_global_raw(self, ctx: commands.Context, *, tag: GlobalTagConverter):", "*, tagscript: TagScriptConverter, ): await self.create_slash_tag( ctx, tag_name, tagscript, is_global=True,", "has been deleted.\") @slashtagset.command(\"testing\") async def slashtagset_testing(self, ctx: commands.Context, true_or_false:", "if target_state else \"unloaded\" return await ctx.send(f\"The SlashTag interaction testing", "slash tag.\"\"\" await ctx.send(await tag.delete()) @slashtag.command(\"info\") async def slashtag_info(self, ctx:", "= MessagePredicate.yes_or_no(ctx) try: await self.send_and_query_response( ctx, \"Are you sure you", "OTHER DEALINGS IN THE SOFTWARE. 
\"\"\" import asyncio import logging", "\"\"\" await tag.edit_options(ctx) @slashtag_edit.command(\"argument\", aliases=[\"option\"]) async def slashtag_edit_argument( self, ctx:", "= MessagePredicate.regex(ARGUMENT_NAME_DESCRIPTION, ctx) await self.send_and_query_response(ctx, \"\\n\".join(name_desc), name_pred) match = name_pred.result", "info on the command options is needed raise tag =", "command = ApplicationCommand( self, name=tag_name, description=description, guild_id=guild_id, options=options, type=command_type, )", "await self.send_and_query_response(ctx, \"\\n\".join(name_desc), name_pred) match = name_pred.result name, description =", "*, name: TagName(global_priority=True), ): await ctx.send(await tag.edit_name(name)) @slashtag_global_edit.command(\"description\") @copy_doc(slashtag_edit_description) async", "self.global_tag_cache if not tags: return await ctx.send(\"There are no global", "you would like to add as choices to \" \"the", "original.help if isinstance(original, commands.Command) else original.__doc__ doc = TAG_RE.sub(_sub, doc)", "choice_text in response.split(\"|\"): if \":\" not in choice_text: await ctx.send(", "distribute, sublicense, and/or sell copies of the Software, and to", "DEALINGS IN THE SOFTWARE. \"\"\" import asyncio import logging import", "\"\"\" Add a slash tag with a Pastebin link. 
\"\"\"", "def slashtag_global_remove(self, ctx: commands.Context, *, tag: GlobalTagConverter): await ctx.send(await tag.delete())", ") await ctx.send(await tag.initialize()) async def get_options( self, ctx: commands.Context,", "tags = self.guild_tag_cache[guild.id] if guild else self.global_tag_cache if not tags:", "[ self.format_tagscript(tag) for tag in sorted(tags.values(), key=lambda t: t.name) ]", "await ctx.send(await tag.edit_name(name)) @slashtag_global_edit.command(\"description\") @copy_doc(slashtag_edit_description) async def slashtag_global_edit_description( self, ctx:", "argument type be?\", f\"Valid option types: {humanize_list([inline(n) for n in", "has no slash tags.\" if guild else \"There are no", "slashtag_global_raw(self, ctx: commands.Context, *, tag: GlobalTagConverter): await tag.send_raw_tagscript(ctx) @slashtag_global.command(\"list\") @copy_doc(slashtag_list)", "SOFTWARE. \"\"\" import asyncio import logging import re import types", "ctx.send(await tag.delete()) @slashtag.command(\"info\") async def slashtag_info(self, ctx: commands.Context, *, tag:", "async def slashtag_raw(self, ctx: commands.Context, *, tag: GuildTagConverter): \"\"\"Get a", "adding additional arguments.\") break if pred.result is False: break return", "invite link and try again: <https://discordapp.com/oauth2/authorize\" f\"?client_id={self.bot.user.id}&scope=bot%20applications.commands>\" ) return await", "for n in valid_option_types])}\", \"(select `string` if you don't understand)\",", "of this server.`\\n\", \"*Slash argument names may not exceed 32", "ctx: commands.Context, *, tag: GuildTagConverter): \"\"\"Delete a slash tag.\"\"\" await", "a single slash tag's argument by name.\"\"\" await tag.edit_single_option(ctx, argument)", "self, ctx: commands.Context, tag_name: str, tagscript: str, *, is_global: bool", "self.send_and_query_response(ctx, query) choices = [] for choice_text in response.split(\"|\"): if", "description=\"SlashTags debugging eval command. 
Only bot owners can use this.\",", "\"This argument was automatically made optional as the previous one", "interaction development test cog. \"\"\" target_state = ( true_or_false if", "decorator(overriden: Union[commands.Command, types.FunctionType]): doc = original.help if isinstance(original, commands.Command) else", "match.group(1): return \"[p]slashtag global\" repl = \"global \" name =", "options async def send_and_query_response( self, ctx: commands.Context, query: str, pred:", "this means, you don't need to worry about it. \"\"\"", "name or value exceeded the 100 character limit.\", delete_after=15, )", "discord from redbot.core import commands from redbot.core.utils.chat_formatting import box, humanize_list,", "may not exceed 32 characters and can only contain characters", "{humanize_list([inline(n) for n in valid_option_types])}\", \"(select `string` if you don't", "Settings\", description=\"\\n\".join(description) ) await ctx.send(embed=embed) @slashtagset.command(\"appid\") async def slashtagset_appid(self, ctx:", "tag that is stored on this server.\"\"\" await tag.send_info(ctx) @slashtag.command(\"raw\")", "\"weren't seperated by a `:`.\", delete_after=15, ) continue if not", "description = await self.send_and_query_response( ctx, \"What should the tag description", "on guild {ctx.guild!r}\") # exc info unneeded since error handler", "except discord.HTTPException: pass await self.config.eval_command.clear() self.eval_command = None await ctx.send(\"`/eval`", "from ..testing.button_menus import menu as button_menu from ..utils import ARGUMENT_NAME_DESCRIPTION,", "loaded: {testing_enabled}\", ] embed = discord.Embed( color=0xC9C9C9, title=\"SlashTags Settings\", description=\"\\n\".join(description)", "async def slashtag_pastebin( self, ctx: commands.Context, tag_name: TagName(check_global=False), *, link:", "to do so, subject to the following conditions: The above", "options is needed raise tag = SlashTag( self, tagscript, guild_id=guild_id,", 
"self.get_options(ctx, options) command = ApplicationCommand( self, name=tag_name, description=description, guild_id=guild_id, options=options,", "was optional.\", delete_after=15, ) required = False return SlashOption( name=name.lower(),", "WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.", "MixinMeta from ..converters import ( GlobalTagConverter, GuildTagConverter, PastebinConverter, TagConverter, TagName,", "limit - 3: tagscript = tagscript[:limit] + \"...\" tagscript =", "THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY", "MessagePredicate.yes_or_no(ctx) try: await self.send_and_query_response( ctx, \"Are you sure you want", "await ctx.send(await tag.delete()) @slashtag.command(\"info\") async def slashtag_info(self, ctx: commands.Context, *,", "not deleting slash tags.\") guild: discord.Guild = ctx.guild await self.http.put_guild_slash_commands(guild.id,", "LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,", "await self.config.testing_enabled() ) if target_state is self.testing_enabled: loaded = \"loaded\"", "Reinvite me \" \"with this invite link and try again:", "str ): await tag.edit_single_option(ctx, argument) @slashtag_global.command(\"remove\", aliases=[\"delete\", \"-\"]) @copy_doc(slashtag_remove) async", "view_slash_tags( self, ctx: commands.Context, tags: Dict[int, SlashTag], *, is_global: bool,", "def slashtag_info(self, ctx: commands.Context, *, tag: TagConverter): \"\"\"Get info about", ") from ..http import ApplicationOptionChoice, SlashOptionType from ..objects import ApplicationCommand,", "False ) -> SlashOption: name_desc = [ \"What should the", "is already {loaded}.\") await self.config.testing_enabled.set(target_state) if target_state: loaded = \"Loaded\"", "slash tags.\" if guild else \"There are no global slash", "SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,", "False: break return options async def send_and_query_response( self, ctx: commands.Context,", "a slash 
tag's name.\"\"\" await ctx.send(await tag.edit_name(name)) @slashtag_edit.command(\"description\") async def", "( true_or_false if true_or_false is not None else not await", "[]) for tag in copy(self.guild_tag_cache[guild.id]).values(): tag.remove_from_cache() tag.command.remove_from_cache() del tag self.guild_tag_cache[guild.id].clear()", "@copy_doc(slashtag_user) async def slashtag_global_user( self, ctx: commands.Context, tag_name: TagName(global_priority=True, check_regex=False),", "from the bot user ID. This only applies to legacy", ") -> SlashOption: name_desc = [ \"What should the argument", "tagscript, is_global=False) async def create_slash_tag( self, ctx: commands.Context, tag_name: str,", "ctx.send(\"An eval command is already registered.\") slasheval = ApplicationCommand( self,", "tag: GuildTagConverter, *, tagscript: TagScriptConverter ): \"\"\"Edit a slash tag.\"\"\"", "@commands.group(aliases=[\"slashset\"]) async def slashtagset(self, ctx: commands.Context): \"\"\"Manage SlashTags settings.\"\"\" @slashtagset.command(\"settings\")", "await self.create_slash_tag(ctx, tag_name, link, is_global=True) @slashtag_global.group(\"edit\", aliases=[\"e\"], invoke_without_command=True) @copy_doc(slashtag_edit) async", "exc_info=error ) text = ( \"Looks like I don't have", "[ \"What should the argument name be and description be?\",", "= None if is_global else ctx.guild.id if command_type == ApplicationCommandType.CHAT_INPUT:", "copyright notice and this permission notice shall be included in", "\" \"with this invite link and try again: <https://discordapp.com/oauth2/authorize\" f\"?client_id={self.bot.user.id}&scope=bot%20applications.commands>\"", "commands.Context, tag: GlobalTagConverter): await tag.edit_options(ctx) @slashtag_global_edit.command(\"argument\", aliases=[\"option\"]) @copy_doc(slashtag_edit_argument) async def", "@slashtagset.command(\"testing\") async def slashtagset_testing(self, ctx: commands.Context, true_or_false: bool = None):", 
"slashtag_pastebin( self, ctx: commands.Context, tag_name: TagName(check_global=False), *, link: PastebinConverter, ):", "command: {eval_command}\", f\"Test cog loaded: {testing_enabled}\", ] embed = discord.Embed(", "site](https://phen-cogs.readthedocs.io/en/latest/index.html) has documentation on how to use TagScript blocks. \"\"\"", "embeds) @slashtag.command(\"list\") async def slashtag_list(self, ctx: commands.Context): \"\"\"View stored slash", "= [] for usage_data in chunks(counter.most_common(), 10): usage_chart = box(tabulate(usage_data,", "text = ( \"Looks like I don't have permission to", "interaction testing cog is already {loaded}.\") await self.config.testing_enabled.set(target_state) if target_state:", "= [] for choice_text in response.split(\"|\"): if \":\" not in", "ctx.guild await self.http.put_guild_slash_commands(guild.id, []) for tag in copy(self.guild_tag_cache[guild.id]).values(): tag.remove_from_cache() tag.command.remove_from_cache()", "and to permit persons to whom the Software is furnished", "you like to add arguments to this tag? (Y/n)\", pred", "slashtag_global_restore(self, ctx: commands.Context): await self.restore_tags(ctx, None) @commands.is_owner() @commands.group(aliases=[\"slashset\"]) async def", "hidden=True) async def slashtag_restore(self, ctx: commands.Context): \"\"\"Restore all slash tags", "SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY", "SlashTag], *, is_global: bool, ): description = [ self.format_tagscript(tag) for", "25 log = logging.getLogger(\"red.phenom4n4n.slashtags.commands\") def _sub(match: re.Match) -> str: if", "See this slash tag usage stats. **Example:** `[p]slashtag usage` \"\"\"", "Add a slash tag with a Pastebin link. 
\"\"\" await", "message.content async def get_choices(self, ctx: commands.Context) -> List[ApplicationOptionChoice]: query =", "aliases=[\"create\", \"+\"]) async def slashtag_add( self, ctx: commands.Context, tag_name: TagName(check_global=False),", "you like to add another argument? (Y/n)\", pred ) except", "whom the Software is furnished to do so, subject to", "embed.description = page embed.set_footer(text=f\"{index}/{len(pages)} | {len(tags)} {slash_tags}\") embeds.append(embed) # await", "ctx: commands.Context, tag: GuildTagConverter, *, name: TagName(check_global=False) ): \"\"\"Edit a", "slash tags.\" ) return await ctx.send(message) counter = Counter({tag.name: tag.uses", "automatically made optional as the previous one was optional.\", delete_after=15,", "@slashtag.command(\"add\", aliases=[\"create\", \"+\"]) async def slashtag_add( self, ctx: commands.Context, tag_name:", "AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT", "button_menu from ..utils import ARGUMENT_NAME_DESCRIPTION, chunks, dev_check TAG_RE = re.compile(r\"(?i)(\\[p\\])?\\b(slash\\s?)?tag'?s?\\b\")", "self.show_slash_tag_usage(ctx, ctx.guild) @commands.is_owner() @slashtag.command(\"restore\", hidden=True) async def slashtag_restore(self, ctx: commands.Context):", "return await ctx.send(\"The eval command hasn't been registered.\") try: await", "in all copies or substantial portions of the Software. THE", "import box, humanize_list, inline, pagify from redbot.core.utils.menus import DEFAULT_CONTROLS, menu", "2020-present phenom4n4n Permission is hereby granted, free of charge, to", "\"+\"]) async def slashtag_add( self, ctx: commands.Context, tag_name: TagName(check_global=False), *,", "here. 
Reinvite me \" \"with this invite link and try", "slashtag_global_edit_tagscript( self, ctx: commands.Context, tag: GlobalTagConverter, *, tagscript: TagScriptConverter ):", "of this software and associated documentation files (the \"Software\"), to", "import logging import re import types from collections import Counter", "try: option = await self.get_option(ctx, added_required=added_required) if not option.required: added_required", "ctx: commands.Context, tag: GlobalTagConverter, *, name: TagName(global_priority=True), ): await ctx.send(await", "optional, all following arguments must also be optional.*\", pred, )", "sublicense, and/or sell copies of the Software, and to permit", "List[ApplicationOptionChoice]: query = ( \"Send the list of choice names", "this argument optional, all following arguments must also be optional.*\",", "\"should be seperated by `|`. Example:\\n`dog:Doggo|cat:Catto`\" ) response = await", "type be?\", f\"Valid option types: {humanize_list([inline(n) for n in valid_option_types])}\",", "-> List[ApplicationOptionChoice]: query = ( \"Send the list of choice", ") return await ctx.send(message) counter = Counter({tag.name: tag.uses for tag", "\"❎\" description = [ f\"Application ID: **{self.application_id}**\", f\"Eval command: {eval_command}\",", "choice \" \"should be seperated by `|`. Example:\\n`dog:Doggo|cat:Catto`\" ) response", "False, command_type: ApplicationCommandType = ApplicationCommandType.CHAT_INPUT, ): options: List[SlashOption] = []", "check_regex=False), *, tagscript: TagScriptConverter, ): \"\"\" Add a user command", "check_regex=False), *, tagscript: TagScriptConverter, ): await self.create_slash_tag( ctx, tag_name, tagscript,", "a slash tag with a Pastebin link. 
\"\"\" await self.create_slash_tag(ctx,", "hidden=True) @copy_doc(slashtag_restore) async def slashtag_global_restore(self, ctx: commands.Context): await self.restore_tags(ctx, None)", "async def slashtagset(self, ctx: commands.Context): \"\"\"Manage SlashTags settings.\"\"\" @slashtagset.command(\"settings\") async", "unneeded since error handler should print it, however info on", "was automatically made optional as the previous one was optional.\",", "name.\"\"\" await tag.edit_single_option(ctx, argument) @commands.mod_or_permissions(manage_guild=True) @slashtag.command(\"remove\", aliases=[\"delete\", \"-\"]) async def", "@slashtagset.command(\"settings\") async def slashtagset_settings(self, ctx: commands.Context): \"\"\"View SlashTags settings.\"\"\" eval_command", "set the application ID for [botname] slash commands if it", "slash tags.\") await self.view_slash_tags(ctx, tags, is_global=True) @slashtag_global.command(\"usage\", aliases=[\"stats\"]) @copy_doc(slashtag_usage) async", "\"loaded\" if target_state else \"unloaded\" return await ctx.send(f\"The SlashTag interaction", "understand)\", ] option_type = await self.send_and_query_response( ctx, \"\\n\".join(option_query), MessagePredicate.lower_contained_in(valid_option_types, ctx),", "tag: GuildTagConverter, *, name: TagName(check_global=False) ): \"\"\"Edit a slash tag's", "this permission notice shall be included in all copies or", ">= CHOICE_LIMIT: await ctx.send(f\"Reached max choices ({CHOICE_LIMIT}).\") break return choices", "embeds, DEFAULT_CONTROLS) @slashtag.command(\"usage\", aliases=[\"stats\"]) async def slashtag_usage(self, ctx: commands.Context): \"\"\"", "\"\\n\".join(description) e = discord.Embed(color=await ctx.embed_color()) if is_global: slash_tags = \"global", "ctx: commands.Context): \"\"\"Remove the slash eval command.\"\"\" if not self.eval_command:", "A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE", "await ctx.send(await tag.edit_name(name)) @slashtag_edit.command(\"description\") async def slashtag_edit_description( self, ctx: commands.Context,", "= doc else: overriden.__doc__ = doc return overriden return decorator", "button_menu(ctx, embeds) @slashtag.command(\"list\") async def slashtag_list(self, ctx: commands.Context): \"\"\"View stored", "await ctx.send(\"Adding this argument timed out.\", delete_after=15) break options.append(option) if", "await self.config.testing_enabled.set(target_state) if target_state: loaded = \"Loaded\" self.add_test_cog() else: loaded", "@copy_doc(slashtag_edit_tagscript) async def slashtag_global_edit_tagscript( self, ctx: commands.Context, tag: GlobalTagConverter, *,", "re.compile(r\".{1,100}:.{1,100}\") CHOICE_LIMIT = 25 log = logging.getLogger(\"red.phenom4n4n.slashtags.commands\") def _sub(match: re.Match)", "name = match.group(0) repl += name if name.istitle(): repl =", "..converters import ( GlobalTagConverter, GuildTagConverter, PastebinConverter, TagConverter, TagName, TagScriptConverter, )", "\"\" if command_type == ApplicationCommandType.CHAT_INPUT: pred = MessagePredicate.yes_or_no(ctx) try: await", "def slashtag_global(self, ctx: commands.Context): pass @slashtag_global.command(\"add\") @copy_doc(slashtag_add) async def slashtag_global_add(", "tag.remove_from_cache() tag.command.remove_from_cache() del tag self.guild_tag_cache[guild.id].clear() await self.config.guild(guild).tags.clear() await ctx.send(\"Tags deleted.\")", "= discord.Embed( color=0xC9C9C9, title=\"SlashTags Settings\", description=\"\\n\".join(description) ) await ctx.send(embed=embed) @slashtagset.command(\"appid\")", "import tabulate from ..abc import MixinMeta from ..converters import (", "-> List[SlashOption]: added_required = False for i in range(1, 11):", "CHOICE_LIMIT: await ctx.send(f\"Reached max choices ({CHOICE_LIMIT}).\") break return choices async", "IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, 
INCLUDING", "testing_enabled = \"✅\" if self.testing_enabled else \"❎\" description = [", "KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE", "tag_name: TagName(check_global=False, check_regex=False), *, tagscript: TagScriptConverter, ): \"\"\" Add a", "embeds = [] for usage_data in chunks(counter.most_common(), 10): usage_chart =", "tags, is_global=True) @slashtag_global.command(\"usage\", aliases=[\"stats\"]) @copy_doc(slashtag_usage) async def slashtag_global_usage(self, ctx: commands.Context):", "from copy import copy from typing import Dict, List, Union", "from ..objects import ApplicationCommand, ApplicationCommandType, SlashOption, SlashTag from ..testing.button_menus import", "async def slashtag_message( self, ctx: commands.Context, tag_name: TagName(check_global=False, check_regex=False), *,", "slashtag_global_edit_arguments(self, ctx: commands.Context, tag: GlobalTagConverter): await tag.edit_options(ctx) @slashtag_global_edit.command(\"argument\", aliases=[\"option\"]) @copy_doc(slashtag_edit_argument)", "list of choice names and values you would like to", "tag: GlobalTagConverter, *, tagscript: TagScriptConverter ): await self.slashtag_global_edit(ctx, tag, tagscript=tagscript)", "timed out, not adding additional arguments.\") break if pred.result is", "shall be included in all copies or substantial portions of", "@slashtag_global.group(\"edit\", aliases=[\"e\"], invoke_without_command=True) @copy_doc(slashtag_edit) async def slashtag_global_edit( self, ctx: commands.Context,", "FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN", "and try again: <https://discordapp.com/oauth2/authorize\" f\"?client_id={self.bot.user.id}&scope=bot%20applications.commands>\" ) return await ctx.send(text) except", "slashtag_global_edit_name( self, ctx: commands.Context, tag: GlobalTagConverter, *, name: TagName(global_priority=True), ):", "slashtag_usage(self, ctx: commands.Context): \"\"\" See this slash tag usage stats.", "guild: discord.Guild = 
ctx.guild await self.http.put_guild_slash_commands(guild.id, []) for tag in", "= f\"`{tag.type.get_prefix()}{tag.name}` - \" limit -= len(title) tagscript = tag.tagscript", "for choice_text in response.split(\"|\"): if \":\" not in choice_text: await", "Slash Commands here. Reinvite me \" \"with this invite link", "slashtagset_testing(self, ctx: commands.Context, true_or_false: bool = None): \"\"\" Load or", "Slash Tags\", icon_url=ctx.guild.icon_url) embeds = [] pages = list(pagify(description)) for", "commands.Context): \"\"\"Remove the slash eval command.\"\"\" if not self.eval_command: return", "= [ \"What should the argument name be and description", "await self.config.guild(guild).tags.clear() await ctx.send(\"Tags deleted.\") @commands.is_owner() @slashtag.group(\"global\") @copy_doc(slashtag) async def", "ID: **{self.application_id}**\", f\"Eval command: {eval_command}\", f\"Test cog loaded: {testing_enabled}\", ]", "@slashtag_edit.command(\"argument\", aliases=[\"option\"]) async def slashtag_edit_argument( self, ctx: commands.Context, tag: GuildTagConverter,", "link: PastebinConverter, ): \"\"\" Add a slash tag with a", "return decorator class Commands(MixinMeta): @commands.guild_only() @commands.group(aliases=[\"st\"]) async def slashtag(self, ctx:", "[] option_type = SlashOptionType[option_type.upper()] if not added_required: pred = MessagePredicate.yes_or_no(ctx)", "str: title = f\"`{tag.type.get_prefix()}{tag.name}` - \" limit -= len(title) tagscript", "def slashtag_global_edit_description( self, ctx: commands.Context, tag: GlobalTagConverter, *, description: str", "discord.Embed( color=0xC9C9C9, title=\"SlashTags Settings\", description=\"\\n\".join(description) ) await ctx.send(embed=embed) @slashtagset.command(\"appid\") async", "ctx.send(\"`/eval` has been registered.\") @commands.check(dev_check) @slashtagset.command(\"rmeval\") async def slashtagset_rmeval(self, ctx:", "in valid_option_types])}\", \"(select `string` if you don't understand)\", ] 
option_type", "ctx: commands.Context): \"\"\"View SlashTags settings.\"\"\" eval_command = f\"✅ (**{self.eval_command}**)\" if", "pred.result is False: break return options async def send_and_query_response( self,", "not exceed 32 characters and can only contain characters \"", "only applies to legacy bots. If you don't know what", "OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE", "async def view_slash_tags( self, ctx: commands.Context, tags: Dict[int, SlashTag], *,", "Commands here. Reinvite me \" \"with this invite link and", "limit.\", delete_after=15, ) continue choice = ApplicationOptionChoice(*choice_text.split(\":\", 1)) choices.append(choice) if", "or self.bot.user.id await self.config.application_id.set(app_id) self.application_id = app_id await ctx.send(f\"Application ID", "def view_slash_tags( self, ctx: commands.Context, tags: Dict[int, SlashTag], *, is_global:", "async def slashtag_global_edit( self, ctx: commands.Context, tag: GlobalTagConverter, *, tagscript:", "@slashtag.command(\"remove\", aliases=[\"delete\", \"-\"]) async def slashtag_remove(self, ctx: commands.Context, *, tag:", "the Software is furnished to do so, subject to the", "): \"\"\" Add a slash tag with a Pastebin link.", "alphanumeric or '_' or '-'.\", \"The argument description must be", "SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.", "): \"\"\"Edit a slash tag's TagScript.\"\"\" await self.slashtag_edit(ctx, tag, tagscript=tagscript)", "async def slashtag_global_list(self, ctx: commands.Context): tags = self.global_tag_cache if not", "tabulate import tabulate from ..abc import MixinMeta from ..converters import", "out, not adding arguments.\") else: if pred.result is True: await", "tag description to be? 
(maximum 100 characters)\", pred=MessagePredicate.length_less(101, ctx), )", "self.create_slash_tag( ctx, tag_name, tagscript, is_global=False, command_type=ApplicationCommandType.USER ) @commands.mod_or_permissions(manage_guild=True) @slashtag.command(\"pastebin\", aliases=[\"++\"])", "hasn't been registered.\") try: await self.http.remove_slash_command(self.eval_command) except discord.HTTPException: pass await", "and description be?\", \"The argument name and description should be", "or equal to 100 characters.*\", ] name_pred = MessagePredicate.regex(ARGUMENT_NAME_DESCRIPTION, ctx)", "tags on this server.\") await self.view_slash_tags(ctx, tags, is_global=False) async def", "= ApplicationCommandType.CHAT_INPUT, ): options: List[SlashOption] = [] guild_id = None", "ApplicationCommandType.CHAT_INPUT: try: description = await self.send_and_query_response( ctx, \"What should the", "types from collections import Counter from copy import copy from", "argument required? (Y/n)\\n*Keep in mind that if you choose to", "\") return f\"{title}{discord.utils.escape_markdown(tagscript)}\" async def view_slash_tags( self, ctx: commands.Context, tags:", "from collections import Counter from copy import copy from typing", "create command {command!r} on guild {ctx.guild!r}\", exc_info=error ) text =", "*, tag: GuildTagConverter): \"\"\"Delete a slash tag.\"\"\" await ctx.send(await tag.delete())", "aliases=[\"options\"]) @copy_doc(slashtag_edit_arguments) async def slashtag_global_edit_arguments(self, ctx: commands.Context, tag: GlobalTagConverter): await", ") required = pred.result else: await ctx.send( \"This argument was", "GuildTagConverter, *, tagscript: TagScriptConverter ): \"\"\"Edit a slash tag's TagScript.\"\"\"", "None): tags = self.guild_tag_cache[guild.id] if guild else self.global_tag_cache if not", "arguments must also be optional.*\", pred, ) required = pred.result", "NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE", "# await menu(ctx, embeds, 
DEFAULT_CONTROLS) await button_menu(ctx, embeds) @slashtag.command(\"list\") async", "TagScript blocks. \"\"\" @commands.mod_or_permissions(manage_guild=True) @slashtag.command(\"add\", aliases=[\"create\", \"+\"]) async def slashtag_add(", "PastebinConverter, ): \"\"\" Add a slash tag with a Pastebin", "timed out.\") else: description = \"\" if command_type == ApplicationCommandType.CHAT_INPUT:", "import MessagePredicate from tabulate import tabulate from ..abc import MixinMeta", "about a slash tag that is stored on this server.\"\"\"", "slashtagset_addeval(self, ctx: commands.Context): \"\"\"Add a slash eval command for debugging.\"\"\"", "commands.Context): \"\"\"View stored slash tags.\"\"\" tags = self.guild_tag_cache[ctx.guild.id] if not", "option_type.lower() == \"choices\": choices = await self.get_choices(ctx) option_type = \"STRING\"", "repl = \"global \" name = match.group(0) repl += name", "except asyncio.TimeoutError: return await ctx.send(\"Timed out, not deleting slash tags.\")", "guild: discord.Guild = None): tags = self.guild_tag_cache[guild.id] if guild else", "def slashtag_remove(self, ctx: commands.Context, *, tag: GuildTagConverter): \"\"\"Delete a slash", "not self.eval_command: return await ctx.send(\"The eval command hasn't been registered.\")", "Permission is hereby granted, free of charge, to any person", "ctx: commands.Context, id: int = None): \"\"\" Manually set the", "60, ) -> str: if pred is None: pred =", "option = await self.get_option(ctx, added_required=added_required) if not option.required: added_required =", "user command tag with TagScript. [Slash tag usage guide](https://phen-cogs.readthedocs.io/en/latest/slashtags/slashtags.html) \"\"\"", "each choice \" \"should be seperated by `|`. 
Example:\\n`dog:Doggo|cat:Catto`\" )", "ctx: commands.Context, *, tag: GuildTagConverter): \"\"\"Get a slash tag's raw", "tag: GuildTagConverter, argument: str ): \"\"\"Edit a single slash tag's", "tags: return await ctx.send(\"There are no slash tags on this", "\"global slash tags\" e.set_author(name=\"Global Slash Tags\", icon_url=ctx.me.avatar_url) else: slash_tags =", "to legacy bots. If you don't know what this means,", "global slash tags.\" ) return await ctx.send(message) counter = Counter({tag.name:", "argument names may not exceed 32 characters and can only", "= name_pred.result name, description = match.group(1), match.group(2) valid_option_types = [", "slash tag usage stats. **Example:** `[p]slashtag usage` \"\"\" await self.show_slash_tag_usage(ctx,", "slashtag_global_user( self, ctx: commands.Context, tag_name: TagName(global_priority=True, check_regex=False), *, tagscript: TagScriptConverter,", "all slash tags on this server? (Y/n)\", pred ) except", "and value \" \"weren't seperated by a `:`.\", delete_after=15, )", "slash tag with TagScript. 
[Slash tag usage guide](https://phen-cogs.readthedocs.io/en/latest/slashtags/slashtags.html) \"\"\" await", "TagName(check_global=False, check_regex=False), *, tagscript: TagScriptConverter, ): \"\"\" Add a user", "break if pred.result is False: break return options async def", "await self.create_slash_tag(ctx, tag_name, tagscript, is_global=False) async def create_slash_tag( self, ctx:", "self, ctx: commands.Context, tag: GuildTagConverter, *, tagscript: TagScriptConverter ): \"\"\"Edit", "WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING", "await ctx.send(\"`/eval` has been registered.\") @commands.check(dev_check) @slashtagset.command(\"rmeval\") async def slashtagset_rmeval(self,", "TagScriptConverter ): await ctx.send(await tag.edit_tagscript(tagscript)) @slashtag_global_edit.command(\"tagscript\") @copy_doc(slashtag_edit_tagscript) async def slashtag_global_edit_tagscript(", "None) @commands.is_owner() @commands.group(aliases=[\"slashset\"]) async def slashtagset(self, ctx: commands.Context): \"\"\"Manage SlashTags", "MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO", "delete_after=15, ) continue choice = ApplicationOptionChoice(*choice_text.split(\":\", 1)) choices.append(choice) if len(choices)", "cog loaded: {testing_enabled}\", ] embed = discord.Embed( color=0xC9C9C9, title=\"SlashTags Settings\",", "32 characters and can only contain characters \" \"that are", "arguments.\") else: if pred.result is True: await self.get_options(ctx, options) command", "delete_after=15, ) continue if not CHOICE_RE.match(choice_text): await ctx.send( f\"Failed to", "None await ctx.send(\"`/eval` has been deleted.\") @slashtagset.command(\"testing\") async def slashtagset_testing(self,", "\"This server has no slash tags.\" if guild else \"There", "this server.`\\n\", \"*Slash argument names may not exceed 32 characters", "= re.compile(r\".{1,100}:.{1,100}\") CHOICE_LIMIT = 25 log = logging.getLogger(\"red.phenom4n4n.slashtags.commands\") def _sub(match:", "arguments to this tag? (Y/n)\", pred ) except asyncio.TimeoutError: await", "*, description: str ): \"\"\"Edit a slash tag's description.\"\"\" await", "tag_name, tagscript, is_global=True, command_type=ApplicationCommandType.MESSAGE ) @commands.mod_or_permissions(manage_guild=True) @slashtag_global.command(\"user\") @copy_doc(slashtag_user) async def", "\"-\"]) async def slashtag_remove(self, ctx: commands.Context, *, tag: GuildTagConverter): \"\"\"Delete", "e.copy() embed.description = usage_chart embeds.append(embed) await menu(ctx, embeds, DEFAULT_CONTROLS) @slashtag.command(\"usage\",", "discord.HTTPException: pass await self.config.eval_command.clear() self.eval_command = None await ctx.send(\"`/eval` has", "TagScript.\"\"\" await self.slashtag_edit(ctx, tag, tagscript=tagscript) @slashtag_edit.command(\"name\") async def slashtag_edit_name( self,", "else \"unloaded\" return await ctx.send(f\"The SlashTag interaction testing cog is", "no global slash tags.\" ) return await ctx.send(message) counter =", "discord.Guild = None): tags = self.guild_tag_cache[guild.id] if guild else 
self.global_tag_cache", "added_required=added_required) if not option.required: added_required = True except asyncio.TimeoutError: await", "add as choices to \" \"the tag. Choice names and", "message command tag with TagScript. [Slash tag usage guide](https://phen-cogs.readthedocs.io/en/latest/slashtags/slashtags.html) \"\"\"", "Pastebin link. \"\"\" await self.create_slash_tag(ctx, tag_name, link, is_global=False) @commands.mod_or_permissions(manage_guild=True) @slashtag.group(\"edit\",", "slashtagset_rmeval(self, ctx: commands.Context): \"\"\"Remove the slash eval command.\"\"\" if not", "option_query = [ \"What should the argument type be?\", f\"Valid", "return await ctx.send(f\"The SlashTag interaction testing cog is already {loaded}.\")", "ctx: commands.Context, tag: GlobalTagConverter, *, tagscript: TagScriptConverter ): await ctx.send(await", "tagscript, is_global=True, command_type=ApplicationCommandType.MESSAGE ) @commands.mod_or_permissions(manage_guild=True) @slashtag_global.command(\"user\") @copy_doc(slashtag_user) async def slashtag_global_user(", "GuildTagConverter): \"\"\"Delete a slash tag.\"\"\" await ctx.send(await tag.delete()) @slashtag.command(\"info\") async", "self, ctx: commands.Context, options: List[SlashOption] ) -> List[SlashOption]: added_required =", "ctx: commands.Context, tag: GlobalTagConverter, *, description: str ): await ctx.send(await", "argument was automatically made optional as the previous one was", "`:`.\", delete_after=15, ) continue if not CHOICE_RE.match(choice_text): await ctx.send( f\"Failed", "None: pred = MessagePredicate.same_context(ctx) ask = await ctx.send(query) try: message", "False for i in range(1, 11): try: option = await", "should be seperated by `:`, and each choice \" \"should", "commands.Context, tag: GuildTagConverter, argument: str ): await tag.edit_single_option(ctx, argument) @slashtag_global.command(\"remove\",", "what this means, you don't need to worry about it.", "adding arguments.\") else: if 
pred.result is True: await self.get_options(ctx, options)", "(**{self.eval_command}**)\" if self.eval_command else \"❎\" testing_enabled = \"✅\" if self.testing_enabled", "ctx) await self.send_and_query_response(ctx, \"\\n\".join(name_desc), name_pred) match = name_pred.result name, description", "async def slashtagset_rmeval(self, ctx: commands.Context): \"\"\"Remove the slash eval command.\"\"\"", "These commands use TagScriptEngine. [This site](https://phen-cogs.readthedocs.io/en/latest/index.html) has documentation on how", "DEFAULT_CONTROLS) @slashtag.command(\"usage\", aliases=[\"stats\"]) async def slashtag_usage(self, ctx: commands.Context): \"\"\" See", "= page embed.set_footer(text=f\"{index}/{len(pages)} | {len(tags)} {slash_tags}\") embeds.append(embed) # await menu(ctx,", "Software without restriction, including without limitation the rights to use,", "100 characters)\", pred=MessagePredicate.length_less(101, ctx), ) except asyncio.TimeoutError: return await ctx.send(\"Tag", "server.`\\n\", \"*Slash argument names may not exceed 32 characters and", "out, not deleting slash tags.\") if not pred.result: return await", "await button_menu(ctx, embeds) @slashtag.command(\"list\") async def slashtag_list(self, ctx: commands.Context): \"\"\"View", "continue if not CHOICE_RE.match(choice_text): await ctx.send( f\"Failed to parse `{choice_text}`", "NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS", "import Dict, List, Union import discord from redbot.core import commands", "TagName(check_global=False), *, tagscript: TagScriptConverter, ): \"\"\" Add a slash tag", "hereby granted, free of charge, to any person obtaining a", "async def show_slash_tag_usage(self, ctx: commands.Context, guild: discord.Guild = None): tags", "required = pred.result else: await ctx.send( \"This argument was automatically", "slash tags.\") guild: discord.Guild = ctx.guild await self.http.put_guild_slash_commands(guild.id, []) for", "if not name.startswith(\"SUB\") ] valid_option_types.append(\"choices\") option_query = [ \"What should", "OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE", "ctx: commands.Context): await self.restore_tags(ctx, None) @commands.is_owner() @commands.group(aliases=[\"slashset\"]) async def slashtagset(self,", "target_state: loaded = \"Loaded\" self.add_test_cog() else: loaded = \"Unloaded\" self.remove_test_cog()", "this slash tag usage stats. **Example:** `[p]slashtag usage` \"\"\" await", "@copy_doc(slashtag_edit_argument) async def slashtag_global_edit_argument( self, ctx: commands.Context, tag: GuildTagConverter, argument:", "ctx, tag_name, tagscript, is_global=True, command_type=ApplicationCommandType.MESSAGE ) @commands.mod_or_permissions(manage_guild=True) @slashtag_global.command(\"user\") @copy_doc(slashtag_user) async", "else: overriden.__doc__ = doc return overriden return decorator class Commands(MixinMeta):", "like I don't have permission to add Slash Commands here.", "\"\"\" Edit a slash tag's arguments. 
See [this documentation page](https://phen-cogs.readthedocs.io/en/latest/slashtags/slash_arguments.html)", "bot owners can use this.\", options=[ SlashOption(name=\"body\", description=\"Code body to", "slash tag's TagScript.\"\"\" await self.slashtag_edit(ctx, tag, tagscript=tagscript) @slashtag_edit.command(\"name\") async def", "the command options is needed raise tag = SlashTag( self,", "Add a message command tag with TagScript. [Slash tag usage", "FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR", "Dict, List, Union import discord from redbot.core import commands from", "\"\"\" await self.show_slash_tag_usage(ctx, ctx.guild) @commands.is_owner() @slashtag.command(\"restore\", hidden=True) async def slashtag_restore(self,", "ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE", "doc return overriden return decorator class Commands(MixinMeta): @commands.guild_only() @commands.group(aliases=[\"st\"]) async", "Slash Tag management with TagScript. These commands use TagScriptEngine. [This", "get_options( self, ctx: commands.Context, options: List[SlashOption] ) -> List[SlashOption]: added_required", "TagScriptConverter, ): \"\"\" Add a user command tag with TagScript.", "@commands.check(dev_check) @slashtagset.command(\"addeval\") async def slashtagset_addeval(self, ctx: commands.Context): \"\"\"Add a slash", "commands.Context, tag_name: TagName(check_global=False), *, tagscript: TagScriptConverter, ): \"\"\" Add a", "\"\"\" target_state = ( true_or_false if true_or_false is not None", "tag: GuildTagConverter): \"\"\" Edit a slash tag's arguments. 
See [this", "= [ f\"Application ID: **{self.application_id}**\", f\"Eval command: {eval_command}\", f\"Test cog", "await self.delete_quietly(message) return message.content async def get_choices(self, ctx: commands.Context) ->", "tags.copy().values()}) e = discord.Embed(title=\"Slash Tag Stats\", color=await ctx.embed_color()) embeds =", "title = f\"`{tag.type.get_prefix()}{tag.name}` - \" limit -= len(title) tagscript =", "limit: int = 60) -> str: title = f\"`{tag.type.get_prefix()}{tag.name}` -", "options.append(option) if i == 10: break pred = MessagePredicate.yes_or_no(ctx) try:", "None, *, timeout: int = 60, ) -> str: if", "\"Loaded\" self.add_test_cog() else: loaded = \"Unloaded\" self.remove_test_cog() await ctx.send(f\"{loaded} the", "TagName(check_global=False, check_regex=False), *, tagscript: TagScriptConverter, ): \"\"\" Add a message", "await ctx.send(\"There are no global slash tags.\") await self.view_slash_tags(ctx, tags,", "\"\"\"Edit a slash tag's TagScript.\"\"\" await self.slashtag_edit(ctx, tag, tagscript=tagscript) @slashtag_edit.command(\"name\")", "Software is furnished to do so, subject to the following", "commands.Context): \"\"\"Clear all slash tags for this server.\"\"\" pred =", "commands.Context, *, tag: TagConverter): \"\"\"Get info about a slash tag", "tagscript = tagscript[:limit] + \"...\" tagscript = tagscript.replace(\"\\n\", \" \")", "testing cog is already {loaded}.\") await self.config.testing_enabled.set(target_state) if target_state: loaded", "ctx, \"Would you like to add arguments to this tag?", "included in all copies or substantial portions of the Software.", "send_and_query_response( self, ctx: commands.Context, query: str, pred: MessagePredicate = None,", "delete all slash tags on this server? 
(Y/n)\", pred )", "commands.Context): \"\"\"Manage SlashTags settings.\"\"\" @slashtagset.command(\"settings\") async def slashtagset_settings(self, ctx: commands.Context):", "member of this server.`\\n\", \"*Slash argument names may not exceed", "timeout=timeout) except asyncio.TimeoutError: await self.delete_quietly(ask) raise await self.delete_quietly(ask) await self.delete_quietly(message)", "name.\"\"\" await ctx.send(await tag.edit_name(name)) @slashtag_edit.command(\"description\") async def slashtag_edit_description( self, ctx:", "however info on the command options is needed raise tag", "not deleting slash tags.\") if not pred.result: return await ctx.send(\"Ok,", "await self.send_and_query_response( ctx, \"Are you sure you want to delete", "_sub(match: re.Match) -> str: if match.group(1): return \"[p]slashtag global\" repl", "description=description, guild_id=guild_id, options=options, type=command_type, ) try: await command.register() except discord.Forbidden", "tag.\"\"\" await ctx.send(await tag.edit_tagscript(tagscript)) @slashtag_edit.command(\"tagscript\") async def slashtag_edit_tagscript( self, ctx:", "import MixinMeta from ..converters import ( GlobalTagConverter, GuildTagConverter, PastebinConverter, TagConverter,", "-> SlashOption: name_desc = [ \"What should the argument name", "name and value \" \"weren't seperated by a `:`.\", delete_after=15,", "@slashtag_global_edit.command(\"argument\", aliases=[\"option\"]) @copy_doc(slashtag_edit_argument) async def slashtag_global_edit_argument( self, ctx: commands.Context, tag:", "] valid_option_types.append(\"choices\") option_query = [ \"What should the argument type", "a user command tag with TagScript. [Slash tag usage guide](https://phen-cogs.readthedocs.io/en/latest/slashtags/slashtags.html)", "more information on slash tag arguments. 
\"\"\" await tag.edit_options(ctx) @slashtag_edit.command(\"argument\",", "ctx.send(query) try: message = await self.bot.wait_for(\"message\", check=pred, timeout=timeout) except asyncio.TimeoutError:", "ctx, \"What should the tag description to be? (maximum 100", "e = discord.Embed(title=\"Slash Tag Stats\", color=await ctx.embed_color()) embeds = []", "= doc return overriden return decorator class Commands(MixinMeta): @commands.guild_only() @commands.group(aliases=[\"st\"])", "slashtag_global_message( self, ctx: commands.Context, tag_name: TagName(global_priority=True, check_regex=False), *, tagscript: TagScriptConverter,", "slashtag_global_list(self, ctx: commands.Context): tags = self.global_tag_cache if not tags: return", "timed out, not adding arguments.\") else: if pred.result is True:", "message = await self.bot.wait_for(\"message\", check=pred, timeout=timeout) except asyncio.TimeoutError: await self.delete_quietly(ask)", "@slashtagset.command(\"addeval\") async def slashtagset_addeval(self, ctx: commands.Context): \"\"\"Add a slash eval", "slashtag_info(self, ctx: commands.Context, *, tag: TagConverter): \"\"\"Get info about a", ") await slasheval.register() await self.config.eval_command.set(slasheval.id) self.eval_command = slasheval.id await ctx.send(\"`/eval`", "tag with TagScript. [Slash tag usage guide](https://phen-cogs.readthedocs.io/en/latest/slashtags/slashtags.html) \"\"\" await self.create_slash_tag(", "option_type = SlashOptionType[option_type.upper()] if not added_required: pred = MessagePredicate.yes_or_no(ctx) await", "== \"choices\": choices = await self.get_choices(ctx) option_type = \"STRING\" else:", "ApplicationCommand( self, name=\"eval\", description=\"SlashTags debugging eval command. Only bot owners", "development test cog. 
\"\"\" target_state = ( true_or_false if true_or_false", "isinstance(overriden, commands.Command): overriden._help_override = doc else: overriden.__doc__ = doc return", "Tags\", icon_url=ctx.guild.icon_url) embeds = [] pages = list(pagify(description)) for index,", "def slashtag_usage(self, ctx: commands.Context): \"\"\" See this slash tag usage", "SlashTags settings.\"\"\" @slashtagset.command(\"settings\") async def slashtagset_settings(self, ctx: commands.Context): \"\"\"View SlashTags", "self.global_tag_cache if not tags: message = ( \"This server has", "): \"\"\"Edit a slash tag's name.\"\"\" await ctx.send(await tag.edit_name(name)) @slashtag_edit.command(\"description\")", "import DEFAULT_CONTROLS, menu from redbot.core.utils.predicates import MessagePredicate from tabulate import", "self.eval_command else \"❎\" testing_enabled = \"✅\" if self.testing_enabled else \"❎\"", "@slashtag.command(\"clear\", hidden=True) async def slashtag_clear(self, ctx: commands.Context): \"\"\"Clear all slash", "True except asyncio.TimeoutError: await ctx.send(\"Adding this argument timed out.\", delete_after=15)", "self, ctx: commands.Context, tags: Dict[int, SlashTag], *, is_global: bool, ):", "max choices ({CHOICE_LIMIT}).\") break return choices async def get_option( self,", "\"the tag. Choice names and values should be seperated by", "differs from the bot user ID. 
This only applies to", "{ctx.guild!r}\", exc_info=error ) text = ( \"Looks like I don't", "if not added_required: pred = MessagePredicate.yes_or_no(ctx) await self.send_and_query_response( ctx, \"Is", "valid_option_types.append(\"choices\") option_query = [ \"What should the argument type be?\",", "tagscript: TagScriptConverter, ): \"\"\" Add a user command tag with", "def slashtag_global_message( self, ctx: commands.Context, tag_name: TagName(global_priority=True, check_regex=False), *, tagscript:", "*, description: str ): await ctx.send(await tag.edit_description(description)) @slashtag_global_edit.command(\"arguments\", aliases=[\"options\"]) @copy_doc(slashtag_edit_arguments)", "query = ( \"Send the list of choice names and", "= await self.get_choices(ctx) option_type = \"STRING\" else: choices = []", "commands.Context, tag_name: TagName(global_priority=True, check_regex=False), *, tagscript: TagScriptConverter, ): await self.create_slash_tag(", "pred = MessagePredicate.yes_or_no(ctx) await self.send_and_query_response( ctx, \"Is this argument required?", "\"\"\" MIT License Copyright (c) 2020-present phenom4n4n Permission is hereby", "str: if match.group(1): return \"[p]slashtag global\" repl = \"global \"", "guide](https://phen-cogs.readthedocs.io/en/latest/slashtags/slashtags.html) \"\"\" await self.create_slash_tag( ctx, tag_name, tagscript, is_global=False, command_type=ApplicationCommandType.MESSAGE )", "ctx, tag_name, tagscript, is_global=False, command_type=ApplicationCommandType.MESSAGE ) @commands.mod_or_permissions(manage_guild=True) @slashtag.command(\"user\") async def", "None): \"\"\" Load or unload the SlashTag interaction development test", "True: await self.get_options(ctx, options) command = ApplicationCommand( self, name=tag_name, description=description,", "tag in sorted(tags.values(), key=lambda t: t.name) ] description = \"\\n\".join(description)", "OR OTHER DEALINGS IN THE SOFTWARE. 
\"\"\" import asyncio import", "60) -> str: title = f\"`{tag.type.get_prefix()}{tag.name}` - \" limit -=", "async def slashtag_global_message( self, ctx: commands.Context, tag_name: TagName(global_priority=True, check_regex=False), *,", "tag's TagScript.\"\"\" await self.slashtag_edit(ctx, tag, tagscript=tagscript) @slashtag_edit.command(\"name\") async def slashtag_edit_name(", "name and description should be split by a `:`.\", \"Example:", "notice and this permission notice shall be included in all", "evaluate.\", required=True) ], ) await slasheval.register() await self.config.eval_command.set(slasheval.id) self.eval_command =", "tagscript: TagScriptConverter, ): \"\"\" Add a message command tag with", "-> str: if pred is None: pred = MessagePredicate.same_context(ctx) ask", "- 3: tagscript = tagscript[:limit] + \"...\" tagscript = tagscript.replace(\"\\n\",", "def get_option( self, ctx: commands.Context, *, added_required: bool = False", "tag_name, tagscript, is_global=False, command_type=ApplicationCommandType.MESSAGE ) @commands.mod_or_permissions(manage_guild=True) @slashtag.command(\"user\") async def slashtag_user(", "tags\" e.set_author(name=\"Stored Slash Tags\", icon_url=ctx.guild.icon_url) embeds = [] pages =", "since error handler should print it, however info on the", "Union[commands.Command, types.FunctionType]): doc = original.help if isinstance(original, commands.Command) else original.__doc__", "not pred.result: return await ctx.send(\"Ok, not deleting slash tags.\") guild:", "info unneeded since error handler should print it, however info", "management with TagScript. These commands use TagScriptEngine. 
[This site](https://phen-cogs.readthedocs.io/en/latest/index.html) has", "`:`, and each choice \" \"should be seperated by `|`.", "tag.edit_options(ctx) @slashtag_edit.command(\"argument\", aliases=[\"option\"]) async def slashtag_edit_argument( self, ctx: commands.Context, tag:", "**{self.application_id}**\", f\"Eval command: {eval_command}\", f\"Test cog loaded: {testing_enabled}\", ] embed", "(Y/n)\", pred ) except asyncio.TimeoutError: return await ctx.send(\"Timed out, not", "*, tagscript: TagScriptConverter, ): await self.create_slash_tag(ctx, tag_name, tagscript, is_global=True) @commands.mod_or_permissions(manage_guild=True)", "this server.\"\"\" await tag.send_info(ctx) @slashtag.command(\"raw\") async def slashtag_raw(self, ctx: commands.Context,", "self, ctx: commands.Context, tag_name: TagName(check_global=False), *, link: PastebinConverter, ): await", "tags.\" if guild else \"There are no global slash tags.\"", "[This site](https://phen-cogs.readthedocs.io/en/latest/index.html) has documentation on how to use TagScript blocks.", "original.__doc__ doc = TAG_RE.sub(_sub, doc) if isinstance(overriden, commands.Command): overriden._help_override =", "self.send_and_query_response( ctx, \"Would you like to add another argument? (Y/n)\",", ") return await ctx.send(text) except Exception: log.error(\"Failed to create command", "tag. 
Choice names and values should be seperated by `:`,", "10: break pred = MessagePredicate.yes_or_no(ctx) try: await self.send_and_query_response( ctx, \"Would", "option_type=option_type, required=required, choices=choices, ) @commands.mod_or_permissions(manage_guild=True) @slashtag.command(\"message\") async def slashtag_message( self,", "= pred.result else: await ctx.send( \"This argument was automatically made", "and/or sell copies of the Software, and to permit persons", "or '-'.\", \"The argument description must be less than or", "names and values you would like to add as choices", "TagScriptConverter, ): await self.create_slash_tag( ctx, tag_name, tagscript, is_global=True, command_type=ApplicationCommandType.MESSAGE )", "raise await self.delete_quietly(ask) await self.delete_quietly(message) return message.content async def get_choices(self,", "\"Uses\")), \"prolog\") embed = e.copy() embed.description = usage_chart embeds.append(embed) await", "def get_options( self, ctx: commands.Context, options: List[SlashOption] ) -> List[SlashOption]:", "page](https://phen-cogs.readthedocs.io/en/latest/slashtags/slash_arguments.html) for more information on slash tag arguments. 
\"\"\" await", "SlashTags settings.\"\"\" eval_command = f\"✅ (**{self.eval_command}**)\" if self.eval_command else \"❎\"", "tags, is_global=False) async def show_slash_tag_usage(self, ctx: commands.Context, guild: discord.Guild =", "await self.create_slash_tag( ctx, tag_name, tagscript, is_global=True, command_type=ApplicationCommandType.MESSAGE ) @commands.mod_or_permissions(manage_guild=True) @slashtag_global.command(\"user\")", "option_type = await self.send_and_query_response( ctx, \"\\n\".join(option_query), MessagePredicate.lower_contained_in(valid_option_types, ctx), ) if", "await self.create_slash_tag( ctx, tag_name, tagscript, is_global=True, command_type=ApplicationCommandType.USER ) @slashtag_global.command(\"pastebin\", aliases=[\"++\"])", "tag_name: TagName(check_global=False), *, link: PastebinConverter, ): await self.create_slash_tag(ctx, tag_name, link,", "must also be optional.*\", pred, ) required = pred.result else:", "if guild else self.global_tag_cache if not tags: message = (", "await tag.edit_single_option(ctx, argument) @commands.mod_or_permissions(manage_guild=True) @slashtag.command(\"remove\", aliases=[\"delete\", \"-\"]) async def slashtag_remove(self,", "copy of this software and associated documentation files (the \"Software\"),", ") except asyncio.TimeoutError: return await ctx.send(\"Tag addition timed out.\") else:", "values should be seperated by `:`, and each choice \"", "slash tags.\") if not pred.result: return await ctx.send(\"Ok, not deleting", "\"\"\" import asyncio import logging import re import types from", "\"The argument description must be less than or equal to", "= discord.Embed(color=await ctx.embed_color()) if is_global: slash_tags = \"global slash tags\"", "by name.\"\"\" await tag.edit_single_option(ctx, argument) @commands.mod_or_permissions(manage_guild=True) @slashtag.command(\"remove\", aliases=[\"delete\", \"-\"]) async", "self.create_slash_tag(ctx, tag_name, tagscript, is_global=True) 
@commands.mod_or_permissions(manage_guild=True) @slashtag_global.command(\"message\") @copy_doc(slashtag_message) async def slashtag_global_message(", "ID for [botname] slash commands if it differs from the", "match = name_pred.result name, description = match.group(1), match.group(2) valid_option_types =", "PastebinConverter, TagConverter, TagName, TagScriptConverter, ) from ..http import ApplicationOptionChoice, SlashOptionType", "argument? (Y/n)\", pred ) except asyncio.TimeoutError: await ctx.send(\"Query timed out,", "= False ) -> SlashOption: name_desc = [ \"What should", "embed.description = usage_chart embeds.append(embed) await menu(ctx, embeds, DEFAULT_CONTROLS) @slashtag.command(\"usage\", aliases=[\"stats\"])", "'-'.\", \"The argument description must be less than or equal", "\"choices\": choices = await self.get_choices(ctx) option_type = \"STRING\" else: choices", "await ctx.send(await tag.edit_description(description)) @slashtag_edit.command(\"arguments\", aliases=[\"options\"]) async def slashtag_edit_arguments(self, ctx: commands.Context,", "use this.\", options=[ SlashOption(name=\"body\", description=\"Code body to evaluate.\", required=True) ],", "\" \"that are alphanumeric or '_' or '-'.\", \"The argument", "if len(choices) >= CHOICE_LIMIT: await ctx.send(f\"Reached max choices ({CHOICE_LIMIT}).\") break", "slashtag_list(self, ctx: commands.Context): \"\"\"View stored slash tags.\"\"\" tags = self.guild_tag_cache[ctx.guild.id]", "restriction, including without limitation the rights to use, copy, modify,", "embeds = [] pages = list(pagify(description)) for index, page in", "= slasheval.id await ctx.send(\"`/eval` has been registered.\") @commands.check(dev_check) @slashtagset.command(\"rmeval\") async", "not adding additional arguments.\") break if pred.result is False: break", "in response.split(\"|\"): if \":\" not in choice_text: await ctx.send( f\"Failed", "self.restore_tags(ctx, None) @commands.is_owner() 
@commands.group(aliases=[\"slashset\"]) async def slashtagset(self, ctx: commands.Context): \"\"\"Manage", "OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF", "description to be? (maximum 100 characters)\", pred=MessagePredicate.length_less(101, ctx), ) except", "f\"?client_id={self.bot.user.id}&scope=bot%20applications.commands>\" ) return await ctx.send(text) except Exception: log.error(\"Failed to create", "OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR", "ctx, \"Are you sure you want to delete all slash", "SlashOptionType from ..objects import ApplicationCommand, ApplicationCommandType, SlashOption, SlashTag from ..testing.button_menus", "\"[p]slashtag global\" repl = \"global \" name = match.group(0) repl", "Only bot owners can use this.\", options=[ SlashOption(name=\"body\", description=\"Code body", "title=\"SlashTags Settings\", description=\"\\n\".join(description) ) await ctx.send(embed=embed) @slashtagset.command(\"appid\") async def slashtagset_appid(self,", "except asyncio.TimeoutError: await ctx.send(\"Query timed out, not adding additional arguments.\")", "commands.Context, tag: GuildTagConverter, argument: str ): \"\"\"Edit a single slash", "i in range(1, 11): try: option = await self.get_option(ctx, added_required=added_required)", "the 100 character limit.\", delete_after=15, ) continue choice = ApplicationOptionChoice(*choice_text.split(\":\",", "e.set_author(name=\"Global Slash Tags\", icon_url=ctx.me.avatar_url) else: slash_tags = \"slash tags\" e.set_author(name=\"Stored", "Manually set the application ID for [botname] slash commands if", "guild {ctx.guild!r}\") # exc info unneeded since error handler should", "is_global: slash_tags = \"global slash tags\" e.set_author(name=\"Global Slash Tags\", icon_url=ctx.me.avatar_url)", "out, not adding additional arguments.\") break if pred.result is False:", "self.send_and_query_response( ctx, \"Is this argument required? 
(Y/n)\\n*Keep in mind that", "pred, ) required = pred.result else: await ctx.send( \"This argument", "*, is_global: bool, ): description = [ self.format_tagscript(tag) for tag", "as its name and value \" \"weren't seperated by a", "PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR", "TagConverter, TagName, TagScriptConverter, ) from ..http import ApplicationOptionChoice, SlashOptionType from", "self.get_option(ctx, added_required=added_required) if not option.required: added_required = True except asyncio.TimeoutError:", "): await tag.edit_single_option(ctx, argument) @slashtag_global.command(\"remove\", aliases=[\"delete\", \"-\"]) @copy_doc(slashtag_remove) async def", "Exception: log.error(\"Failed to create command {command!r} on guild {ctx.guild!r}\") #", "commands.Context): await self.restore_tags(ctx, None) @commands.is_owner() @commands.group(aliases=[\"slashset\"]) async def slashtagset(self, ctx:", "the Software, and to permit persons to whom the Software", "return await ctx.send(\"An eval command is already registered.\") slasheval =", "GuildTagConverter, argument: str ): await tag.edit_single_option(ctx, argument) @slashtag_global.command(\"remove\", aliases=[\"delete\", \"-\"])", "and associated documentation files (the \"Software\"), to deal in the", "options) command = ApplicationCommand( self, name=tag_name, description=description, guild_id=guild_id, options=options, type=command_type,", "from redbot.core import commands from redbot.core.utils.chat_formatting import box, humanize_list, inline,", "GlobalTagConverter): await ctx.send(await tag.delete()) @slashtag_global.command(\"raw\") @copy_doc(slashtag_raw) async def slashtag_global_raw(self, ctx:", "\"\"\"Delete a slash tag.\"\"\" await ctx.send(await tag.delete()) @slashtag.command(\"info\") async def", "redbot.core.utils.menus import DEFAULT_CONTROLS, menu from redbot.core.utils.predicates import MessagePredicate from tabulate", "pred.result is True: await self.get_options(ctx, 
options) command = ApplicationCommand( self,", "eval command.\"\"\" if not self.eval_command: return await ctx.send(\"The eval command", "box(tabulate(usage_data, headers=(\"Tag\", \"Uses\")), \"prolog\") embed = e.copy() embed.description = usage_chart", "the Software. THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY", "async def slashtag_global_add( self, ctx: commands.Context, tag_name: TagName(global_priority=True), *, tagscript:", "doc = TAG_RE.sub(_sub, doc) if isinstance(overriden, commands.Command): overriden._help_override = doc", "slash tag arguments. \"\"\" await tag.edit_options(ctx) @slashtag_edit.command(\"argument\", aliases=[\"option\"]) async def", "@commands.is_owner() @slashtag.group(\"global\") @copy_doc(slashtag) async def slashtag_global(self, ctx: commands.Context): pass @slashtag_global.command(\"add\")", "self.bot.wait_for(\"message\", check=pred, timeout=timeout) except asyncio.TimeoutError: await self.delete_quietly(ask) raise await self.delete_quietly(ask)", "= e.copy() embed.description = usage_chart embeds.append(embed) await menu(ctx, embeds, DEFAULT_CONTROLS)", "await self.create_slash_tag(ctx, tag_name, link, is_global=False) @commands.mod_or_permissions(manage_guild=True) @slashtag.group(\"edit\", aliases=[\"e\"], invoke_without_command=True) async", "usage guide](https://phen-cogs.readthedocs.io/en/latest/slashtags/slashtags.html) \"\"\" await self.create_slash_tag(ctx, tag_name, tagscript, is_global=False) async def", "pred = MessagePredicate.same_context(ctx) ask = await ctx.send(query) try: message =", "logging.getLogger(\"red.phenom4n4n.slashtags.commands\") def _sub(match: re.Match) -> str: if match.group(1): return \"[p]slashtag", "seperated by a `:`.\", delete_after=15, ) continue if not CHOICE_RE.match(choice_text):", "tag: GuildTagConverter, *, tagscript: TagScriptConverter ): \"\"\"Edit a slash tag's", "as button_menu from ..utils import ARGUMENT_NAME_DESCRIPTION, chunks, dev_check TAG_RE =", "repl += name if 
name.istitle(): repl = repl.title() return repl", "slash tags on this server.\") await self.view_slash_tags(ctx, tags, is_global=False) async", "slashtag_edit_description( self, ctx: commands.Context, tag: GuildTagConverter, *, description: str ):", "await ctx.send(await tag.edit_tagscript(tagscript)) @slashtag_edit.command(\"tagscript\") async def slashtag_edit_tagscript( self, ctx: commands.Context,", "] description = \"\\n\".join(description) e = discord.Embed(color=await ctx.embed_color()) if is_global:", "query) choices = [] for choice_text in response.split(\"|\"): if \":\"", "else \"There are no global slash tags.\" ) return await", "if pred.result is True: await self.get_options(ctx, options) command = ApplicationCommand(", "server? (Y/n)\", pred ) except asyncio.TimeoutError: return await ctx.send(\"Timed out,", "try again: <https://discordapp.com/oauth2/authorize\" f\"?client_id={self.bot.user.id}&scope=bot%20applications.commands>\" ) return await ctx.send(text) except Exception:", "<reponame>Myst1c-a/phen-cogs<filename>slashtags/mixins/commands.py<gh_stars>0 \"\"\" MIT License Copyright (c) 2020-present phenom4n4n Permission is", "blocks. 
\"\"\" @commands.mod_or_permissions(manage_guild=True) @slashtag.command(\"add\", aliases=[\"create\", \"+\"]) async def slashtag_add( self,", "for usage_data in chunks(counter.most_common(), 10): usage_chart = box(tabulate(usage_data, headers=(\"Tag\", \"Uses\")),", "t.name) ] description = \"\\n\".join(description) e = discord.Embed(color=await ctx.embed_color()) if", "ApplicationCommandType.CHAT_INPUT, ): options: List[SlashOption] = [] guild_id = None if", "def slashtag_edit_argument( self, ctx: commands.Context, tag: GuildTagConverter, argument: str ):", "this software and associated documentation files (the \"Software\"), to deal", "import ARGUMENT_NAME_DESCRIPTION, chunks, dev_check TAG_RE = re.compile(r\"(?i)(\\[p\\])?\\b(slash\\s?)?tag'?s?\\b\") CHOICE_RE = re.compile(r\".{1,100}:.{1,100}\")", "description = match.group(1), match.group(2) valid_option_types = [ name.lower() for name", "await self.restore_tags(ctx, ctx.guild) @commands.is_owner() @slashtag.command(\"clear\", hidden=True) async def slashtag_clear(self, ctx:", "tag.command.remove_from_cache() del tag self.guild_tag_cache[guild.id].clear() await self.config.guild(guild).tags.clear() await ctx.send(\"Tags deleted.\") @commands.is_owner()", "if command_type == ApplicationCommandType.CHAT_INPUT: try: description = await self.send_and_query_response( ctx,", "if is_global: slash_tags = \"global slash tags\" e.set_author(name=\"Global Slash Tags\",", "async def slashtag_global_edit_arguments(self, ctx: commands.Context, tag: GlobalTagConverter): await tag.edit_options(ctx) @slashtag_global_edit.command(\"argument\",", "all copies or substantial portions of the Software. 
THE SOFTWARE", "await self.view_slash_tags(ctx, tags, is_global=True) @slashtag_global.command(\"usage\", aliases=[\"stats\"]) @copy_doc(slashtag_usage) async def slashtag_global_usage(self,", "argument name and description should be split by a `:`.\",", "Choice names and values should be seperated by `:`, and", "async def slashtag_edit_arguments(self, ctx: commands.Context, tag: GuildTagConverter): \"\"\" Edit a", "await self.delete_quietly(ask) await self.delete_quietly(message) return message.content async def get_choices(self, ctx:", "@commands.mod_or_permissions(manage_guild=True) @slashtag.command(\"message\") async def slashtag_message( self, ctx: commands.Context, tag_name: TagName(check_global=False,", "await ctx.send(f\"Application ID set to `{id}`.\") @commands.check(dev_check) @slashtagset.command(\"addeval\") async def", "commands.Context, tag: GlobalTagConverter, *, name: TagName(global_priority=True), ): await ctx.send(await tag.edit_name(name))", "is_global=True) @commands.mod_or_permissions(manage_guild=True) @slashtag_global.command(\"message\") @copy_doc(slashtag_message) async def slashtag_global_message( self, ctx: commands.Context,", "response.split(\"|\"): if \":\" not in choice_text: await ctx.send( f\"Failed to", "ctx.send(\"Query timed out, not adding additional arguments.\") break if pred.result", "ctx.send(\"`/eval` has been deleted.\") @slashtagset.command(\"testing\") async def slashtagset_testing(self, ctx: commands.Context,", "following conditions: The above copyright notice and this permission notice", "\" name = match.group(0) repl += name if name.istitle(): repl", "argument timed out.\", delete_after=15) break options.append(option) if i == 10:", "ctx: commands.Context, tag: GlobalTagConverter, *, tagscript: TagScriptConverter ): await self.slashtag_global_edit(ctx,", "exceeded the 100 character limit.\", delete_after=15, ) continue choice =", "copy_doc(original: Union[commands.Command, types.FunctionType]): def 
decorator(overriden: Union[commands.Command, types.FunctionType]): doc = original.help", "options=options, type=command_type, ) try: await command.register() except discord.Forbidden as error:", "commands.Context, tag: GlobalTagConverter, *, tagscript: TagScriptConverter ): await self.slashtag_global_edit(ctx, tag,", "command_type=ApplicationCommandType.MESSAGE ) @commands.mod_or_permissions(manage_guild=True) @slashtag.command(\"user\") async def slashtag_user( self, ctx: commands.Context,", "import ApplicationCommand, ApplicationCommandType, SlashOption, SlashTag from ..testing.button_menus import menu as", "ARGUMENT_NAME_DESCRIPTION, chunks, dev_check TAG_RE = re.compile(r\"(?i)(\\[p\\])?\\b(slash\\s?)?tag'?s?\\b\") CHOICE_RE = re.compile(r\".{1,100}:.{1,100}\") CHOICE_LIMIT", "information on slash tag arguments. \"\"\" await tag.edit_options(ctx) @slashtag_edit.command(\"argument\", aliases=[\"option\"])", "= ( \"Looks like I don't have permission to add", "add Slash Commands here. Reinvite me \" \"with this invite", "def slashtag_global_user( self, ctx: commands.Context, tag_name: TagName(global_priority=True, check_regex=False), *, tagscript:", "str ): \"\"\"Edit a single slash tag's argument by name.\"\"\"", "a slash tag's arguments. 
See [this documentation page](https://phen-cogs.readthedocs.io/en/latest/slashtags/slash_arguments.html) for more", "\"*Slash argument names may not exceed 32 characters and can", "not in choice_text: await ctx.send( f\"Failed to parse `{choice_text}` to", "await self.slashtag_global_edit(ctx, tag, tagscript=tagscript) @slashtag_global_edit.command(\"name\") @copy_doc(slashtag_edit_name) async def slashtag_global_edit_name( self,", "BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER", "@slashtag_edit.command(\"name\") async def slashtag_edit_name( self, ctx: commands.Context, tag: GuildTagConverter, *,", "{loaded}.\") await self.config.testing_enabled.set(target_state) if target_state: loaded = \"Loaded\" self.add_test_cog() else:", "tagscript: TagScriptConverter, ): \"\"\" Add a slash tag with TagScript.", "= ( true_or_false if true_or_false is not None else not", "@commands.group(aliases=[\"st\"]) async def slashtag(self, ctx: commands.Context): \"\"\" Slash Tag management", "INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS", "\" \"its name or value exceeded the 100 character limit.\",", "tag_name: TagName(check_global=False), *, link: PastebinConverter, ): \"\"\" Add a slash", "commands.Context, tag: GlobalTagConverter, *, description: str ): await ctx.send(await tag.edit_description(description))", "Example:\\n`dog:Doggo|cat:Catto`\" ) response = await self.send_and_query_response(ctx, query) choices = []", "is_global=True, command_type=ApplicationCommandType.MESSAGE ) @commands.mod_or_permissions(manage_guild=True) @slashtag_global.command(\"user\") @copy_doc(slashtag_user) async def slashtag_global_user( self,", "tagscript=tagscript) @slashtag_global_edit.command(\"name\") @copy_doc(slashtag_edit_name) async def slashtag_global_edit_name( self, ctx: commands.Context, tag:", "self, ctx: commands.Context, tag: GlobalTagConverter, *, tagscript: TagScriptConverter ): await", "async def slashtag_user( self, ctx: commands.Context, 
tag_name: TagName(check_global=False, check_regex=False), *,", "import discord from redbot.core import commands from redbot.core.utils.chat_formatting import box,", "async def slashtag_usage(self, ctx: commands.Context): \"\"\" See this slash tag", "= \"STRING\" else: choices = [] option_type = SlashOptionType[option_type.upper()] if", "not tags: return await ctx.send(\"There are no global slash tags.\")", "a slash tag.\"\"\" await ctx.send(await tag.delete()) @slashtag.command(\"info\") async def slashtag_info(self,", "copies or substantial portions of the Software. THE SOFTWARE IS", "ctx: commands.Context, tag_name: TagName(check_global=False, check_regex=False), *, tagscript: TagScriptConverter, ): \"\"\"", "OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED", "\"❎\" testing_enabled = \"✅\" if self.testing_enabled else \"❎\" description =", "have permission to add Slash Commands here. Reinvite me \"", "Tag Stats\", color=await ctx.embed_color()) embeds = [] for usage_data in", "ctx, tag_name, tagscript, is_global=True, command_type=ApplicationCommandType.USER ) @slashtag_global.command(\"pastebin\", aliases=[\"++\"]) @copy_doc(slashtag_pastebin) async", "commands.Context): await self.show_slash_tag_usage(ctx) @slashtag_global.command(\"restore\", hidden=True) @copy_doc(slashtag_restore) async def slashtag_global_restore(self, ctx:", ") @commands.mod_or_permissions(manage_guild=True) @slashtag_global.command(\"user\") @copy_doc(slashtag_user) async def slashtag_global_user( self, ctx: commands.Context,", "\"\"\"Manage SlashTags settings.\"\"\" @slashtagset.command(\"settings\") async def slashtagset_settings(self, ctx: commands.Context): \"\"\"View", "or unload the SlashTag interaction development test cog. 
\"\"\" target_state", "than or equal to 100 characters.*\", ] name_pred = MessagePredicate.regex(ARGUMENT_NAME_DESCRIPTION,", "await self.restore_tags(ctx, None) @commands.is_owner() @commands.group(aliases=[\"slashset\"]) async def slashtagset(self, ctx: commands.Context):", "a slash tag that is stored on this server.\"\"\" await", "len(choices) >= CHOICE_LIMIT: await ctx.send(f\"Reached max choices ({CHOICE_LIMIT}).\") break return", "all following arguments must also be optional.*\", pred, ) required", "await ctx.send(\"`/eval` has been deleted.\") @slashtagset.command(\"testing\") async def slashtagset_testing(self, ctx:", "ctx: commands.Context, tag_name: TagName(check_global=False), *, tagscript: TagScriptConverter, ): \"\"\" Add", "@commands.mod_or_permissions(manage_guild=True) @slashtag.command(\"add\", aliases=[\"create\", \"+\"]) async def slashtag_add( self, ctx: commands.Context,", "embed.set_footer(text=f\"{index}/{len(pages)} | {len(tags)} {slash_tags}\") embeds.append(embed) # await menu(ctx, embeds, DEFAULT_CONTROLS)", "{len(tags)} {slash_tags}\") embeds.append(embed) # await menu(ctx, embeds, DEFAULT_CONTROLS) await button_menu(ctx,", "to this tag? (Y/n)\", pred ) except asyncio.TimeoutError: await ctx.send(\"Query", "import copy from typing import Dict, List, Union import discord", "*, is_global: bool = False, command_type: ApplicationCommandType = ApplicationCommandType.CHAT_INPUT, ):", "GlobalTagConverter, GuildTagConverter, PastebinConverter, TagConverter, TagName, TagScriptConverter, ) from ..http import", "self, ctx: commands.Context, tag_name: TagName(global_priority=True), *, tagscript: TagScriptConverter, ): await", "to worry about it. 
\"\"\" app_id = id or self.bot.user.id", "TagScriptConverter, ) from ..http import ApplicationOptionChoice, SlashOptionType from ..objects import", "LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR", "commands.Command) else original.__doc__ doc = TAG_RE.sub(_sub, doc) if isinstance(overriden, commands.Command):", "discord.Forbidden as error: log.error( \"Failed to create command {command!r} on", "If you don't know what this means, you don't need", "TagScript. [Slash tag usage guide](https://phen-cogs.readthedocs.io/en/latest/slashtags/slashtags.html) \"\"\" await self.create_slash_tag( ctx, tag_name,", "should print it, however info on the command options is", "choice = ApplicationOptionChoice(*choice_text.split(\":\", 1)) choices.append(choice) if len(choices) >= CHOICE_LIMIT: await", "= \"\\n\".join(description) e = discord.Embed(color=await ctx.embed_color()) if is_global: slash_tags =", "OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN", "are no slash tags on this server.\") await self.view_slash_tags(ctx, tags,", "commands.Context, tag: GuildTagConverter, *, tagscript: TagScriptConverter ): \"\"\"Edit a slash", "[] for usage_data in chunks(counter.most_common(), 10): usage_chart = box(tabulate(usage_data, headers=(\"Tag\",", "mind that if you choose to make this argument optional,", "aliases=[\"option\"]) async def slashtag_edit_argument( self, ctx: commands.Context, tag: GuildTagConverter, argument:", "await slasheval.register() await self.config.eval_command.set(slasheval.id) self.eval_command = slasheval.id await ctx.send(\"`/eval` has", "match.group(0) repl += name if name.istitle(): repl = repl.title() return", "tagscript = tagscript.replace(\"\\n\", \" \") return f\"{title}{discord.utils.escape_markdown(tagscript)}\" async def view_slash_tags(", "commands.Context, *, tag: GlobalTagConverter): await tag.send_raw_tagscript(ctx) @slashtag_global.command(\"list\") @copy_doc(slashtag_list) async def", "add 
another argument? (Y/n)\", pred ) except asyncio.TimeoutError: await ctx.send(\"Query", ") if option_type.lower() == \"choices\": choices = await self.get_choices(ctx) option_type", "in chunks(counter.most_common(), 10): usage_chart = box(tabulate(usage_data, headers=(\"Tag\", \"Uses\")), \"prolog\") embed", "CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN", "name=\"eval\", description=\"SlashTags debugging eval command. Only bot owners can use", "self.http.put_guild_slash_commands(guild.id, []) for tag in copy(self.guild_tag_cache[guild.id]).values(): tag.remove_from_cache() tag.command.remove_from_cache() del tag", "self, ctx: commands.Context, tag: GuildTagConverter, *, description: str ): \"\"\"Edit", "See [this documentation page](https://phen-cogs.readthedocs.io/en/latest/slashtags/slash_arguments.html) for more information on slash tag", "TagName(check_global=False), *, link: PastebinConverter, ): await self.create_slash_tag(ctx, tag_name, link, is_global=True)", "@slashtag.command(\"usage\", aliases=[\"stats\"]) async def slashtag_usage(self, ctx: commands.Context): \"\"\" See this", "async def slashtag_edit( self, ctx: commands.Context, tag: GuildTagConverter, *, tagscript:", "e.copy() embed.description = page embed.set_footer(text=f\"{index}/{len(pages)} | {len(tags)} {slash_tags}\") embeds.append(embed) #", "the list of choice names and values you would like", "await ctx.send(await tag.edit_description(description)) @slashtag_global_edit.command(\"arguments\", aliases=[\"options\"]) @copy_doc(slashtag_edit_arguments) async def slashtag_global_edit_arguments(self, ctx:", "name, description = match.group(1), match.group(2) valid_option_types = [ name.lower() for", "ctx.send( f\"Failed to parse `{choice_text}` to a choice as its", "is_global=True) @slashtag_global.group(\"edit\", aliases=[\"e\"], invoke_without_command=True) @copy_doc(slashtag_edit) async def slashtag_global_edit( self, ctx:", "a `:`.\", \"Example: `member:A member of this 
server.`\\n\", \"*Slash argument", "IN THE SOFTWARE. \"\"\" import asyncio import logging import re", "(Y/n)\\n*Keep in mind that if you choose to make this", "def slashtag_edit_arguments(self, ctx: commands.Context, tag: GuildTagConverter): \"\"\" Edit a slash", "DEFAULT_CONTROLS) await button_menu(ctx, embeds) @slashtag.command(\"list\") async def slashtag_list(self, ctx: commands.Context):", "commands.Context, tags: Dict[int, SlashTag], *, is_global: bool, ): description =", "import Counter from copy import copy from typing import Dict,", "if command_type == ApplicationCommandType.CHAT_INPUT: pred = MessagePredicate.yes_or_no(ctx) try: await self.send_and_query_response(", "SlashOption: name_desc = [ \"What should the argument name be", "[ name.lower() for name in SlashOptionType.__members__.keys() if not name.startswith(\"SUB\") ]", "except Exception: log.error(\"Failed to create command {command!r} on guild {ctx.guild!r}\")", "optional.*\", pred, ) required = pred.result else: await ctx.send( \"This", "def slashtag_pastebin( self, ctx: commands.Context, tag_name: TagName(check_global=False), *, link: PastebinConverter,", "ctx: commands.Context, tag: GuildTagConverter): \"\"\" Edit a slash tag's arguments.", "Slash Tags\", icon_url=ctx.me.avatar_url) else: slash_tags = \"slash tags\" e.set_author(name=\"Stored Slash", "choices = await self.get_choices(ctx) option_type = \"STRING\" else: choices =", "tagscript, is_global=True, command_type=ApplicationCommandType.USER ) @slashtag_global.command(\"pastebin\", aliases=[\"++\"]) @copy_doc(slashtag_pastebin) async def slashtag_global_pastebin(", "@copy_doc(slashtag_edit_arguments) async def slashtag_global_edit_arguments(self, ctx: commands.Context, tag: GlobalTagConverter): await tag.edit_options(ctx)", "tag.edit_tagscript(tagscript)) @slashtag_global_edit.command(\"tagscript\") @copy_doc(slashtag_edit_tagscript) async def slashtag_global_edit_tagscript( self, ctx: commands.Context, tag:", "None): \"\"\" Manually 
set the application ID for [botname] slash", "import types from collections import Counter from copy import copy", "@slashtag.command(\"user\") async def slashtag_user( self, ctx: commands.Context, tag_name: TagName(check_global=False, check_regex=False),", "not option.required: added_required = True except asyncio.TimeoutError: await ctx.send(\"Adding this", "not adding arguments.\") else: if pred.result is True: await self.get_options(ctx,", "tags: message = ( \"This server has no slash tags.\"", "ctx: commands.Context): tags = self.global_tag_cache if not tags: return await", "guild_id = None if is_global else ctx.guild.id if command_type ==", "else: choices = [] option_type = SlashOptionType[option_type.upper()] if not added_required:", "know what this means, you don't need to worry about", "self, ctx: commands.Context, tag_name: TagName(check_global=False), *, tagscript: TagScriptConverter, ): \"\"\"", "tag: GlobalTagConverter): await tag.send_raw_tagscript(ctx) @slashtag_global.command(\"list\") @copy_doc(slashtag_list) async def slashtag_global_list(self, ctx:", "tagscript: TagScriptConverter, ): await self.create_slash_tag(ctx, tag_name, tagscript, is_global=True) @commands.mod_or_permissions(manage_guild=True) @slashtag_global.command(\"message\")", "tag's raw content.\"\"\" await tag.send_raw_tagscript(ctx) @classmethod def format_tagscript(cls, tag: SlashTag,", "repl = repl.title() return repl def copy_doc(original: Union[commands.Command, types.FunctionType]): def", "= \"\" if command_type == ApplicationCommandType.CHAT_INPUT: pred = MessagePredicate.yes_or_no(ctx) try:", "with a Pastebin link. 
\"\"\" await self.create_slash_tag(ctx, tag_name, link, is_global=False)", "def slashtagset_appid(self, ctx: commands.Context, id: int = None): \"\"\" Manually", "@slashtag.command(\"raw\") async def slashtag_raw(self, ctx: commands.Context, *, tag: GuildTagConverter): \"\"\"Get", "in sorted(tags.values(), key=lambda t: t.name) ] description = \"\\n\".join(description) e", "the database.\"\"\" await self.restore_tags(ctx, ctx.guild) @commands.is_owner() @slashtag.command(\"clear\", hidden=True) async def", "GlobalTagConverter): await tag.send_raw_tagscript(ctx) @slashtag_global.command(\"list\") @copy_doc(slashtag_list) async def slashtag_global_list(self, ctx: commands.Context):", "await self.config.application_id.set(app_id) self.application_id = app_id await ctx.send(f\"Application ID set to", "tags: return await ctx.send(\"There are no global slash tags.\") await", "menu as button_menu from ..utils import ARGUMENT_NAME_DESCRIPTION, chunks, dev_check TAG_RE", "def slashtag_global_restore(self, ctx: commands.Context): await self.restore_tags(ctx, None) @commands.is_owner() @commands.group(aliases=[\"slashset\"]) async", "ctx.send(embed=embed) @slashtagset.command(\"appid\") async def slashtagset_appid(self, ctx: commands.Context, id: int =", "tag_name, tagscript, is_global=True, command_type=ApplicationCommandType.USER ) @slashtag_global.command(\"pastebin\", aliases=[\"++\"]) @copy_doc(slashtag_pastebin) async def", "free of charge, to any person obtaining a copy of", "ctx.send(\"There are no slash tags on this server.\") await self.view_slash_tags(ctx,", "tag.edit_name(name)) @slashtag_global_edit.command(\"description\") @copy_doc(slashtag_edit_description) async def slashtag_global_edit_description( self, ctx: commands.Context, tag:", "IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE", "ctx.send(\"Adding this argument timed out.\", delete_after=15) break options.append(option) if i", "try: await self.send_and_query_response( ctx, \"Would you like to add 
another", "IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,", "repl.title() return repl def copy_doc(original: Union[commands.Command, types.FunctionType]): def decorator(overriden: Union[commands.Command,", "= tagscript.replace(\"\\n\", \" \") return f\"{title}{discord.utils.escape_markdown(tagscript)}\" async def view_slash_tags( self,", "ctx: commands.Context, tag: GuildTagConverter, *, description: str ): \"\"\"Edit a", "can use this.\", options=[ SlashOption(name=\"body\", description=\"Code body to evaluate.\", required=True)", "Software, and to permit persons to whom the Software is", "({CHOICE_LIMIT}).\") break return choices async def get_option( self, ctx: commands.Context,", "slashtag(self, ctx: commands.Context): \"\"\" Slash Tag management with TagScript. These", "**Example:** `[p]slashtag usage` \"\"\" await self.show_slash_tag_usage(ctx, ctx.guild) @commands.is_owner() @slashtag.command(\"restore\", hidden=True)", "ctx: commands.Context, options: List[SlashOption] ) -> List[SlashOption]: added_required = False", "rights to use, copy, modify, merge, publish, distribute, sublicense, and/or", "import asyncio import logging import re import types from collections", "@slashtag.command(\"info\") async def slashtag_info(self, ctx: commands.Context, *, tag: TagConverter): \"\"\"Get", "tag_name, tagscript, is_global=False, command_type=ApplicationCommandType.USER ) @commands.mod_or_permissions(manage_guild=True) @slashtag.command(\"pastebin\", aliases=[\"++\"]) async def", "would like to add as choices to \" \"the tag.", "self.send_and_query_response( ctx, \"Would you like to add arguments to this", "\"Would you like to add another argument? (Y/n)\", pred )", "return options async def send_and_query_response( self, ctx: commands.Context, query: str,", "\"\"\" See this slash tag usage stats. 
**Example:** `[p]slashtag usage`", "async def slashtag_edit_description( self, ctx: commands.Context, tag: GuildTagConverter, *, description:", "*, tagscript: TagScriptConverter, ): \"\"\" Add a user command tag", "): await self.slashtag_global_edit(ctx, tag, tagscript=tagscript) @slashtag_global_edit.command(\"name\") @copy_doc(slashtag_edit_name) async def slashtag_global_edit_name(", "ctx: commands.Context, *, tag: TagConverter): \"\"\"Get info about a slash", "if self.eval_command else \"❎\" testing_enabled = \"✅\" if self.testing_enabled else", "TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION", "ctx: commands.Context): \"\"\"Add a slash eval command for debugging.\"\"\" if", "@copy_doc(slashtag_list) async def slashtag_global_list(self, ctx: commands.Context): tags = self.global_tag_cache if", "deleting slash tags.\") guild: discord.Guild = ctx.guild await self.http.put_guild_slash_commands(guild.id, [])", "aliases=[\"options\"]) async def slashtag_edit_arguments(self, ctx: commands.Context, tag: GuildTagConverter): \"\"\" Edit", "ctx.send(\"The eval command hasn't been registered.\") try: await self.http.remove_slash_command(self.eval_command) except", "slashtag_edit( self, ctx: commands.Context, tag: GuildTagConverter, *, tagscript: TagScriptConverter ):", "= None): \"\"\" Manually set the application ID for [botname]", "enumerate(pages, 1): embed = e.copy() embed.description = page embed.set_footer(text=f\"{index}/{len(pages)} |", "async def slashtag_global_edit_description( self, ctx: commands.Context, tag: GlobalTagConverter, *, description:", "ctx: commands.Context): \"\"\"View stored slash tags.\"\"\" tags = self.guild_tag_cache[ctx.guild.id] if", "tag_name: TagName(global_priority=True, check_regex=False), *, tagscript: TagScriptConverter, ): await self.create_slash_tag( ctx,", ") except asyncio.TimeoutError: await ctx.send(\"Query timed out, not adding additional", "eval command for debugging.\"\"\" if self.eval_command: return await 
ctx.send(\"An eval", "\"\\n\".join(option_query), MessagePredicate.lower_contained_in(valid_option_types, ctx), ) if option_type.lower() == \"choices\": choices =", "tags on this server? (Y/n)\", pred ) except asyncio.TimeoutError: return", "to make this argument optional, all following arguments must also", "tag: TagConverter): \"\"\"Get info about a slash tag that is", "true_or_false is not None else not await self.config.testing_enabled() ) if", "tag: SlashTag, limit: int = 60) -> str: title =", "= await self.send_and_query_response( ctx, \"\\n\".join(option_query), MessagePredicate.lower_contained_in(valid_option_types, ctx), ) if option_type.lower()", "characters and can only contain characters \" \"that are alphanumeric", "int = 60) -> str: title = f\"`{tag.type.get_prefix()}{tag.name}` - \"", "ctx.send(\"Tags deleted.\") @commands.is_owner() @slashtag.group(\"global\") @copy_doc(slashtag) async def slashtag_global(self, ctx: commands.Context):", "for tag in copy(self.guild_tag_cache[guild.id]).values(): tag.remove_from_cache() tag.command.remove_from_cache() del tag self.guild_tag_cache[guild.id].clear() await", "of charge, to any person obtaining a copy of this", "body to evaluate.\", required=True) ], ) await slasheval.register() await self.config.eval_command.set(slasheval.id)", "already {loaded}.\") await self.config.testing_enabled.set(target_state) if target_state: loaded = \"Loaded\" self.add_test_cog()", "pred: MessagePredicate = None, *, timeout: int = 60, )", "The above copyright notice and this permission notice shall be", "no slash tags on this server.\") await self.view_slash_tags(ctx, tags, is_global=False)", "for index, page in enumerate(pages, 1): embed = e.copy() embed.description", "= tag.tagscript if len(tagscript) > limit - 3: tagscript =", "@copy_doc(slashtag_edit_name) async def slashtag_global_edit_name( self, ctx: commands.Context, tag: GlobalTagConverter, *,", "@slashtag.command(\"message\") async def slashtag_message( self, ctx: 
commands.Context, tag_name: TagName(check_global=False, check_regex=False),", "ctx: commands.Context, tag: GlobalTagConverter): await tag.edit_options(ctx) @slashtag_global_edit.command(\"argument\", aliases=[\"option\"]) @copy_doc(slashtag_edit_argument) async", "\"Example: `member:A member of this server.`\\n\", \"*Slash argument names may", "true_or_false: bool = None): \"\"\" Load or unload the SlashTag", "associated documentation files (the \"Software\"), to deal in the Software", "await self.create_slash_tag( ctx, tag_name, tagscript, is_global=False, command_type=ApplicationCommandType.MESSAGE ) @commands.mod_or_permissions(manage_guild=True) @slashtag.command(\"user\")", "tag's name.\"\"\" await ctx.send(await tag.edit_name(name)) @slashtag_edit.command(\"description\") async def slashtag_edit_description( self,", "deleting slash tags.\") if not pred.result: return await ctx.send(\"Ok, not", "name: TagName(check_global=False) ): \"\"\"Edit a slash tag's name.\"\"\" await ctx.send(await" ]
[ "[::-1]: l1 = i+l1 print(l1) if str1 == l1: print(\"string", "str1 == l1: print(\"string is a palindrome\") else : print(\"string", "string :\") l1 =\"\" for i in str1 [::-1]: l1", "for i in str1 [::-1]: l1 = i+l1 print(l1) if", "if str1 == l1: print(\"string is a palindrome\") else :", "== l1: print(\"string is a palindrome\") else : print(\"string is", ":\") l1 =\"\" for i in str1 [::-1]: l1 =", "l1 = i+l1 print(l1) if str1 == l1: print(\"string is", "l1: print(\"string is a palindrome\") else : print(\"string is not", "is a palindrome\") else : print(\"string is not a palindrome\")", "l1 =\"\" for i in str1 [::-1]: l1 = i+l1", "=\"\" for i in str1 [::-1]: l1 = i+l1 print(l1)", "print(\"string is a palindrome\") else : print(\"string is not a", "str1 [::-1]: l1 = i+l1 print(l1) if str1 == l1:", "i+l1 print(l1) if str1 == l1: print(\"string is a palindrome\")", "a string :\") l1 =\"\" for i in str1 [::-1]:", "str1= input(\"enter a string :\") l1 =\"\" for i in", "input(\"enter a string :\") l1 =\"\" for i in str1", "i in str1 [::-1]: l1 = i+l1 print(l1) if str1", "= i+l1 print(l1) if str1 == l1: print(\"string is a", "in str1 [::-1]: l1 = i+l1 print(l1) if str1 ==", "print(l1) if str1 == l1: print(\"string is a palindrome\") else" ]
[ "in response.xpath(\"//div[@class='yy toa']//a/@href\"): tmp_url = self.base_url + xp_url.extract() yield Request(tmp_url.replace(\"EN\",", "response.meta.get('prd_data', {}) package = first(packages, {}) if package: d['brand'] =", "and catalog_price.get('Value'), 'currency': catalog_price and strip(catalog_price.get('Currency')), 'attrs': json.dumps(package_obj), } yield", "next_page), callback=self.parse_list) def parse_package(self, response): s = re.findall(r\"(?<=\\().+(?=\\))\", response.text)[0] packages", "string import ascii_uppercase from time import time from urllib.parse import", "import ascii_uppercase from time import time from urllib.parse import urljoin", "'img_url': img_url and urljoin(response.url, img_url), } data_jkid = xp_box.xpath(\".//div[@data-jkid]/@data-jkid\").get() data_cid", "start_urls = map(lambda x: \"http://www.jkchemical.com/CH/products/index/ProductName/{0}.html\".format(x), ascii_uppercase) prd_size_url = \"http://www.jkchemical.com/Controls/Handler/GetPackAgeJsonp.ashx?callback=py27&value={value}&cid={cid}&type=product&_={ts}\" def", "= response.xpath('//a[contains(text(), \"下一页\")]/@href').get() if next_page: yield Request(urljoin(response.url, next_page), callback=self.parse_list) def", "= strip(div.xpath('.//li[@id=\"ctl00_cph_Content_li_lt_Brand\"]/text()').get(), '') rel_url = div.xpath('.//a[@class=\"name\"]/@href').get() img_url = div.xpath('.//img/@src').get() d", "mulu_text']\") brand = strip(div.xpath('.//li[@id=\"ctl00_cph_Content_li_lt_Brand\"]/text()').get(), '') rel_url = div.xpath('.//a[@class=\"name\"]/@href').get() img_url =", "= re.findall(r\"(?<=\\().+(?=\\))\", response.text)[0] packages = json.loads(s) d = response.meta.get('prd_data', {})", "re from string import ascii_uppercase from time import time from", "= package_obj.get(\"CatalogPrice\", {}) dd = { 'brand': d.get('brand'), 'cat_no': d.get('cat_no'),", "img_url), } data_jkid = xp_box.xpath(\".//div[@data-jkid]/@data-jkid\").get() data_cid = 
xp_box.xpath(\".//div[@data-cid]/@data-cid\").get() yield Request(self.prd_size_url.format(value=data_jkid,", "= xp_box.xpath(\".//div[@data-jkid]/@data-jkid\").get() data_cid = xp_box.xpath(\".//div[@data-cid]/@data-cid\").get() yield Request(self.prd_size_url.format(value=data_jkid, cid=data_cid, ts=int(time())), body=u\"\",", "\"http://www.jkchemical.com\" start_urls = map(lambda x: \"http://www.jkchemical.com/CH/products/index/ProductName/{0}.html\".format(x), ascii_uppercase) prd_size_url = \"http://www.jkchemical.com/Controls/Handler/GetPackAgeJsonp.ashx?callback=py27&value={value}&cid={cid}&type=product&_={ts}\"", "parse(self, response): for xp_url in response.xpath(\"//div[@class='yy toa']//a/@href\"): tmp_url = self.base_url", "def parse_list(self, response): xp_boxes = response.xpath(\"//table[@id]//div[@class='PRODUCT_box']\") for xp_box in xp_boxes:", "urljoin import scrapy from more_itertools import first from scrapy import", "next_page: yield Request(urljoin(response.url, next_page), callback=self.parse_list) def parse_package(self, response): s =", "rel_url = div.xpath('.//a[@class=\"name\"]/@href').get() img_url = div.xpath('.//img/@src').get() d = { 'brand':", "xp_box.xpath(\".//div[2][@class='left_right mulu_text']\") brand = strip(div.xpath('.//li[@id=\"ctl00_cph_Content_li_lt_Brand\"]/text()').get(), '') rel_url = div.xpath('.//a[@class=\"name\"]/@href').get() img_url", "div.xpath('.//a[@class=\"name\"]/@href').get() img_url = div.xpath('.//img/@src').get() d = { 'brand': brand.replace('-', '')", "response): s = re.findall(r\"(?<=\\().+(?=\\))\", response.text)[0] packages = json.loads(s) d =", "div.xpath(\".//li[4]/text()\").get().split(u\":\")[-1].strip(), \"en_name\": strip(xp_box.xpath(\".//a[@class='name']/text()\").get()), \"cn_name\": strip(xp_box.xpath(\".//a[@class='name']//span[1]/text()\").get()), 'prd_url': rel_url and urljoin(response.url, rel_url),", "if next_page: yield Request(urljoin(response.url, next_page), 
callback=self.parse_list) def parse_package(self, response): s", "'prd_url': rel_url and urljoin(response.url, rel_url), 'img_url': img_url and urljoin(response.url, img_url),", "product_spider.utils.functions import strip class JkPrdSpider(scrapy.Spider): name = \"jk\" allowed_domains =", "urljoin(response.url, img_url), } data_jkid = xp_box.xpath(\".//div[@data-jkid]/@data-jkid\").get() data_cid = xp_box.xpath(\".//div[@data-cid]/@data-cid\").get() yield", "\"http://www.jkchemical.com/Controls/Handler/GetPackAgeJsonp.ashx?callback=py27&value={value}&cid={cid}&type=product&_={ts}\" def parse(self, response): for xp_url in response.xpath(\"//div[@class='yy toa']//a/@href\"): tmp_url", "strip(div.xpath('.//li[@id=\"ctl00_cph_Content_li_lt_Brand\"]/text()').get(), '') rel_url = div.xpath('.//a[@class=\"name\"]/@href').get() img_url = div.xpath('.//img/@src').get() d =", "{}) package = first(packages, {}) if package: d['brand'] = d['brand']", "urljoin(response.url, rel_url), 'img_url': img_url and urljoin(response.url, img_url), } data_jkid =", "= json.loads(s) d = response.meta.get('prd_data', {}) package = first(packages, {})", "import re from string import ascii_uppercase from time import time", "name = \"jk\" allowed_domains = [\"jkchemical.com\"] base_url = \"http://www.jkchemical.com\" start_urls", "in xp_boxes: div = xp_box.xpath(\".//div[2][@class='left_right mulu_text']\") brand = strip(div.xpath('.//li[@id=\"ctl00_cph_Content_li_lt_Brand\"]/text()').get(), '')", "import Request from product_spider.items import JkProduct, JKPackage from product_spider.utils.functions import", "Request(self.prd_size_url.format(value=data_jkid, cid=data_cid, ts=int(time())), body=u\"\", meta={\"prd_data\": d}, callback=self.parse_package) next_page = response.xpath('//a[contains(text(),", "yield Request(urljoin(response.url, next_page), callback=self.parse_list) def parse_package(self, response): s = re.findall(r\"(?<=\\().+(?=\\))\",", "re.findall(r\"(?<=\\().+(?=\\))\", 
response.text)[0] packages = json.loads(s) d = response.meta.get('prd_data', {}) package", "meta={\"prd_data\": d}, callback=self.parse_package) next_page = response.xpath('//a[contains(text(), \"下一页\")]/@href').get() if next_page: yield", "def parse(self, response): for xp_url in response.xpath(\"//div[@class='yy toa']//a/@href\"): tmp_url =", "parse_list(self, response): xp_boxes = response.xpath(\"//table[@id]//div[@class='PRODUCT_box']\") for xp_box in xp_boxes: div", "'price': catalog_price and catalog_price.get('Value'), 'currency': catalog_price and strip(catalog_price.get('Currency')), 'attrs': json.dumps(package_obj),", "for package_obj in packages: catalog_price = package_obj.get(\"CatalogPrice\", {}) dd =", "xp_url in response.xpath(\"//div[@class='yy toa']//a/@href\"): tmp_url = self.base_url + xp_url.extract() yield", "package = first(packages, {}) if package: d['brand'] = d['brand'] or", "strip class JkPrdSpider(scrapy.Spider): name = \"jk\" allowed_domains = [\"jkchemical.com\"] base_url", "and urljoin(response.url, img_url), } data_jkid = xp_box.xpath(\".//div[@data-jkid]/@data-jkid\").get() data_cid = xp_box.xpath(\".//div[@data-cid]/@data-cid\").get()", "first(packages, {}) if package: d['brand'] = d['brand'] or package.get('Product', {}).get('BrandName')", "strip(xp_box.xpath(\".//a[@class='name']/text()\").get()), \"cn_name\": strip(xp_box.xpath(\".//a[@class='name']//span[1]/text()\").get()), 'prd_url': rel_url and urljoin(response.url, rel_url), 'img_url': img_url", "'cat_no': d.get('cat_no'), 'package': package_obj.get(\"stringFormat\"), 'price': catalog_price and catalog_price.get('Value'), 'currency': catalog_price", "from product_spider.utils.functions import strip class JkPrdSpider(scrapy.Spider): name = \"jk\" allowed_domains", "time from urllib.parse import urljoin import scrapy from more_itertools import", "self.base_url + xp_url.extract() yield Request(tmp_url.replace(\"EN\", \"CH\"), callback=self.parse_list) def parse_list(self, 
response):", "'brand': d.get('brand'), 'cat_no': d.get('cat_no'), 'package': package_obj.get(\"stringFormat\"), 'price': catalog_price and catalog_price.get('Value'),", "data_cid = xp_box.xpath(\".//div[@data-cid]/@data-cid\").get() yield Request(self.prd_size_url.format(value=data_jkid, cid=data_cid, ts=int(time())), body=u\"\", meta={\"prd_data\": d},", "Request(tmp_url.replace(\"EN\", \"CH\"), callback=self.parse_list) def parse_list(self, response): xp_boxes = response.xpath(\"//table[@id]//div[@class='PRODUCT_box']\") for", "= d['brand'] or package.get('Product', {}).get('BrandName') yield JkProduct(**d) for package_obj in", "ascii_uppercase from time import time from urllib.parse import urljoin import", "response.xpath(\"//div[@class='yy toa']//a/@href\"): tmp_url = self.base_url + xp_url.extract() yield Request(tmp_url.replace(\"EN\", \"CH\"),", "import strip class JkPrdSpider(scrapy.Spider): name = \"jk\" allowed_domains = [\"jkchemical.com\"]", "d['brand'] or package.get('Product', {}).get('BrandName') yield JkProduct(**d) for package_obj in packages:", "response.text)[0] packages = json.loads(s) d = response.meta.get('prd_data', {}) package =", "catalog_price and catalog_price.get('Value'), 'currency': catalog_price and strip(catalog_price.get('Currency')), 'attrs': json.dumps(package_obj), }", "= [\"jkchemical.com\"] base_url = \"http://www.jkchemical.com\" start_urls = map(lambda x: \"http://www.jkchemical.com/CH/products/index/ProductName/{0}.html\".format(x),", "data_jkid = xp_box.xpath(\".//div[@data-jkid]/@data-jkid\").get() data_cid = xp_box.xpath(\".//div[@data-cid]/@data-cid\").get() yield Request(self.prd_size_url.format(value=data_jkid, cid=data_cid, ts=int(time())),", "response): for xp_url in response.xpath(\"//div[@class='yy toa']//a/@href\"): tmp_url = self.base_url +", "if package: d['brand'] = d['brand'] or package.get('Product', {}).get('BrandName') yield JkProduct(**d)", "xp_boxes = 
response.xpath(\"//table[@id]//div[@class='PRODUCT_box']\") for xp_box in xp_boxes: div = xp_box.xpath(\".//div[2][@class='left_right", "catalog_price = package_obj.get(\"CatalogPrice\", {}) dd = { 'brand': d.get('brand'), 'cat_no':", "Request from product_spider.items import JkProduct, JKPackage from product_spider.utils.functions import strip", "\"cn_name\": strip(xp_box.xpath(\".//a[@class='name']//span[1]/text()\").get()), 'prd_url': rel_url and urljoin(response.url, rel_url), 'img_url': img_url and", "yield Request(self.prd_size_url.format(value=data_jkid, cid=data_cid, ts=int(time())), body=u\"\", meta={\"prd_data\": d}, callback=self.parse_package) next_page =", "and urljoin(response.url, rel_url), 'img_url': img_url and urljoin(response.url, img_url), } data_jkid", "from more_itertools import first from scrapy import Request from product_spider.items", "div.xpath('.//img/@src').get() d = { 'brand': brand.replace('-', '') or None, \"purity\":", "{ 'brand': d.get('brand'), 'cat_no': d.get('cat_no'), 'package': package_obj.get(\"stringFormat\"), 'price': catalog_price and", "time import time from urllib.parse import urljoin import scrapy from", "brand.replace('-', '') or None, \"purity\": div.xpath(\".//li[1]/text()\").get('').split(u\":\")[-1].strip(), \"cas\": strip(div.xpath(\".//li[2]//a/text()\").get()), \"cat_no\": div.xpath(\".//li[4]/text()\").get().split(u\":\")[-1].strip(),", "None, \"purity\": div.xpath(\".//li[1]/text()\").get('').split(u\":\")[-1].strip(), \"cas\": strip(div.xpath(\".//li[2]//a/text()\").get()), \"cat_no\": div.xpath(\".//li[4]/text()\").get().split(u\":\")[-1].strip(), \"en_name\": strip(xp_box.xpath(\".//a[@class='name']/text()\").get()), \"cn_name\":", "urllib.parse import urljoin import scrapy from more_itertools import first from", "rel_url and urljoin(response.url, rel_url), 'img_url': img_url and urljoin(response.url, img_url), }", "= xp_box.xpath(\".//div[@data-cid]/@data-cid\").get() yield 
Request(self.prd_size_url.format(value=data_jkid, cid=data_cid, ts=int(time())), body=u\"\", meta={\"prd_data\": d}, callback=self.parse_package)", "json.loads(s) d = response.meta.get('prd_data', {}) package = first(packages, {}) if", "div = xp_box.xpath(\".//div[2][@class='left_right mulu_text']\") brand = strip(div.xpath('.//li[@id=\"ctl00_cph_Content_li_lt_Brand\"]/text()').get(), '') rel_url =", "div.xpath(\".//li[1]/text()\").get('').split(u\":\")[-1].strip(), \"cas\": strip(div.xpath(\".//li[2]//a/text()\").get()), \"cat_no\": div.xpath(\".//li[4]/text()\").get().split(u\":\")[-1].strip(), \"en_name\": strip(xp_box.xpath(\".//a[@class='name']/text()\").get()), \"cn_name\": strip(xp_box.xpath(\".//a[@class='name']//span[1]/text()\").get()), 'prd_url':", "\"en_name\": strip(xp_box.xpath(\".//a[@class='name']/text()\").get()), \"cn_name\": strip(xp_box.xpath(\".//a[@class='name']//span[1]/text()\").get()), 'prd_url': rel_url and urljoin(response.url, rel_url), 'img_url':", "brand = strip(div.xpath('.//li[@id=\"ctl00_cph_Content_li_lt_Brand\"]/text()').get(), '') rel_url = div.xpath('.//a[@class=\"name\"]/@href').get() img_url = div.xpath('.//img/@src').get()", "base_url = \"http://www.jkchemical.com\" start_urls = map(lambda x: \"http://www.jkchemical.com/CH/products/index/ProductName/{0}.html\".format(x), ascii_uppercase) prd_size_url", "map(lambda x: \"http://www.jkchemical.com/CH/products/index/ProductName/{0}.html\".format(x), ascii_uppercase) prd_size_url = \"http://www.jkchemical.com/Controls/Handler/GetPackAgeJsonp.ashx?callback=py27&value={value}&cid={cid}&type=product&_={ts}\" def parse(self, response):", "toa']//a/@href\"): tmp_url = self.base_url + xp_url.extract() yield Request(tmp_url.replace(\"EN\", \"CH\"), callback=self.parse_list)", "def parse_package(self, response): s = re.findall(r\"(?<=\\().+(?=\\))\", response.text)[0] packages = json.loads(s)", "package_obj in packages: catalog_price = package_obj.get(\"CatalogPrice\", {}) dd = {", "= 
response.xpath(\"//table[@id]//div[@class='PRODUCT_box']\") for xp_box in xp_boxes: div = xp_box.xpath(\".//div[2][@class='left_right mulu_text']\")", "+ xp_url.extract() yield Request(tmp_url.replace(\"EN\", \"CH\"), callback=self.parse_list) def parse_list(self, response): xp_boxes", "from scrapy import Request from product_spider.items import JkProduct, JKPackage from", "\"jk\" allowed_domains = [\"jkchemical.com\"] base_url = \"http://www.jkchemical.com\" start_urls = map(lambda", "= \"http://www.jkchemical.com/Controls/Handler/GetPackAgeJsonp.ashx?callback=py27&value={value}&cid={cid}&type=product&_={ts}\" def parse(self, response): for xp_url in response.xpath(\"//div[@class='yy toa']//a/@href\"):", "= xp_box.xpath(\".//div[2][@class='left_right mulu_text']\") brand = strip(div.xpath('.//li[@id=\"ctl00_cph_Content_li_lt_Brand\"]/text()').get(), '') rel_url = div.xpath('.//a[@class=\"name\"]/@href').get()", "= first(packages, {}) if package: d['brand'] = d['brand'] or package.get('Product',", "JkPrdSpider(scrapy.Spider): name = \"jk\" allowed_domains = [\"jkchemical.com\"] base_url = \"http://www.jkchemical.com\"", "import first from scrapy import Request from product_spider.items import JkProduct,", "rel_url), 'img_url': img_url and urljoin(response.url, img_url), } data_jkid = xp_box.xpath(\".//div[@data-jkid]/@data-jkid\").get()", "or None, \"purity\": div.xpath(\".//li[1]/text()\").get('').split(u\":\")[-1].strip(), \"cas\": strip(div.xpath(\".//li[2]//a/text()\").get()), \"cat_no\": div.xpath(\".//li[4]/text()\").get().split(u\":\")[-1].strip(), \"en_name\": strip(xp_box.xpath(\".//a[@class='name']/text()\").get()),", "strip(div.xpath(\".//li[2]//a/text()\").get()), \"cat_no\": div.xpath(\".//li[4]/text()\").get().split(u\":\")[-1].strip(), \"en_name\": strip(xp_box.xpath(\".//a[@class='name']/text()\").get()), \"cn_name\": strip(xp_box.xpath(\".//a[@class='name']//span[1]/text()\").get()), 'prd_url': rel_url and", "'') rel_url = 
div.xpath('.//a[@class=\"name\"]/@href').get() img_url = div.xpath('.//img/@src').get() d = {", "s = re.findall(r\"(?<=\\().+(?=\\))\", response.text)[0] packages = json.loads(s) d = response.meta.get('prd_data',", "prd_size_url = \"http://www.jkchemical.com/Controls/Handler/GetPackAgeJsonp.ashx?callback=py27&value={value}&cid={cid}&type=product&_={ts}\" def parse(self, response): for xp_url in response.xpath(\"//div[@class='yy", "product_spider.items import JkProduct, JKPackage from product_spider.utils.functions import strip class JkPrdSpider(scrapy.Spider):", "scrapy from more_itertools import first from scrapy import Request from", "xp_boxes: div = xp_box.xpath(\".//div[2][@class='left_right mulu_text']\") brand = strip(div.xpath('.//li[@id=\"ctl00_cph_Content_li_lt_Brand\"]/text()').get(), '') rel_url", "ascii_uppercase) prd_size_url = \"http://www.jkchemical.com/Controls/Handler/GetPackAgeJsonp.ashx?callback=py27&value={value}&cid={cid}&type=product&_={ts}\" def parse(self, response): for xp_url in", "body=u\"\", meta={\"prd_data\": d}, callback=self.parse_package) next_page = response.xpath('//a[contains(text(), \"下一页\")]/@href').get() if next_page:", "in packages: catalog_price = package_obj.get(\"CatalogPrice\", {}) dd = { 'brand':", "d.get('brand'), 'cat_no': d.get('cat_no'), 'package': package_obj.get(\"stringFormat\"), 'price': catalog_price and catalog_price.get('Value'), 'currency':", "import urljoin import scrapy from more_itertools import first from scrapy", "d}, callback=self.parse_package) next_page = response.xpath('//a[contains(text(), \"下一页\")]/@href').get() if next_page: yield Request(urljoin(response.url,", "\"下一页\")]/@href').get() if next_page: yield Request(urljoin(response.url, next_page), callback=self.parse_list) def parse_package(self, response):", "response.xpath('//a[contains(text(), \"下一页\")]/@href').get() if next_page: yield Request(urljoin(response.url, next_page), callback=self.parse_list) def parse_package(self,", "import time 
from urllib.parse import urljoin import scrapy from more_itertools", "\"CH\"), callback=self.parse_list) def parse_list(self, response): xp_boxes = response.xpath(\"//table[@id]//div[@class='PRODUCT_box']\") for xp_box", "scrapy import Request from product_spider.items import JkProduct, JKPackage from product_spider.utils.functions", "img_url and urljoin(response.url, img_url), } data_jkid = xp_box.xpath(\".//div[@data-jkid]/@data-jkid\").get() data_cid =", "for xp_box in xp_boxes: div = xp_box.xpath(\".//div[2][@class='left_right mulu_text']\") brand =", "= \"http://www.jkchemical.com\" start_urls = map(lambda x: \"http://www.jkchemical.com/CH/products/index/ProductName/{0}.html\".format(x), ascii_uppercase) prd_size_url =", "json import re from string import ascii_uppercase from time import", "x: \"http://www.jkchemical.com/CH/products/index/ProductName/{0}.html\".format(x), ascii_uppercase) prd_size_url = \"http://www.jkchemical.com/Controls/Handler/GetPackAgeJsonp.ashx?callback=py27&value={value}&cid={cid}&type=product&_={ts}\" def parse(self, response): for", "JKPackage from product_spider.utils.functions import strip class JkPrdSpider(scrapy.Spider): name = \"jk\"", "allowed_domains = [\"jkchemical.com\"] base_url = \"http://www.jkchemical.com\" start_urls = map(lambda x:", "\"purity\": div.xpath(\".//li[1]/text()\").get('').split(u\":\")[-1].strip(), \"cas\": strip(div.xpath(\".//li[2]//a/text()\").get()), \"cat_no\": div.xpath(\".//li[4]/text()\").get().split(u\":\")[-1].strip(), \"en_name\": strip(xp_box.xpath(\".//a[@class='name']/text()\").get()), \"cn_name\": strip(xp_box.xpath(\".//a[@class='name']//span[1]/text()\").get()),", "next_page = response.xpath('//a[contains(text(), \"下一页\")]/@href').get() if next_page: yield Request(urljoin(response.url, next_page), callback=self.parse_list)", "[\"jkchemical.com\"] base_url = \"http://www.jkchemical.com\" start_urls = map(lambda x: 
\"http://www.jkchemical.com/CH/products/index/ProductName/{0}.html\".format(x), ascii_uppercase)", "= { 'brand': d.get('brand'), 'cat_no': d.get('cat_no'), 'package': package_obj.get(\"stringFormat\"), 'price': catalog_price", "{ 'brand': brand.replace('-', '') or None, \"purity\": div.xpath(\".//li[1]/text()\").get('').split(u\":\")[-1].strip(), \"cas\": strip(div.xpath(\".//li[2]//a/text()\").get()),", "response): xp_boxes = response.xpath(\"//table[@id]//div[@class='PRODUCT_box']\") for xp_box in xp_boxes: div =", "\"cas\": strip(div.xpath(\".//li[2]//a/text()\").get()), \"cat_no\": div.xpath(\".//li[4]/text()\").get().split(u\":\")[-1].strip(), \"en_name\": strip(xp_box.xpath(\".//a[@class='name']/text()\").get()), \"cn_name\": strip(xp_box.xpath(\".//a[@class='name']//span[1]/text()\").get()), 'prd_url': rel_url", "from urllib.parse import urljoin import scrapy from more_itertools import first", "import json import re from string import ascii_uppercase from time", "= self.base_url + xp_url.extract() yield Request(tmp_url.replace(\"EN\", \"CH\"), callback=self.parse_list) def parse_list(self,", "packages = json.loads(s) d = response.meta.get('prd_data', {}) package = first(packages,", "'') or None, \"purity\": div.xpath(\".//li[1]/text()\").get('').split(u\":\")[-1].strip(), \"cas\": strip(div.xpath(\".//li[2]//a/text()\").get()), \"cat_no\": div.xpath(\".//li[4]/text()\").get().split(u\":\")[-1].strip(), \"en_name\":", "{}).get('BrandName') yield JkProduct(**d) for package_obj in packages: catalog_price = package_obj.get(\"CatalogPrice\",", "callback=self.parse_package) next_page = response.xpath('//a[contains(text(), \"下一页\")]/@href').get() if next_page: yield Request(urljoin(response.url, next_page),", "response.xpath(\"//table[@id]//div[@class='PRODUCT_box']\") for xp_box in xp_boxes: div = xp_box.xpath(\".//div[2][@class='left_right mulu_text']\") brand", "xp_box in xp_boxes: div = xp_box.xpath(\".//div[2][@class='left_right mulu_text']\") brand = 
strip(div.xpath('.//li[@id=\"ctl00_cph_Content_li_lt_Brand\"]/text()').get(),", "d = { 'brand': brand.replace('-', '') or None, \"purity\": div.xpath(\".//li[1]/text()\").get('').split(u\":\")[-1].strip(),", "or package.get('Product', {}).get('BrandName') yield JkProduct(**d) for package_obj in packages: catalog_price", "d.get('cat_no'), 'package': package_obj.get(\"stringFormat\"), 'price': catalog_price and catalog_price.get('Value'), 'currency': catalog_price and", "import scrapy from more_itertools import first from scrapy import Request", "from time import time from urllib.parse import urljoin import scrapy", "packages: catalog_price = package_obj.get(\"CatalogPrice\", {}) dd = { 'brand': d.get('brand'),", "callback=self.parse_list) def parse_list(self, response): xp_boxes = response.xpath(\"//table[@id]//div[@class='PRODUCT_box']\") for xp_box in", "= map(lambda x: \"http://www.jkchemical.com/CH/products/index/ProductName/{0}.html\".format(x), ascii_uppercase) prd_size_url = \"http://www.jkchemical.com/Controls/Handler/GetPackAgeJsonp.ashx?callback=py27&value={value}&cid={cid}&type=product&_={ts}\" def parse(self,", "{}) if package: d['brand'] = d['brand'] or package.get('Product', {}).get('BrandName') yield", "yield Request(tmp_url.replace(\"EN\", \"CH\"), callback=self.parse_list) def parse_list(self, response): xp_boxes = response.xpath(\"//table[@id]//div[@class='PRODUCT_box']\")", "strip(xp_box.xpath(\".//a[@class='name']//span[1]/text()\").get()), 'prd_url': rel_url and urljoin(response.url, rel_url), 'img_url': img_url and urljoin(response.url,", "Request(urljoin(response.url, next_page), callback=self.parse_list) def parse_package(self, response): s = re.findall(r\"(?<=\\().+(?=\\))\", response.text)[0]", "parse_package(self, response): s = re.findall(r\"(?<=\\().+(?=\\))\", response.text)[0] packages = json.loads(s) d", "d = response.meta.get('prd_data', {}) package = first(packages, {}) if package:", "{}) dd = { 'brand': d.get('brand'), 'cat_no': 
d.get('cat_no'), 'package': package_obj.get(\"stringFormat\"),", "= response.meta.get('prd_data', {}) package = first(packages, {}) if package: d['brand']", "ts=int(time())), body=u\"\", meta={\"prd_data\": d}, callback=self.parse_package) next_page = response.xpath('//a[contains(text(), \"下一页\")]/@href').get() if", "package_obj.get(\"stringFormat\"), 'price': catalog_price and catalog_price.get('Value'), 'currency': catalog_price and strip(catalog_price.get('Currency')), 'attrs':", "tmp_url = self.base_url + xp_url.extract() yield Request(tmp_url.replace(\"EN\", \"CH\"), callback=self.parse_list) def", "xp_url.extract() yield Request(tmp_url.replace(\"EN\", \"CH\"), callback=self.parse_list) def parse_list(self, response): xp_boxes =", "xp_box.xpath(\".//div[@data-jkid]/@data-jkid\").get() data_cid = xp_box.xpath(\".//div[@data-cid]/@data-cid\").get() yield Request(self.prd_size_url.format(value=data_jkid, cid=data_cid, ts=int(time())), body=u\"\", meta={\"prd_data\":", "package: d['brand'] = d['brand'] or package.get('Product', {}).get('BrandName') yield JkProduct(**d) for", "yield JkProduct(**d) for package_obj in packages: catalog_price = package_obj.get(\"CatalogPrice\", {})", "class JkPrdSpider(scrapy.Spider): name = \"jk\" allowed_domains = [\"jkchemical.com\"] base_url =", "= div.xpath('.//a[@class=\"name\"]/@href').get() img_url = div.xpath('.//img/@src').get() d = { 'brand': brand.replace('-',", "more_itertools import first from scrapy import Request from product_spider.items import", "xp_box.xpath(\".//div[@data-cid]/@data-cid\").get() yield Request(self.prd_size_url.format(value=data_jkid, cid=data_cid, ts=int(time())), body=u\"\", meta={\"prd_data\": d}, callback=self.parse_package) next_page", "dd = { 'brand': d.get('brand'), 'cat_no': d.get('cat_no'), 'package': package_obj.get(\"stringFormat\"), 'price':", "from string import ascii_uppercase from time import time from urllib.parse", "} data_jkid = 
xp_box.xpath(\".//div[@data-jkid]/@data-jkid\").get() data_cid = xp_box.xpath(\".//div[@data-cid]/@data-cid\").get() yield Request(self.prd_size_url.format(value=data_jkid, cid=data_cid,", "d['brand'] = d['brand'] or package.get('Product', {}).get('BrandName') yield JkProduct(**d) for package_obj", "import JkProduct, JKPackage from product_spider.utils.functions import strip class JkPrdSpider(scrapy.Spider): name", "JkProduct, JKPackage from product_spider.utils.functions import strip class JkPrdSpider(scrapy.Spider): name =", "package_obj.get(\"CatalogPrice\", {}) dd = { 'brand': d.get('brand'), 'cat_no': d.get('cat_no'), 'package':", "img_url = div.xpath('.//img/@src').get() d = { 'brand': brand.replace('-', '') or", "JkProduct(**d) for package_obj in packages: catalog_price = package_obj.get(\"CatalogPrice\", {}) dd", "= \"jk\" allowed_domains = [\"jkchemical.com\"] base_url = \"http://www.jkchemical.com\" start_urls =", "package.get('Product', {}).get('BrandName') yield JkProduct(**d) for package_obj in packages: catalog_price =", "\"cat_no\": div.xpath(\".//li[4]/text()\").get().split(u\":\")[-1].strip(), \"en_name\": strip(xp_box.xpath(\".//a[@class='name']/text()\").get()), \"cn_name\": strip(xp_box.xpath(\".//a[@class='name']//span[1]/text()\").get()), 'prd_url': rel_url and urljoin(response.url,", "first from scrapy import Request from product_spider.items import JkProduct, JKPackage", "callback=self.parse_list) def parse_package(self, response): s = re.findall(r\"(?<=\\().+(?=\\))\", response.text)[0] packages =", "for xp_url in response.xpath(\"//div[@class='yy toa']//a/@href\"): tmp_url = self.base_url + xp_url.extract()", "catalog_price.get('Value'), 'currency': catalog_price and strip(catalog_price.get('Currency')), 'attrs': json.dumps(package_obj), } yield JKPackage(**dd)", "cid=data_cid, ts=int(time())), body=u\"\", meta={\"prd_data\": d}, callback=self.parse_package) next_page = response.xpath('//a[contains(text(), \"下一页\")]/@href').get()", "from 
product_spider.items import JkProduct, JKPackage from product_spider.utils.functions import strip class", "= div.xpath('.//img/@src').get() d = { 'brand': brand.replace('-', '') or None,", "'brand': brand.replace('-', '') or None, \"purity\": div.xpath(\".//li[1]/text()\").get('').split(u\":\")[-1].strip(), \"cas\": strip(div.xpath(\".//li[2]//a/text()\").get()), \"cat_no\":", "'package': package_obj.get(\"stringFormat\"), 'price': catalog_price and catalog_price.get('Value'), 'currency': catalog_price and strip(catalog_price.get('Currency')),", "\"http://www.jkchemical.com/CH/products/index/ProductName/{0}.html\".format(x), ascii_uppercase) prd_size_url = \"http://www.jkchemical.com/Controls/Handler/GetPackAgeJsonp.ashx?callback=py27&value={value}&cid={cid}&type=product&_={ts}\" def parse(self, response): for xp_url", "= { 'brand': brand.replace('-', '') or None, \"purity\": div.xpath(\".//li[1]/text()\").get('').split(u\":\")[-1].strip(), \"cas\":" ]
[ "if veh is not None: self.observation[name * 4 + 0]", "in preStpe self.preStep() if self.timestep > 5000: raise Exception('cannot find", "self.rd) for veh_id in list(self.veh_dict.keys()): if veh_id not in veh_id_tuple:", "if self.lateral_action != 1 and 0 < TTC < 2:", "on: successfully lane change, dis2targetlane:', # self.ego.dis2tgtLane) # too close", "self.veh_dict[self.ego.targetLeaderID].pos_lat) / 3.2 # # print('alpha', alpha) # assert 0", "vehicles self.cfg = 'C:/Users/<NAME>/Desktop/map/ramp3/mapFree.sumo.cfg' elif traffic == 2: # average", "self.timestep > 5000: raise Exception('cannot find ego after 5000 timesteps')", "else: r_lat_c = 0 if self.ego.targetLeaderID is not None: #", "= () self.egoID = id self.ego = None # self.tgtLane", "self.observation[0] = self.ego.lanePos self.observation[1] = self.ego.speed self.observation[2] = self.ego.pos_lat self.observation[3]", "not None: # compute longitudinal time gap delta_V2 = self.veh_dict[self.ego.targetLeaderID].speed", "Exception('cannot find ego after 5000 timesteps') assert self.egoID in self.vehID_tuple_all,", "- math.exp(-2*TTC+5) else: r_long_c = 0 if self.lateral_action == 0:", "1.1 r_lat_c = -math.exp(-4*alpha+5) else: r_lat_c = 0 if self.ego.targetLeaderID", "os.path.join(os.environ['SUMO_HOME'], 'tools') sys.path.append(tools) print('success') else: sys.exit(\"please declare environment variable 'SUMO_HOME'\")", "Road() self.timestep = 0 self.dt = traci.simulation.getDeltaT() self.randomseed = None", "-math.exp(-4*alpha+5) else: r_lat_c = 0 if self.ego.targetLeaderID is not None:", "= self.veh_dict[self.ego.leaderID].speed - self.ego.speed delta_A = self.veh_dict[self.ego.leaderID].acce - self.ego.acce if", "1 wt = 1 ws = 1 we = 1", "import os import sys import random import datetime import gym", "TTC2 = - abs(delta_D2) / delta_V2 else: TTC2 = -delta_V2", "= 0 if self.lateral_action == 0: #abort lane change alpha", "randomness if seed is None: self.sumoCmd += ['--random'] else: self.sumoCmd", 
"== 2: # average 19 vehicles self.cfg = 'C:/Users/<NAME>/Desktop/map/ramp3/mapDense.sumo.cfg' else:", "= traci.vehicle.getSpeedFactor(egoid) self.ego_speedLimit = self.ego_speedFactor * traci.lane.getMaxSpeed(traci.vehicle.getLaneID(self.egoID)) self.ego.idm_obj = IDM()", "None # (float) : amount of reward returned after previous", "environment if 'SUMO_HOME' in os.environ: tools = os.path.join(os.environ['SUMO_HOME'], 'tools') sys.path.append(tools)", "None: self.observation[name * 4 + 0] = veh.lanePos self.observation[name *", "delta_A = self.veh_dict[self.ego.leaderID].acce - self.ego.acce if delta_A == 0: TTC", "self.egoID not in self.vehID_tuple_all: self.done = True # print('reset on:", "reward returned after previous action self.done = True # (bool):", "Returns: described in __init__ \"\"\" action_longi = action // 3", "self.ego.leaderID is not None: # compute longitudinal time gap delta_V", "self.lateral_action == 1: # lane change alpha = abs(self.ego.pos_lat -", "self.ego.leaderDis) TTC = TTC/delta_A if self.lateral_action != 1 and 0", "to ramp entrance, dis2targetlane:', # self.ego.dis2tgtLane) # ego vehicle out", "is True: self.info['resetFlag'] = True return self.observation, 0.0, self.done, self.info", "gui is True: self.sumoBinary += '-gui' self.sumoCmd = [self.sumoBinary] +", "= 1 we = 1 # reward related to comfort", "# compute longitudinal time gap delta_V2 = self.veh_dict[self.ego.targetLeaderID].speed - self.ego.speed", "math # add sumo/tools into python environment if 'SUMO_HOME' in", "action_lateral == 1: # and abs(self.ego.pos_lat - (0.5+self.ego.targetLane)*self.rd.laneWidth) > 0.01:", "if self.timestep > 5000: raise Exception('cannot find ego after 5000", "/ self.ego.dis2entrance r_effi_all = r_time + r_speed + r_effi #", "== 0: #abort lane change alpha = abs(self.ego.pos_lat - self.veh_dict[self.ego.leaderID].pos_lat)", "- (0.5+self.ego.origLane)*self.rd.laneWidth) > 0.01: self.is_success = self.ego.changeLane(True, self.ego.orig_laneIndex, 
self.rd) #", "self.is_success = self.ego.changeLane(True, -1, self.rd) # longitudinal control2--------------------- acceNext =", "on: too close to ramp entrance, dis2targetlane:', # self.ego.dis2tgtLane) #", "self.info def seed(self, seed=None): if seed is None: self.randomseed =", "3 self.lateral_action = action_lateral # action_longi = action[0] # action_lateral", "1: accelerate action[0] = -1: decelerate action[0] = 0: use", "of the ego may change lateral: action[1] = 1: lane", "self.observation = np.array(self.observation).flatten() # print(self.observation.shape) def updateReward(self): return -self.ego.dis2tgtLane def", "== 1: # lane change alpha = abs(self.ego.pos_lat - self.veh_dict[self.ego.targetLeaderID].pos_lat)", "change alpha = abs(self.ego.pos_lat - self.veh_dict[self.ego.leaderID].pos_lat) / 3.2 assert 0", "self.ego.changeLane(True, self.ego.trgt_laneIndex, self.rd) # print('posLat', self.ego.pos_lat, 'lane', self.ego.curr_laneIndex, 'rdWdith', self.rd.laneWidth)", "+ r_safe return r_total def is_done(self): # lane change successfully", "if self.done is True: self.info['resetFlag'] = True return self.observation, 0.0,", "self.seed(randomseed) if sumoseed is None: self.sumoseed = self.randomseed traci.close() self.__init__(id=egoid,", "2 # reward related to efficiency r_time = - wt", "self.sumoBinary = \"/usr/local/Cellar/sumo/1.2.0/bin/sumo\" self.sumoCmd = ['-c', self.cfg, # '--lanechange.duration', str(3),", "- 0.5*self.rd.laneWidth)) # abort lane change, change back to ego's", "traci.simulation.getCollidingVehiclesNumber() if self.collision_num > 0: self.done = True # print('reset", "# target lane follower self.observation = np.empty(20) self.reward = None", "keep current lateral position if action_lateral == 2: self.is_success =", "# # print('lateralPos2tgtleader', abs(self.ego.pos_lat - self.veh_dict[self.ego.targetLeaderID].pos_lat)) # alpha = abs(self.ego.pos_lat", "if action_lateral == 2: self.is_success = self.ego.changeLane(True, -1, 
self.rd) #", "self.reward, self.done, self.info def seed(self, seed=None): if seed is None:", "to target lane if not self.is_success: if action_lateral == 1:", "longitudinal control2--------------------- acceNext = self.ego.updateLongitudinalSpeedIDM(action_longi) # print(acceNext) vNext = self.ego.speed", "self.__init__(id=egoid, traffic=tfc, gui=is_gui, seed=self.sumoseed) # continue step until ego appears", "self.updateReward() return self.observation, self.reward, self.done, self.info def seed(self, seed=None): if", "= we * self.ego.dis2tgtLane / self.ego.dis2entrance r_effi_all = r_time +", "+ 5) else: r_long_t = 0 if self.lateral_action == 1:", "import math # add sumo/tools into python environment if 'SUMO_HOME'", "in env' self.timestep += 1 # lateral control------------------------- # episode", "self.is_success = False self.collision_num = 0 self.lateral_action = 2 #", "# keep current lateral position if action_lateral == 2: self.is_success", "gap delta_V = self.veh_dict[self.ego.leaderID].speed - self.ego.speed delta_A = self.veh_dict[self.ego.leaderID].acce -", "[[0, 0, 0], # ego lane position and speed #", "time gap delta_V2 = self.veh_dict[self.ego.targetLeaderID].speed - self.ego.speed delta_A2 = self.veh_dict[self.ego.targetLeaderID].acce", "+ r_safe_tgtleader # total reward r_total = r_comf + r_effi_all", "traffic flow density if traffic == 0: # average 9", "+ acceNext * 0.1 traci.vehicle.setSpeed(self.egoID, vNext) # update info------------------------------ traci.simulationStep()", "self.ego.dis2tgtLane / self.ego.dis2entrance r_effi_all = r_time + r_speed + r_effi", "<= alpha <= 1.1 r_lat_t = -math.exp(-4*alpha+5) else: r_lat_t =", "check if rational def updateObservation(self): self.observation[0] = self.ego.lanePos self.observation[1] =", "lateral: action[1] = 1: lane change action[1] = 0: abort", "+ 300. 
self.observation[name * 4 + 1] = self.observation[1] self.observation[name", "self.observation[name * 4 + 3] = veh.acce else: self.observation[name *", "r_safe_tgtleader # total reward r_total = r_comf + r_effi_all +", "print('right', -(self.ego.pos_lat - 0.5*self.rd.laneWidth)) # abort lane change, change back", "self.veh_dict.keys(): # must ensure safety in preStpe self.preStep() if self.timestep", "str(float/int), instead of '3.0' self.sumoBinary = \"/usr/local/Cellar/sumo/1.2.0/bin/sumo\" self.sumoCmd = ['-c',", "self.preStep() if self.timestep > 5000: raise Exception('cannot find ego after", "0: follow original lane leader action[0] = 1: follow closer", "self.cfg, # '--lanechange.duration', str(3), # using 'Simple Continuous lane-change model'", "'C:/Users/<NAME>/Desktop/map/ramp3/map.sumo.cfg' # arguments must be string, if float/int, must be", "is True: self.sumoBinary += '-gui' self.sumoCmd = [self.sumoBinary] + self.sumoCmd", "and returns a tuple (observation, reward, done, info). 
Args: action", "self.updateObservation() self.reward = self.updateReward() return self.observation, self.reward, self.done, self.info def", "1] = self.observation[1] self.observation[name * 4 + 2] = 4.8", "self.done, self.info else: self.updateObservation() self.reward = self.updateReward() return self.observation, self.reward,", "= 1 wt = 1 ws = 1 we =", "= True # print('reset on: self.collision_num:', self.collision_num) def preStep(self): traci.simulationStep()", "self.ego.leaderID is not None: # # ('lateralPos2leader', abs(self.ego.pos_lat - self.veh_dict[self.ego.leaderID].pos_lat))", "= action[0] # action_lateral = action[1] assert self.done is False,", "lane change, change back to ego's original lane if action_lateral", "delta_A2 if self.lateral_action == 1 and 0 < TTC2 <", "target lane leader **important**: orginal/target lane leader will not change", "traffic=1, gui=False, seed=None): # todo check traffic flow density if", "* alpha + w_longi * (1 - alpha) * abs(", "* self.timestep r_speed = ws * (self.ego.speed - self.ego_speedLimit) r_effi", "dis2targetlane:', # self.ego.dis2tgtLane) # too close to ramp entrance if", "traci.edge.getLastStepVehicleIDs(self.rd.entranceEdgeID) self.update_veh_dict(self.vehID_tuple_all) def step(self, action=2): \"\"\"Run one timestep of the", "which case further step() calls will return undefined results self.info", "0 <= alpha <= 1.1 r_lat_t = -math.exp(-4*alpha+5) else: r_lat_t", "3 action_lateral = action % 3 self.lateral_action = action_lateral #", "self.ego.trgt_laneIndex, self.rd) # print('posLat', self.ego.pos_lat, 'lane', self.ego.curr_laneIndex, 'rdWdith', self.rd.laneWidth) #", "env.Road import Road from env.Vehicle import Vehicle import math #", "self.vehID_tuple_all = traci.edge.getLastStepVehicleIDs(self.rd.entranceEdgeID) self.update_veh_dict(self.vehID_tuple_all) def step(self, action=2): \"\"\"Run one timestep", "\"\"\" action_longi = action // 3 action_lateral = action %", "self.reward = self.updateReward() 
return self.observation, self.reward, self.done, self.info def seed(self,", "= self.ego.acce self._updateObservationSingle(1, self.ego.orig_leader) self._updateObservationSingle(2, self.ego.orig_follower) self._updateObservationSingle(3, self.ego.trgt_leader) self._updateObservationSingle(4, self.ego.trgt_follower)", "rational def updateObservation(self): self.observation[0] = self.ego.lanePos self.observation[1] = self.ego.speed self.observation[2]", "described in __init__ \"\"\" action_longi = action // 3 action_lateral", "= abs(self.ego.pos_lat - self.veh_dict[self.ego.leaderID].pos_lat) / 3.2 assert 0 <= alpha", "1.1 # # r_safe_tgtleader = w_lateral * alpha + w_longi", "add sumo/tools into python environment if 'SUMO_HOME' in os.environ: tools", "safety in preStpe self.preStep() if self.timestep > 5000: raise Exception('cannot", "ws = 1 we = 1 # reward related to", "arguments must be string, if float/int, must be converted to", "reset env # todo modify if self.is_success: self.done = True", "alpha) # assert 0 <= alpha <= 1.1 # #", "self.veh_dict[veh_id].update_info(self.rd, self.veh_dict) def _updateObservationSingle(self, name, veh): \"\"\" :param name: 0:ego;", "string, if float/int, must be converted to str(float/int), instead of", "change despite the lateral position of the ego may change", "self.veh_dict[self.ego.targetLeaderID].lanePos) # else: # r_safe_tgtleader = 0 # # #", "* (1 - alpha) * abs( # self.ego.lanePos - self.veh_dict[self.ego.targetLeaderID].lanePos)", "is not None: # # ('lateralPos2leader', abs(self.ego.pos_lat - self.veh_dict[self.ego.leaderID].pos_lat)) #", "alpha = abs(self.ego.pos_lat - self.veh_dict[self.ego.targetLeaderID].pos_lat) / 3.2 assert 0 <=", "+ 0] = veh.lanePos self.observation[name * 4 + 1] =", "in self.vehID_tuple_all, 'vehicle not in env' self.timestep += 1 #", "# must ensure safety in preStpe self.preStep() if self.timestep >", "related to safety w_lateral = 1 w_longi = 1 if", "veh_id in veh_id_tuple: if veh_id not in 
self.veh_dict.keys(): self.veh_dict[veh_id] =", "traci.close() self.__init__(id=egoid, traffic=tfc, gui=is_gui, seed=self.sumoseed) # continue step until ego", "self.ego.orig_leader) self._updateObservationSingle(2, self.ego.orig_follower) self._updateObservationSingle(3, self.ego.trgt_leader) self._updateObservationSingle(4, self.ego.trgt_follower) # self.observation =", "change lateral: action[1] = 1: lane change action[1] = 0:", "random.seed(self.randomseed) def reset(self, egoid, tlane=0, tfc=1, is_gui=True, sumoseed=None, randomseed=None): \"\"\"", "- self.ego_speedLimit) r_effi = we * self.ego.dis2tgtLane / self.ego.dis2entrance r_effi_all", "2: r_long_t = - math.exp(-2 * TTC2 + 5) else:", "= 2 # self.observation = [[0, 0, 0], # ego", "abs(self.ego.pos_lat - (0.5+self.ego.origLane)*self.rd.laneWidth) > 0.01: self.is_success = self.ego.changeLane(True, self.ego.orig_laneIndex, self.rd)", "assert self.egoID in self.vehID_tuple_all, 'vehicle not in env' self.timestep +=", "{ 'resetFlag': 0} # (dict): contains auxiliary diagnostic information (helpful", "* 4 + 3] = 0 # todo check if", "on: self.collision_num:', self.collision_num) def preStep(self): traci.simulationStep() self.vehID_tuple_all = traci.edge.getLastStepVehicleIDs(self.rd.entranceEdgeID) self.update_veh_dict(self.vehID_tuple_all)", "tfc: int. 0:light; 1:medium; 2:dense :return: initial observation \"\"\" self.seed(randomseed)", "delta_A2 == 0: TTC2 = - abs(delta_D2) / delta_V2 else:", "(1 - alpha) * abs( # self.ego.lanePos - self.veh_dict[self.ego.targetLeaderID].lanePos) #", "episode ends self.is_done() if self.done is True: self.info['resetFlag'] = True", "lateral position Returns: described in __init__ \"\"\" action_longi = action", "2:target leader; 3:target follower :param id: vehicle id corresponding to", "= -math.exp(-4*alpha+5) else: r_lat_t = 0 r_safe = w_lateral *", "+ r_effi_all + r_safe return r_total def is_done(self): # lane", "the environment's dynamics. 
When end of episode is reached, call", "out of env if self.egoID not in self.vehID_tuple_all: self.done =", "self._updateObservationSingle(4, self.ego.trgt_follower) # self.observation = np.array(self.observation).flatten() # print(self.observation.shape) def updateReward(self):", "this environment's state. Accepts an action and returns a tuple", "= 1 wc2 = 1 wt = 1 ws =", "speed # [0, 0, 0], # leader # [0, 0,", "else: r_long_t = 0 if self.lateral_action == 1: # lane", "'lane', self.ego.curr_laneIndex, 'rdWdith', self.rd.laneWidth) # print('right', -(self.ego.pos_lat - 0.5*self.rd.laneWidth)) #", "reset this environment's state. Accepts an action and returns a", "= 1: follow closer leader longitudinal2: action[0] = 0: follow", "alpha) * abs(self.ego.leaderDis) # else: # r_safe_leader = 0 #", "+ 1] = self.observation[1] self.observation[name * 4 + 2] =", "else: r_lat_t = 0 r_safe = w_lateral * (r_lat_c +", "3] = 0 # todo check if rational def updateObservation(self):", "is not None: # compute longitudinal time gap delta_V =", "self.veh_dict[self.egoID] self.ego.trgt_laneIndex = tlane self.ego.is_ego = 1 # set ego", "abort lane change, change back to ego's original lane if", "not in env' self.timestep += 1 # lateral control------------------------- #", "lane if not self.is_success: if action_lateral == 1: # and", "r_long_c = 0 if self.lateral_action == 0: #abort lane change", "action_lateral = action[1] assert self.done is False, 'self.done is not", "r_lat_t = -math.exp(-4*alpha+5) else: r_lat_t = 0 r_safe = w_lateral", "update_veh_dict(self, veh_id_tuple): for veh_id in veh_id_tuple: if veh_id not in", "+ r_speed + r_effi # reward related to safety w_lateral", "is not None: while self.egoID not in self.veh_dict.keys(): # must", "# average 19 vehicles self.cfg = 'C:/Users/<NAME>/Desktop/map/ramp3/mapDense.sumo.cfg' else: # average", "preStpe self.preStep() if self.timestep > 5000: raise Exception('cannot find ego", "0: follow original lane leader action[0] = 1: 
follow target", "r_long_t = - math.exp(-2 * TTC2 + 5) else: r_long_t", "will return undefined results self.info = { 'resetFlag': 0} #", "traci.lane.getMaxSpeed(traci.vehicle.getLaneID(self.egoID)) self.ego.idm_obj = IDM() self.ego.idm_obj.__init__(self.ego_speedLimit) self.ego.update_info(self.rd, self.veh_dict) self.updateObservation() return self.observation", "print('reset on: successfully lane change, dis2targetlane:', # self.ego.dis2tgtLane) # too", "# if self.ego.targetLeaderID is not None: # # print('lateralPos2tgtleader', abs(self.ego.pos_lat", "r_comf = wc1 * self.ego.acce ** 2 + wc2 *", "lateral position of the ego may change lateral: action[1] =", "env.Vehicle import Vehicle import math # add sumo/tools into python", "TTC = TTC/delta_A if self.lateral_action != 1 and 0 <", "np.empty(20) self.reward = None # (float) : amount of reward", "we = 1 # reward related to comfort r_comf =", "self.ego not in env:', self.egoID not in self.vehID_tuple_all) # collision", "* 4 + 0] = veh.lanePos self.observation[name * 4 +", "self.veh_dict[self.ego.leaderID].pos_lat) / 3.2 # assert 0 <= alpha <= 1.1", "ramp entrance if self.ego.dis2entrance < 10.0: self.done = True #", "self.ego.idm_obj = IDM() self.ego.idm_obj.__init__(self.ego_speedLimit) self.ego.update_info(self.rd, self.veh_dict) self.updateObservation() return self.observation return", "self.lateral_action = action_lateral # action_longi = action[0] # action_lateral =", "= Vehicle(veh_id, self.rd) for veh_id in list(self.veh_dict.keys()): if veh_id not", "= None # self.tgtLane = tgtlane self.is_success = False self.collision_num", "self.sumoCmd = [self.sumoBinary] + self.sumoCmd + ['--quit-on-end', str(True), '--start', str(True)]", "= True # print('reset on: successfully lane change, dis2targetlane:', #", "seed random.seed(self.randomseed) def reset(self, egoid, tlane=0, tfc=1, is_gui=True, sumoseed=None, randomseed=None):", "> 5000: raise Exception('cannot find ego after 5000 timesteps') assert", 
"self.veh_dict[self.ego.leaderID].acce - self.ego.acce if delta_A == 0: TTC = -", "numpy as np from env.IDM import IDM from env.Road import", "1 we = 1 # reward related to comfort r_comf", "vehicle speed mode traci.vehicle.setSpeedMode(self.ego.veh_id, 0) self.ego_speedFactor = traci.vehicle.getSpeedFactor(egoid) self.ego_speedLimit =", "calls will return undefined results self.info = { 'resetFlag': 0}", "(object): longitudinal0: action[0] = 1: accelerate action[0] = -1: decelerate", "(float) : amount of reward returned after previous action self.done", "/ delta_A2 if self.lateral_action == 1 and 0 < TTC2", "**important**: orginal/target lane leader will not change despite the lateral", "= self.ego.changeLane(True, self.ego.trgt_laneIndex, self.rd) # print('posLat', self.ego.pos_lat, 'lane', self.ego.curr_laneIndex, 'rdWdith',", "if self.ego.leaderID is not None: # # ('lateralPos2leader', abs(self.ego.pos_lat -", "step(self, action=2): \"\"\"Run one timestep of the environment's dynamics. 
When", "veh.lanePos self.observation[name * 4 + 1] = veh.speed self.observation[name *", "lane change, dis2targetlane:', # self.ego.dis2tgtLane) # too close to ramp", "wc1 = 1 wc2 = 1 wt = 1 ws", "lane follower self.observation = np.empty(20) self.reward = None # (float)", "0: # and abs(self.ego.pos_lat - (0.5+self.ego.origLane)*self.rd.laneWidth) > 0.01: self.is_success =", "<= 1.1 # # r_safe_tgtleader = w_lateral * alpha +", "back to original line; 1:lane change to target lane; 2:keep", "= 1: lane change action[1] = 0: abort lane change,", "# arguments must be string, if float/int, must be converted", "print('success') else: sys.exit(\"please declare environment variable 'SUMO_HOME'\") import traci ######################################################################", "efficiency r_time = - wt * self.timestep r_speed = ws", "in self.vehID_tuple_all) # collision occurs self.collision_num = traci.simulation.getCollidingVehiclesNumber() if self.collision_num", "= 1 ws = 1 we = 1 # reward", "set ego vehicle speed mode traci.vehicle.setSpeedMode(self.ego.veh_id, 0) self.ego_speedFactor = traci.vehicle.getSpeedFactor(egoid)", "leader **important**: orginal/target lane leader will not change despite the", "0.0, self.done, self.info else: self.updateObservation() self.reward = self.updateReward() return self.observation,", "follow closer leader longitudinal2: action[0] = 0: follow original lane", "TTC2 = TTC2 / delta_A2 if self.lateral_action == 1 and", "from env.IDM import IDM from env.Road import Road from env.Vehicle", "= action % 3 self.lateral_action = action_lateral # action_longi =", "not in self.vehID_tuple_all) # collision occurs self.collision_num = traci.simulation.getCollidingVehiclesNumber() if", "def step(self, action=2): \"\"\"Run one timestep of the environment's dynamics.", "r_safe_leader = w_lateral * alpha + w_longi * (1 -", "- alpha) * abs( # self.ego.lanePos - self.veh_dict[self.ego.targetLeaderID].lanePos) # else:", "else: self.sumoCmd += 
['--seed', str(seed)] # gui if gui is", "Args: action (object): longitudinal0: action[0] = 1: accelerate action[0] =", "and abs(self.ego.pos_lat - (0.5+self.ego.targetLane)*self.rd.laneWidth) > 0.01: self.is_success = self.ego.changeLane(True, self.ego.trgt_laneIndex,", "leader action[0] = 1: follow closer leader longitudinal2: action[0] =", "'vehicle not in env' self.timestep += 1 # lateral control-------------------------", "= 0: follow original lane leader action[0] = 1: follow", "self.sumoCmd += ['--seed', str(seed)] # gui if gui is True:", "r_safe = r_safe_leader + r_safe_tgtleader # total reward r_total =", "'rdWdith', self.rd.laneWidth) # print('right', -(self.ego.pos_lat - 0.5*self.rd.laneWidth)) # abort lane", "/ 3.2 # assert 0 <= alpha <= 1.1 #", "traci.simulationStep() self.vehID_tuple_all = traci.edge.getLastStepVehicleIDs(self.rd.entranceEdgeID) self.update_veh_dict(self.vehID_tuple_all) def step(self, action=2): \"\"\"Run one", "lane; 2:keep current # lane change to target lane if", "abs(self.ego.pos_lat - self.veh_dict[self.ego.targetLeaderID].pos_lat) / 3.2 # # print('alpha', alpha) #", "not in self.veh_dict.keys(): # must ensure safety in preStpe self.preStep()", "self.cfg = 'C:/Users/<NAME>/Desktop/map/ramp3/mapDense.sumo.cfg' else: # average 14 vehicles self.cfg =", "start training while ego is not in env\" self.done =", "(helpful for debugging, and sometimes learning) self.action_space = spaces.Discrete(6) self.observation_space", "TTC2 = -delta_V2 - math.sqrt(delta_V2 ** 2 + 2 *", "- self.veh_dict[self.ego.targetLeaderID].pos_lat) / 3.2 # # print('alpha', alpha) # assert", "# # ('lateralPos2leader', abs(self.ego.pos_lat - self.veh_dict[self.ego.leaderID].pos_lat)) # alpha = abs(self.ego.pos_lat", "current lateral position if action_lateral == 2: self.is_success = self.ego.changeLane(True,", "# average 9 vehicles self.cfg = 'C:/Users/<NAME>/Desktop/map/ramp3/mapFree.sumo.cfg' elif traffic ==", "== 0: TTC = - abs(self.ego.leaderDis)/delta_V else: 
TTC = -delta_V", "2 + wc2 * self.ego.delta_acce ** 2 # reward related", "self.observation[name * 4 + 2] = veh.pos_lat self.observation[name * 4", "from env.Road import Road from env.Vehicle import Vehicle import math", "self.done, self.info def seed(self, seed=None): if seed is None: self.randomseed", "is_done(self): # lane change successfully executed, episode ends, reset env", "5000: raise Exception('cannot find ego after 5000 timesteps') assert self.egoID", "() self.egoID = id self.ego = None # self.tgtLane =", "too close to ramp entrance if self.ego.dis2entrance < 10.0: self.done", "lane change to target lane if not self.is_success: if action_lateral", "1] = veh.speed self.observation[name * 4 + 2] = veh.pos_lat", "\"\"\"Run one timestep of the environment's dynamics. When end of", "SUMO default action[0] = others: acce = 0.0 longitudinal1: action[0]", "- self.veh_dict[self.ego.leaderID].pos_lat)) # alpha = abs(self.ego.pos_lat - self.veh_dict[self.ego.leaderID].pos_lat) / 3.2", "0 <= alpha <= 1.1 # # r_safe_tgtleader = w_lateral", "change, dis2targetlane:', # self.ego.dis2tgtLane) # too close to ramp entrance", "sys import random import datetime import gym from gym import", "has ended, in which case further step() calls will return", "Vehicle(veh_id, self.rd) for veh_id in list(self.veh_dict.keys()): if veh_id not in", "import Vehicle import math # add sumo/tools into python environment", "not None, 'action is None' assert self.egoID in self.vehID_tuple_all, 'vehicle", "self.ego.pos_lat self.observation[3] = self.ego.acce self._updateObservationSingle(1, self.ego.orig_leader) self._updateObservationSingle(2, self.ego.orig_follower) self._updateObservationSingle(3, self.ego.trgt_leader)", "not in veh_id_tuple: self.veh_dict.pop(veh_id) for veh_id in list(self.veh_dict.keys()): self.veh_dict[veh_id].update_info(self.rd, self.veh_dict)", "# # print('alpha', alpha) # assert 0 <= alpha <=", "0 < TTC < 2: r_long_c = - math.exp(-2*TTC+5) else:", "print(acceNext) 
vNext = self.ego.speed + acceNext * 0.1 traci.vehicle.setSpeed(self.egoID, vNext)", "self.ego.acce if delta_A == 0: TTC = - abs(self.ego.leaderDis)/delta_V else:", "% 3 self.lateral_action = action_lateral # action_longi = action[0] #", "* self.ego.delta_acce ** 2 # reward related to efficiency r_time", "* self.ego.acce ** 2 + wc2 * self.ego.delta_acce ** 2", "in self.vehID_tuple_all, \"cannot start training while ego is not in", "self.ego_speedFactor * traci.lane.getMaxSpeed(traci.vehicle.getLaneID(self.egoID)) self.ego.idm_obj = IDM() self.ego.idm_obj.__init__(self.ego_speedLimit) self.ego.update_info(self.rd, self.veh_dict) self.updateObservation()", "= self.veh_dict[self.ego.leaderID].acce - self.ego.acce if delta_A == 0: TTC =", "others: acce = 0.0 longitudinal1: action[0] = 0: follow original", "veh_id_tuple): for veh_id in veh_id_tuple: if veh_id not in self.veh_dict.keys():", ":param id: vehicle id corresponding to name :return: \"\"\" if", "2: # average 19 vehicles self.cfg = 'C:/Users/<NAME>/Desktop/map/ramp3/mapDense.sumo.cfg' else: #", "\"\"\" :param name: 0:ego; 1:leader; 2:target leader; 3:target follower :param", "= traci.simulation.getDeltaT() self.randomseed = None self.sumoseed = None self.veh_dict =", "modify if self.is_success: self.done = True # print('reset on: successfully", "python environment if 'SUMO_HOME' in os.environ: tools = os.path.join(os.environ['SUMO_HOME'], 'tools')", "action % 3 self.lateral_action = action_lateral # action_longi = action[0]", "None: while self.egoID not in self.veh_dict.keys(): # must ensure safety", "[0, 0, 0], # target lane leader # [0, 0,", "True # print('reset on: self.collision_num:', self.collision_num) def preStep(self): traci.simulationStep() self.vehID_tuple_all", "in self.veh_dict.keys(): self.veh_dict[veh_id] = Vehicle(veh_id, self.rd) for veh_id in list(self.veh_dict.keys()):", "[self.sumoBinary] + self.sumoCmd + ['--quit-on-end', str(True), '--start', str(True)] else: self.sumoCmd", "longitudinal 
time gap delta_V2 = self.veh_dict[self.ego.targetLeaderID].speed - self.ego.speed delta_A2 =", "self.ego.speed self.observation[2] = self.ego.pos_lat self.observation[3] = self.ego.acce self._updateObservationSingle(1, self.ego.orig_leader) self._updateObservationSingle(2,", "self.ego.dis2entrance r_effi_all = r_time + r_speed + r_effi # reward", "# [0, 0, 0]] # target lane follower self.observation =", "self.veh_dict.keys(): self.veh_dict[veh_id] = Vehicle(veh_id, self.rd) for veh_id in list(self.veh_dict.keys()): if", "target lane follower self.observation = np.empty(20) self.reward = None #", "math.sqrt(delta_V2 ** 2 + 2 * delta_A2 * delta_D2) TTC2", "r_total def is_done(self): # lane change successfully executed, episode ends,", "if sumoseed is None: self.sumoseed = self.randomseed traci.close() self.__init__(id=egoid, traffic=tfc,", "delta_D2) TTC2 = TTC2 / delta_A2 if self.lateral_action == 1", "= \"/usr/local/Cellar/sumo/1.2.0/bin/sumo\" self.sumoCmd = ['-c', self.cfg, # '--lanechange.duration', str(3), #", "def seed(self, seed=None): if seed is None: self.randomseed = datetime.datetime.now().microsecond", "300. self.observation[name * 4 + 1] = self.observation[1] self.observation[name *", "self.done = True # print('reset on: successfully lane change, dis2targetlane:',", "- math.sqrt(delta_V2 ** 2 + 2 * delta_A2 * delta_D2)", "self.observation[name * 4 + 2] = 4.8 self.observation[name * 4", "on: self.ego not in env:', self.egoID not in self.vehID_tuple_all) #", "= TTC2 / delta_A2 if self.lateral_action == 1 and 0", "done, info). 
Args: action (object): longitudinal0: action[0] = 1: accelerate", "mode traci.vehicle.setSpeedMode(self.ego.veh_id, 0) self.ego_speedFactor = traci.vehicle.getSpeedFactor(egoid) self.ego_speedLimit = self.ego_speedFactor *", "reward r_total = r_comf + r_effi_all + r_safe return r_total", "check if episode ends self.is_done() if self.done is True: self.info['resetFlag']", "** 2 + 2 * delta_A2 * delta_D2) TTC2 =", "not in self.veh_dict.keys(): self.veh_dict[veh_id] = Vehicle(veh_id, self.rd) for veh_id in", "+ 3] = veh.acce else: self.observation[name * 4 + 0]", "# r_safe_tgtleader = 0 # # # r_safe = r_safe_leader", "action_longi = action // 3 action_lateral = action % 3", "= traci.edge.getLastStepVehicleIDs(self.rd.entranceEdgeID) self.update_veh_dict(self.vehID_tuple_all) # check if episode ends self.is_done() if", "# leader # [0, 0, 0], # target lane leader", "# [0, 0, 0], # target lane leader # [0,", "not None: # # ('lateralPos2leader', abs(self.ego.pos_lat - self.veh_dict[self.ego.leaderID].pos_lat)) # alpha", "into python environment if 'SUMO_HOME' in os.environ: tools = os.path.join(os.environ['SUMO_HOME'],", "self.done is False, 'self.done is not False' assert action is", "* self.ego.dis2tgtLane / self.ego.dis2entrance r_effi_all = r_time + r_speed +", "self.ego.trgt_follower) # self.observation = np.array(self.observation).flatten() # print(self.observation.shape) def updateReward(self): return", "self.dt = traci.simulation.getDeltaT() self.randomseed = None self.sumoseed = None self.veh_dict", "LaneChangeEnv(gym.Env): def __init__(self, id=None, traffic=1, gui=False, seed=None): # todo check", "id=None, traffic=1, gui=False, seed=None): # todo check traffic flow density", "2 # self.observation = [[0, 0, 0], # ego lane", "average 14 vehicles self.cfg = 'C:/Users/<NAME>/Desktop/map/ramp3/map.sumo.cfg' # arguments must be", "0, 0], # ego lane position and speed # [0,", "id self.ego = None # self.tgtLane = tgtlane self.is_success =", "# alpha = 
abs(self.ego.pos_lat - self.veh_dict[self.ego.targetLeaderID].pos_lat) / 3.2 # #", "r_speed = ws * (self.ego.speed - self.ego_speedLimit) r_effi = we", "to efficiency r_time = - wt * self.timestep r_speed =", "None: # compute longitudinal time gap delta_V2 = self.veh_dict[self.ego.targetLeaderID].speed -", "timesteps') assert self.egoID in self.vehID_tuple_all, \"cannot start training while ego", "abs( # self.ego.lanePos - self.veh_dict[self.ego.targetLeaderID].lanePos) # else: # r_safe_tgtleader =", "def updateReward(self): return -self.ego.dis2tgtLane def updateReward2(self): wc1 = 1 wc2", "one timestep of the environment's dynamics. When end of episode", "if episode ends self.is_done() if self.done is True: self.info['resetFlag'] =", "self.ego.speed + acceNext * 0.1 traci.vehicle.setSpeed(self.egoID, vNext) # update info------------------------------", "target lane if not self.is_success: if action_lateral == 1: #", "0, 0]] # target lane follower self.observation = np.empty(20) self.reward", "seed=None): if seed is None: self.randomseed = datetime.datetime.now().microsecond else: self.randomseed", "self.observation[1] self.observation[name * 4 + 2] = 4.8 self.observation[name *", "action[0] = -1: decelerate action[0] = 0: use SUMO default", "and 0 < TTC < 2: r_long_c = - math.exp(-2*TTC+5)", "must be string, if float/int, must be converted to str(float/int),", "using 'Sublane-Model' '--step-length', str(0.1), '--default.action-step-length', str(0.1)] # randomness if seed", "collision occurs self.collision_num = traci.simulation.getCollidingVehiclesNumber() if self.collision_num > 0: self.done", "import datetime import gym from gym import spaces import numpy", "target lane leader # [0, 0, 0]] # target lane", "+ r_effi # reward related to safety w_lateral = 1", "0) self.ego_speedFactor = traci.vehicle.getSpeedFactor(egoid) self.ego_speedLimit = self.ego_speedFactor * traci.lane.getMaxSpeed(traci.vehicle.getLaneID(self.egoID)) self.ego.idm_obj", 
"self.veh_dict[self.ego.targetLeaderID].pos_lat)) # alpha = abs(self.ego.pos_lat - self.veh_dict[self.ego.targetLeaderID].pos_lat) / 3.2 #", "follower :param id: vehicle id corresponding to name :return: \"\"\"", "'action is None' assert self.egoID in self.vehID_tuple_all, 'vehicle not in", "if self.ego.targetLeaderID is not None: # # print('lateralPos2tgtleader', abs(self.ego.pos_lat -", "episode ends, reset env # todo modify if self.is_success: self.done", "0.01: self.is_success = self.ego.changeLane(True, self.ego.orig_laneIndex, self.rd) # print('left', 1.5 *", "* 0.1 traci.vehicle.setSpeed(self.egoID, vNext) # update info------------------------------ traci.simulationStep() self.vehID_tuple_all =", "training while ego is not in env\" self.done = False", "'--start', str(True)] else: self.sumoCmd = [self.sumoBinary] + self.sumoCmd traci.start(self.sumoCmd) self.rd", "env.IDM import IDM from env.Road import Road from env.Vehicle import", "r_time = - wt * self.timestep r_speed = ws *", "position of the ego may change lateral: action[1] = 1:", "== 1: # and abs(self.ego.pos_lat - (0.5+self.ego.targetLane)*self.rd.laneWidth) > 0.01: self.is_success", "import traci ###################################################################### # simulation environments class LaneChangeEnv(gym.Env): def __init__(self,", "use SUMO default action[0] = others: acce = 0.0 longitudinal1:", "self.observation[name * 4 + 0] = self.observation[0] + 300. 
self.observation[name", "abs(self.ego.pos_lat - (0.5+self.ego.targetLane)*self.rd.laneWidth) > 0.01: self.is_success = self.ego.changeLane(True, self.ego.trgt_laneIndex, self.rd)", "abs(self.ego.pos_lat - self.veh_dict[self.ego.leaderID].pos_lat)) # alpha = abs(self.ego.pos_lat - self.veh_dict[self.ego.leaderID].pos_lat) /", "4 + 1] = self.observation[1] self.observation[name * 4 + 2]", "# else: # r_safe_leader = 0 # if self.ego.targetLeaderID is", "r_safe = w_lateral * (r_lat_c + r_lat_t) + w_longi *", "change to target lane if not self.is_success: if action_lateral ==", ":return: initial observation \"\"\" self.seed(randomseed) if sumoseed is None: self.sumoseed", "case further step() calls will return undefined results self.info =", "to ramp entrance if self.ego.dis2entrance < 10.0: self.done = True", "import sys import random import datetime import gym from gym", "delta_V2 = self.veh_dict[self.ego.targetLeaderID].speed - self.ego.speed delta_A2 = self.veh_dict[self.ego.targetLeaderID].acce - self.ego.acce", "control2--------------------- acceNext = self.ego.updateLongitudinalSpeedIDM(action_longi) # print(acceNext) vNext = self.ego.speed +", "auxiliary diagnostic information (helpful for debugging, and sometimes learning) self.action_space", "change back to ego's original lane if action_lateral == 0:", "datetime import gym from gym import spaces import numpy as", "else: self.observation[name * 4 + 0] = self.observation[0] + 300.", "and sometimes learning) self.action_space = spaces.Discrete(6) self.observation_space = spaces.Box(low=-np.inf, high=np.inf,", "self.ego.orig_laneIndex, self.rd) # print('left', 1.5 * self.rd.laneWidth - self.ego.pos_lat) #", "seed=self.sumoseed) # continue step until ego appears in env if", "print('reset on: too close to ramp entrance, dis2targetlane:', # self.ego.dis2tgtLane)", "= 0 if self.ego.targetLeaderID is not None: # compute longitudinal", "\"\"\" if veh is not None: self.observation[name * 4 +", "wc1 * self.ego.acce ** 2 + 
wc2 * self.ego.delta_acce **", "TTC = - abs(self.ego.leaderDis)/delta_V else: TTC = -delta_V - math.sqrt(delta_V**2", "line; 1:lane change to target lane; 2:keep current # lane", "self.observation[1] = self.ego.speed self.observation[2] = self.ego.pos_lat self.observation[3] = self.ego.acce self._updateObservationSingle(1,", "+ w_longi * (r_long_c+ r_long_t) # # if self.ego.leaderID is", "speed mode traci.vehicle.setSpeedMode(self.ego.veh_id, 0) self.ego_speedFactor = traci.vehicle.getSpeedFactor(egoid) self.ego_speedLimit = self.ego_speedFactor", "np from env.IDM import IDM from env.Road import Road from", "previous action self.done = True # (bool): whether the episode", "r_safe return r_total def is_done(self): # lane change successfully executed,", "0: use SUMO default action[0] = others: acce = 0.0", "0 <= alpha <= 1.1 r_lat_c = -math.exp(-4*alpha+5) else: r_lat_c", "assert self.done is False, 'self.done is not False' assert action", "self.sumoCmd = [self.sumoBinary] + self.sumoCmd traci.start(self.sumoCmd) self.rd = Road() self.timestep", "# self.ego.dis2tgtLane) # too close to ramp entrance if self.ego.dis2entrance", "gui=False, seed=None): # todo check traffic flow density if traffic", "if self.ego.targetLeaderID is not None: # compute longitudinal time gap", "os.environ: tools = os.path.join(os.environ['SUMO_HOME'], 'tools') sys.path.append(tools) print('success') else: sys.exit(\"please declare", "str(True)] else: self.sumoCmd = [self.sumoBinary] + self.sumoCmd traci.start(self.sumoCmd) self.rd =", "r_effi = we * self.ego.dis2tgtLane / self.ego.dis2entrance r_effi_all = r_time", "= 0: abort lane change, change back to original lane", "= spaces.Discrete(6) self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=(20,)) def update_veh_dict(self, veh_id_tuple):", "env if self.egoID not in self.vehID_tuple_all: self.done = True #", "self.ego.acce ** 2 + wc2 * self.ego.delta_acce ** 2 #", "1: follow target lane leader **important**: orginal/target 
lane leader will", "self.timestep += 1 # lateral control------------------------- # episode in progress;", "+ 2 * delta_A2 * delta_D2) TTC2 = TTC2 /", "= 1: accelerate action[0] = -1: decelerate action[0] = 0:", "self.veh_dict[veh_id] = Vehicle(veh_id, self.rd) for veh_id in list(self.veh_dict.keys()): if veh_id", "- self.ego.lanePos if delta_A2 == 0: TTC2 = - abs(delta_D2)", "action // 3 action_lateral = action % 3 self.lateral_action =", "'SUMO_HOME' in os.environ: tools = os.path.join(os.environ['SUMO_HOME'], 'tools') sys.path.append(tools) print('success') else:", "+ 2] = veh.pos_lat self.observation[name * 4 + 3] =", "`reset()` outside env!! to reset this environment's state. Accepts an", "executed, episode ends, reset env # todo modify if self.is_success:", "r_long_t) # # if self.ego.leaderID is not None: # #", "= others: acce = 0.0 longitudinal1: action[0] = 0: follow", "action_lateral # action_longi = action[0] # action_lateral = action[1] assert", "dynamics. When end of episode is reached, call `reset()` outside", "reward related to efficiency r_time = - wt * self.timestep", "** 2 # reward related to efficiency r_time = -", "<= 1.1 r_lat_c = -math.exp(-4*alpha+5) else: r_lat_c = 0 if", "import spaces import numpy as np from env.IDM import IDM", "__init__(self, id=None, traffic=1, gui=False, seed=None): # todo check traffic flow", "until ego appears in env if self.egoID is not None:", "str(0.1), '--default.action-step-length', str(0.1)] # randomness if seed is None: self.sumoCmd", "to comfort r_comf = wc1 * self.ego.acce ** 2 +", "self.veh_dict[self.ego.leaderID].speed - self.ego.speed delta_A = self.veh_dict[self.ego.leaderID].acce - self.ego.acce if delta_A", "1: follow closer leader longitudinal2: action[0] = 0: follow original", "change action[1] = 0: abort lane change, change back to", "reward related to comfort r_comf = wc1 * self.ego.acce **", "updateReward2(self): wc1 = 1 wc2 = 1 wt = 1", "state. 
Accepts an action and returns a tuple (observation, reward,", "r_effi_all = r_time + r_speed + r_effi # reward related", "be converted to str(float/int), instead of '3.0' self.sumoBinary = \"/usr/local/Cellar/sumo/1.2.0/bin/sumo\"", "= True # print('reset on: too close to ramp entrance,", "wc2 * self.ego.delta_acce ** 2 # reward related to efficiency", "vehicles self.cfg = 'C:/Users/<NAME>/Desktop/map/ramp3/mapDense.sumo.cfg' else: # average 14 vehicles self.cfg", "[0, 0, 0], # leader # [0, 0, 0], #", "an action and returns a tuple (observation, reward, done, info).", "= abs(self.ego.pos_lat - self.veh_dict[self.ego.leaderID].pos_lat) / 3.2 # assert 0 <=", "vNext) # update info------------------------------ traci.simulationStep() self.vehID_tuple_all = traci.edge.getLastStepVehicleIDs(self.rd.entranceEdgeID) self.update_veh_dict(self.vehID_tuple_all) #", "= self.ego.changeLane(True, self.ego.orig_laneIndex, self.rd) # print('left', 1.5 * self.rd.laneWidth -", "- self.veh_dict[self.ego.leaderID].pos_lat) / 3.2 assert 0 <= alpha <= 1.1", "= abs(self.ego.pos_lat - self.veh_dict[self.ego.targetLeaderID].pos_lat) / 3.2 # # print('alpha', alpha)", "1.5 * self.rd.laneWidth - self.ego.pos_lat) # keep current lateral position", "import random import datetime import gym from gym import spaces", "# if self.ego.leaderID is not None: # # ('lateralPos2leader', abs(self.ego.pos_lat", "not None: self.observation[name * 4 + 0] = veh.lanePos self.observation[name", "veh_id not in self.veh_dict.keys(): self.veh_dict[veh_id] = Vehicle(veh_id, self.rd) for veh_id", "is False, 'self.done is not False' assert action is not", "corresponding to name :return: \"\"\" if veh is not None:", "decelerate action[0] = 0: use SUMO default action[0] = others:", "# reward related to safety w_lateral = 1 w_longi =", "list(self.veh_dict.keys()): if veh_id not in veh_id_tuple: self.veh_dict.pop(veh_id) for veh_id in", "# target lane leader # [0, 0, 0]] # target", "further step() calls will return 
undefined results self.info = {", "r_safe_leader + r_safe_tgtleader # total reward r_total = r_comf +", "traffic == 0: # average 9 vehicles self.cfg = 'C:/Users/<NAME>/Desktop/map/ramp3/mapFree.sumo.cfg'", "if gui is True: self.sumoBinary += '-gui' self.sumoCmd = [self.sumoBinary]", "veh_id_tuple: self.veh_dict.pop(veh_id) for veh_id in list(self.veh_dict.keys()): self.veh_dict[veh_id].update_info(self.rd, self.veh_dict) def _updateObservationSingle(self,", "we * self.ego.dis2tgtLane / self.ego.dis2entrance r_effi_all = r_time + r_speed", "= ['-c', self.cfg, # '--lanechange.duration', str(3), # using 'Simple Continuous", "self.done = False self.ego = self.veh_dict[self.egoID] self.ego.trgt_laneIndex = tlane self.ego.is_ego", "= seed random.seed(self.randomseed) def reset(self, egoid, tlane=0, tfc=1, is_gui=True, sumoseed=None,", "self.egoID in self.vehID_tuple_all, 'vehicle not in env' self.timestep += 1", "def updateObservation(self): self.observation[0] = self.ego.lanePos self.observation[1] = self.ego.speed self.observation[2] =", "env' self.timestep += 1 # lateral control------------------------- # episode in", "# print('left', 1.5 * self.rd.laneWidth - self.ego.pos_lat) # keep current", "0 r_safe = w_lateral * (r_lat_c + r_lat_t) + w_longi", "+ w_longi * (1 - alpha) * abs(self.ego.leaderDis) # else:", "self.vehID_tuple_all) # collision occurs self.collision_num = traci.simulation.getCollidingVehiclesNumber() if self.collision_num >", "entrance if self.ego.dis2entrance < 10.0: self.done = True # print('reset", "successfully lane change, dis2targetlane:', # self.ego.dis2tgtLane) # too close to", "0 # todo check if rational def updateObservation(self): self.observation[0] =", "* 4 + 1] = veh.speed self.observation[name * 4 +", "0.0 longitudinal1: action[0] = 0: follow original lane leader action[0]", "in self.veh_dict.keys(): # must ensure safety in preStpe self.preStep() if", "list(self.veh_dict.keys()): self.veh_dict[veh_id].update_info(self.rd, self.veh_dict) 
def _updateObservationSingle(self, name, veh): \"\"\" :param name:", "None: # compute longitudinal time gap delta_V = self.veh_dict[self.ego.leaderID].speed -", "r_effi_all + r_safe return r_total def is_done(self): # lane change", "<= alpha <= 1.1 # r_safe_leader = w_lateral * alpha", "self.ego.lanePos - self.veh_dict[self.ego.targetLeaderID].lanePos) # else: # r_safe_tgtleader = 0 #", "original lane leader action[0] = 1: follow closer leader longitudinal2:", "# [0, 0, 0], # leader # [0, 0, 0],", "3.2 assert 0 <= alpha <= 1.1 r_lat_c = -math.exp(-4*alpha+5)", "spaces import numpy as np from env.IDM import IDM from", "if self.ego.leaderID is not None: # compute longitudinal time gap", "alpha) * abs( # self.ego.lanePos - self.veh_dict[self.ego.targetLeaderID].lanePos) # else: #", "self.sumoCmd += ['--random'] else: self.sumoCmd += ['--seed', str(seed)] # gui", "is not None: # # print('lateralPos2tgtleader', abs(self.ego.pos_lat - self.veh_dict[self.ego.targetLeaderID].pos_lat)) #", "change alpha = abs(self.ego.pos_lat - self.veh_dict[self.ego.targetLeaderID].pos_lat) / 3.2 assert 0", "position and speed # [0, 0, 0], # leader #", "-delta_V2 - math.sqrt(delta_V2 ** 2 + 2 * delta_A2 *", "learning) self.action_space = spaces.Discrete(6) self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=(20,)) def", "# self.ego.lanePos - self.veh_dict[self.ego.targetLeaderID].lanePos) # else: # r_safe_tgtleader = 0", "# print('reset on: too close to ramp entrance, dis2targetlane:', #", "sometimes learning) self.action_space = spaces.Discrete(6) self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=(20,))", "is None' assert self.egoID in self.vehID_tuple_all, 'vehicle not in env'", "'self.done is not False' assert action is not None, 'action", "os import sys import random import datetime import gym from", "traci.vehicle.getSpeedFactor(egoid) self.ego_speedLimit = self.ego_speedFactor * traci.lane.getMaxSpeed(traci.vehicle.getLaneID(self.egoID)) 
self.ego.idm_obj = IDM() self.ego.idm_obj.__init__(self.ego_speedLimit)", "# lateral control------------------------- # episode in progress; 0:change back to", "2 + 2 * delta_A2 * delta_D2) TTC2 = TTC2", "return undefined results self.info = { 'resetFlag': 0} # (dict):", "* 4 + 3] = veh.acce else: self.observation[name * 4", "not self.is_success: if action_lateral == 1: # and abs(self.ego.pos_lat -", "original lane action[1] = 2: keep in current lateral position", "# set ego vehicle speed mode traci.vehicle.setSpeedMode(self.ego.veh_id, 0) self.ego_speedFactor =", "1 and 0 < TTC2 < 2: r_long_t = -", "def update_veh_dict(self, veh_id_tuple): for veh_id in veh_id_tuple: if veh_id not", "episode in progress; 0:change back to original line; 1:lane change", "['--seed', str(seed)] # gui if gui is True: self.sumoBinary +=", "/ 3.2 assert 0 <= alpha <= 1.1 r_lat_c =", "<= alpha <= 1.1 r_lat_c = -math.exp(-4*alpha+5) else: r_lat_c =", "= self.ego.pos_lat self.observation[3] = self.ego.acce self._updateObservationSingle(1, self.ego.orig_leader) self._updateObservationSingle(2, self.ego.orig_follower) self._updateObservationSingle(3,", "= True return self.observation, 0.0, self.done, self.info else: self.updateObservation() self.reward", "= datetime.datetime.now().microsecond else: self.randomseed = seed random.seed(self.randomseed) def reset(self, egoid,", "self.collision_num) def preStep(self): traci.simulationStep() self.vehID_tuple_all = traci.edge.getLastStepVehicleIDs(self.rd.entranceEdgeID) self.update_veh_dict(self.vehID_tuple_all) def step(self,", "# print('alpha', alpha) # assert 0 <= alpha <= 1.1", "self.veh_dict[self.ego.leaderID].pos_lat) / 3.2 assert 0 <= alpha <= 1.1 r_lat_c", "/ 3.2 assert 0 <= alpha <= 1.1 r_lat_t =", "lane change alpha = abs(self.ego.pos_lat - self.veh_dict[self.ego.targetLeaderID].pos_lat) / 3.2 assert", "self.ego_speedFactor = traci.vehicle.getSpeedFactor(egoid) self.ego_speedLimit = self.ego_speedFactor * 
traci.lane.getMaxSpeed(traci.vehicle.getLaneID(self.egoID)) self.ego.idm_obj =", "# todo check if rational def updateObservation(self): self.observation[0] = self.ego.lanePos", "must ensure safety in preStpe self.preStep() if self.timestep > 5000:", "self.ego_speedLimit = self.ego_speedFactor * traci.lane.getMaxSpeed(traci.vehicle.getLaneID(self.egoID)) self.ego.idm_obj = IDM() self.ego.idm_obj.__init__(self.ego_speedLimit) self.ego.update_info(self.rd,", "close to ramp entrance, dis2targetlane:', # self.ego.dis2tgtLane) # ego vehicle", "# episode in progress; 0:change back to original line; 1:lane", "results self.info = { 'resetFlag': 0} # (dict): contains auxiliary", "for veh_id in veh_id_tuple: if veh_id not in self.veh_dict.keys(): self.veh_dict[veh_id]", "= None self.sumoseed = None self.veh_dict = {} self.vehID_tuple_all =", "return self.observation, 0.0, self.done, self.info else: self.updateObservation() self.reward = self.updateReward()", "3] = veh.acce else: self.observation[name * 4 + 0] =", "2: self.is_success = self.ego.changeLane(True, -1, self.rd) # longitudinal control2--------------------- acceNext", "density if traffic == 0: # average 9 vehicles self.cfg", "self.ego.pos_lat) # keep current lateral position if action_lateral == 2:", "self.is_success = self.ego.changeLane(True, self.ego.trgt_laneIndex, self.rd) # print('posLat', self.ego.pos_lat, 'lane', self.ego.curr_laneIndex,", "True: self.info['resetFlag'] = True return self.observation, 0.0, self.done, self.info else:", "self.sumoCmd + ['--quit-on-end', str(True), '--start', str(True)] else: self.sumoCmd = [self.sumoBinary]", "= 1 # set ego vehicle speed mode traci.vehicle.setSpeedMode(self.ego.veh_id, 0)", "id :param tfc: int. 0:light; 1:medium; 2:dense :return: initial observation", "+ w_longi * (1 - alpha) * abs( # self.ego.lanePos", ":param tfc: int. 
0:light; 1:medium; 2:dense :return: initial observation \"\"\"", "= -delta_V2 - math.sqrt(delta_V2 ** 2 + 2 * delta_A2", "lane position and speed # [0, 0, 0], # leader", "(1 - alpha) * abs(self.ego.leaderDis) # else: # r_safe_leader =", "will not change despite the lateral position of the ego", "self.veh_dict.pop(veh_id) for veh_id in list(self.veh_dict.keys()): self.veh_dict[veh_id].update_info(self.rd, self.veh_dict) def _updateObservationSingle(self, name,", "r_lat_c = 0 if self.ego.targetLeaderID is not None: # compute", "current lateral position Returns: described in __init__ \"\"\" action_longi =", "self._updateObservationSingle(1, self.ego.orig_leader) self._updateObservationSingle(2, self.ego.orig_follower) self._updateObservationSingle(3, self.ego.trgt_leader) self._updateObservationSingle(4, self.ego.trgt_follower) # self.observation", "= IDM() self.ego.idm_obj.__init__(self.ego_speedLimit) self.ego.update_info(self.rd, self.veh_dict) self.updateObservation() return self.observation return def", "check traffic flow density if traffic == 0: # average", "timestep of the environment's dynamics. When end of episode is", "action[0] = others: acce = 0.0 longitudinal1: action[0] = 0:", "= 1 # reward related to comfort r_comf = wc1", "lane leader # [0, 0, 0]] # target lane follower", "class LaneChangeEnv(gym.Env): def __init__(self, id=None, traffic=1, gui=False, seed=None): # todo", "to original lane action[1] = 2: keep in current lateral", "True return self.observation, 0.0, self.done, self.info else: self.updateObservation() self.reward =", "using 'Simple Continuous lane-change model' '--lateral-resolution', str(0.8), # using 'Sublane-Model'", "vehicle id :param tfc: int. 
0:light; 1:medium; 2:dense :return: initial", "self.cfg = 'C:/Users/<NAME>/Desktop/map/ramp3/map.sumo.cfg' # arguments must be string, if float/int,", "'--step-length', str(0.1), '--default.action-step-length', str(0.1)] # randomness if seed is None:", "w_longi * (r_long_c+ r_long_t) # # if self.ego.leaderID is not", "Road from env.Vehicle import Vehicle import math # add sumo/tools", "initial observation \"\"\" self.seed(randomseed) if sumoseed is None: self.sumoseed =", "is not None: self.observation[name * 4 + 0] = veh.lanePos", "math.exp(-2 * TTC2 + 5) else: r_long_t = 0 if", "= 2: keep in current lateral position Returns: described in", "+= 1 # lateral control------------------------- # episode in progress; 0:change", "None: # # ('lateralPos2leader', abs(self.ego.pos_lat - self.veh_dict[self.ego.leaderID].pos_lat)) # alpha =", "= action // 3 action_lateral = action % 3 self.lateral_action", "self.ego.speed delta_A = self.veh_dict[self.ego.leaderID].acce - self.ego.acce if delta_A == 0:", "+= '-gui' self.sumoCmd = [self.sumoBinary] + self.sumoCmd + ['--quit-on-end', str(True),", "= self.ego_speedFactor * traci.lane.getMaxSpeed(traci.vehicle.getLaneID(self.egoID)) self.ego.idm_obj = IDM() self.ego.idm_obj.__init__(self.ego_speedLimit) self.ego.update_info(self.rd, self.veh_dict)", "traci ###################################################################### # simulation environments class LaneChangeEnv(gym.Env): def __init__(self, id=None,", "self.observation, 0.0, self.done, self.info else: self.updateObservation() self.reward = self.updateReward() return", "veh.pos_lat self.observation[name * 4 + 3] = veh.acce else: self.observation[name", "lane-change model' '--lateral-resolution', str(0.8), # using 'Sublane-Model' '--step-length', str(0.1), '--default.action-step-length',", "high=np.inf, shape=(20,)) def update_veh_dict(self, veh_id_tuple): for veh_id in veh_id_tuple: if", "traci.start(self.sumoCmd) self.rd = Road() self.timestep = 0 self.dt = 
traci.simulation.getDeltaT()", "traci.simulation.getDeltaT() self.randomseed = None self.sumoseed = None self.veh_dict = {}", "self.is_done() if self.done is True: self.info['resetFlag'] = True return self.observation,", "print('left', 1.5 * self.rd.laneWidth - self.ego.pos_lat) # keep current lateral", "0: TTC2 = - abs(delta_D2) / delta_V2 else: TTC2 =", "self.lateral_action == 0: #abort lane change alpha = abs(self.ego.pos_lat -", "self.is_success: self.done = True # print('reset on: successfully lane change,", "- self.ego.pos_lat) # keep current lateral position if action_lateral ==", "lane change action[1] = 0: abort lane change, change back", "amount of reward returned after previous action self.done = True", "back to ego's original lane if action_lateral == 0: #", "(self.ego.speed - self.ego_speedLimit) r_effi = we * self.ego.dis2tgtLane / self.ego.dis2entrance", "# # r_safe = r_safe_leader + r_safe_tgtleader # total reward", "#abort lane change alpha = abs(self.ego.pos_lat - self.veh_dict[self.ego.leaderID].pos_lat) / 3.2", "# using 'Sublane-Model' '--step-length', str(0.1), '--default.action-step-length', str(0.1)] # randomness if", "= 0: use SUMO default action[0] = others: acce =", "environment's state. Accepts an action and returns a tuple (observation,", "of '3.0' self.sumoBinary = \"/usr/local/Cellar/sumo/1.2.0/bin/sumo\" self.sumoCmd = ['-c', self.cfg, #", "+ 0] = self.observation[0] + 300. 
self.observation[name * 4 +", "/ 3.2 # # print('alpha', alpha) # assert 0 <=", "if seed is None: self.sumoCmd += ['--random'] else: self.sumoCmd +=", "self.veh_dict[self.ego.targetLeaderID].acce - self.ego.acce delta_D2 = self.veh_dict[self.ego.targetLeaderID].lanePos - self.ego.lanePos if delta_A2", "print(self.observation.shape) def updateReward(self): return -self.ego.dis2tgtLane def updateReward2(self): wc1 = 1", "self.action_space = spaces.Discrete(6) self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=(20,)) def update_veh_dict(self,", "self.rd) # longitudinal control2--------------------- acceNext = self.ego.updateLongitudinalSpeedIDM(action_longi) # print(acceNext) vNext", "= self.observation[0] + 300. self.observation[name * 4 + 1] =", "2 * delta_A2 * delta_D2) TTC2 = TTC2 / delta_A2", "reward, done, info). Args: action (object): longitudinal0: action[0] = 1:", "gap delta_V2 = self.veh_dict[self.ego.targetLeaderID].speed - self.ego.speed delta_A2 = self.veh_dict[self.ego.targetLeaderID].acce -", "abs(self.ego.pos_lat - self.veh_dict[self.ego.leaderID].pos_lat) / 3.2 # assert 0 <= alpha", "return self.observation, self.reward, self.done, self.info def seed(self, seed=None): if seed", "[self.sumoBinary] + self.sumoCmd traci.start(self.sumoCmd) self.rd = Road() self.timestep = 0", "in self.vehID_tuple_all: self.done = True # print('reset on: self.ego not", "of episode is reached, call `reset()` outside env!! 
to reset", "self.vehID_tuple_all = traci.edge.getLastStepVehicleIDs(self.rd.entranceEdgeID) self.update_veh_dict(self.vehID_tuple_all) # check if episode ends self.is_done()", "contains auxiliary diagnostic information (helpful for debugging, and sometimes learning)", "alpha <= 1.1 # # r_safe_tgtleader = w_lateral * alpha", "self.vehID_tuple_all: self.done = True # print('reset on: self.ego not in", "action[1] = 1: lane change action[1] = 0: abort lane", "assert 0 <= alpha <= 1.1 # # r_safe_tgtleader =", "the lateral position of the ego may change lateral: action[1]", "** 2 + wc2 * self.ego.delta_acce ** 2 # reward", "self.veh_dict[self.ego.targetLeaderID].pos_lat) / 3.2 assert 0 <= alpha <= 1.1 r_lat_t", "self.is_success = self.ego.changeLane(True, self.ego.orig_laneIndex, self.rd) # print('left', 1.5 * self.rd.laneWidth", "self.sumoBinary += '-gui' self.sumoCmd = [self.sumoBinary] + self.sumoCmd + ['--quit-on-end',", "= 0 # # # r_safe = r_safe_leader + r_safe_tgtleader", "math.sqrt(delta_V**2 + 2*delta_A * self.ego.leaderDis) TTC = TTC/delta_A if self.lateral_action", "str(seed)] # gui if gui is True: self.sumoBinary += '-gui'", "if 'SUMO_HOME' in os.environ: tools = os.path.join(os.environ['SUMO_HOME'], 'tools') sys.path.append(tools) print('success')", "longitudinal time gap delta_V = self.veh_dict[self.ego.leaderID].speed - self.ego.speed delta_A =", "# self.observation = np.array(self.observation).flatten() # print(self.observation.shape) def updateReward(self): return -self.ego.dis2tgtLane", "# ego vehicle out of env if self.egoID not in", "self.observation = np.empty(20) self.reward = None # (float) : amount", "print('posLat', self.ego.pos_lat, 'lane', self.ego.curr_laneIndex, 'rdWdith', self.rd.laneWidth) # print('right', -(self.ego.pos_lat -", "ends self.is_done() if self.done is True: self.info['resetFlag'] = True return", "# '--lanechange.duration', str(3), # using 'Simple Continuous lane-change model' '--lateral-resolution',", "self.lateral_action == 1 
and 0 < TTC2 < 2: r_long_t", "= veh.speed self.observation[name * 4 + 2] = veh.pos_lat self.observation[name", "# r_safe_leader = 0 # if self.ego.targetLeaderID is not None:", "assert 0 <= alpha <= 1.1 r_lat_t = -math.exp(-4*alpha+5) else:", "+ r_lat_t) + w_longi * (r_long_c+ r_long_t) # # if", "target lane; 2:keep current # lane change to target lane", "self.ego.trgt_laneIndex = tlane self.ego.is_ego = 1 # set ego vehicle", "* 4 + 0] = self.observation[0] + 300. self.observation[name *", "for veh_id in list(self.veh_dict.keys()): self.veh_dict[veh_id].update_info(self.rd, self.veh_dict) def _updateObservationSingle(self, name, veh):", "TTC2 + 5) else: r_long_t = 0 if self.lateral_action ==", "4.8 self.observation[name * 4 + 3] = 0 # todo", "= self.ego.lanePos self.observation[1] = self.ego.speed self.observation[2] = self.ego.pos_lat self.observation[3] =", "self.observation[name * 4 + 1] = veh.speed self.observation[name * 4", "sumo/tools into python environment if 'SUMO_HOME' in os.environ: tools =", "(r_long_c+ r_long_t) # # if self.ego.leaderID is not None: #", "= traci.edge.getLastStepVehicleIDs(self.rd.entranceEdgeID) self.update_veh_dict(self.vehID_tuple_all) def step(self, action=2): \"\"\"Run one timestep of", "= - wt * self.timestep r_speed = ws * (self.ego.speed", "0 self.lateral_action = 2 # self.observation = [[0, 0, 0],", "<= 1.1 # r_safe_leader = w_lateral * alpha + w_longi", "5000 timesteps') assert self.egoID in self.vehID_tuple_all, \"cannot start training while", "self.rd) # print('posLat', self.ego.pos_lat, 'lane', self.ego.curr_laneIndex, 'rdWdith', self.rd.laneWidth) # print('right',", "in veh_id_tuple: self.veh_dict.pop(veh_id) for veh_id in list(self.veh_dict.keys()): self.veh_dict[veh_id].update_info(self.rd, self.veh_dict) def", "self.ego.pos_lat, 'lane', self.ego.curr_laneIndex, 'rdWdith', self.rd.laneWidth) # print('right', -(self.ego.pos_lat - 0.5*self.rd.laneWidth))", "* abs( # self.ego.lanePos - 
self.veh_dict[self.ego.targetLeaderID].lanePos) # else: # r_safe_tgtleader", "is not None, 'action is None' assert self.egoID in self.vehID_tuple_all,", "self.info else: self.updateObservation() self.reward = self.updateReward() return self.observation, self.reward, self.done,", "action[1] = 2: keep in current lateral position Returns: described", "follow original lane leader action[0] = 1: follow closer leader", "= self.veh_dict[self.ego.targetLeaderID].acce - self.ego.acce delta_D2 = self.veh_dict[self.ego.targetLeaderID].lanePos - self.ego.lanePos if", "0 # if self.ego.targetLeaderID is not None: # # print('lateralPos2tgtleader',", "elif traffic == 2: # average 19 vehicles self.cfg =", "= traci.simulation.getCollidingVehiclesNumber() if self.collision_num > 0: self.done = True #", "False, 'self.done is not False' assert action is not None,", "# r_safe = r_safe_leader + r_safe_tgtleader # total reward r_total", "reset env :param id: ego vehicle id :param tfc: int.", "while self.egoID not in self.veh_dict.keys(): # must ensure safety in", "lane change alpha = abs(self.ego.pos_lat - self.veh_dict[self.ego.leaderID].pos_lat) / 3.2 assert", "[0, 0, 0]] # target lane follower self.observation = np.empty(20)", "if veh_id not in veh_id_tuple: self.veh_dict.pop(veh_id) for veh_id in list(self.veh_dict.keys()):", "* traci.lane.getMaxSpeed(traci.vehicle.getLaneID(self.egoID)) self.ego.idm_obj = IDM() self.ego.idm_obj.__init__(self.ego_speedLimit) self.ego.update_info(self.rd, self.veh_dict) self.updateObservation() return", "episode is reached, call `reset()` outside env!! 
to reset this", "seed=None): # todo check traffic flow density if traffic ==", "self.cfg = 'C:/Users/<NAME>/Desktop/map/ramp3/mapFree.sumo.cfg' elif traffic == 2: # average 19", "w_lateral * (r_lat_c + r_lat_t) + w_longi * (r_long_c+ r_long_t)", "traci.vehicle.setSpeed(self.egoID, vNext) # update info------------------------------ traci.simulationStep() self.vehID_tuple_all = traci.edge.getLastStepVehicleIDs(self.rd.entranceEdgeID) self.update_veh_dict(self.vehID_tuple_all)", "follow target lane leader **important**: orginal/target lane leader will not", "safety w_lateral = 1 w_longi = 1 if self.ego.leaderID is", "self.egoID in self.vehID_tuple_all, \"cannot start training while ego is not", "too close to ramp entrance, dis2targetlane:', # self.ego.dis2tgtLane) # ego", "# assert 0 <= alpha <= 1.1 # r_safe_leader =", "assert 0 <= alpha <= 1.1 # r_safe_leader = w_lateral", "['--quit-on-end', str(True), '--start', str(True)] else: self.sumoCmd = [self.sumoBinary] + self.sumoCmd", "self.ego.dis2tgtLane) # ego vehicle out of env if self.egoID not", "###################################################################### # simulation environments class LaneChangeEnv(gym.Env): def __init__(self, id=None, traffic=1,", "0: # average 9 vehicles self.cfg = 'C:/Users/<NAME>/Desktop/map/ramp3/mapFree.sumo.cfg' elif traffic", "abs(self.ego.pos_lat - self.veh_dict[self.ego.targetLeaderID].pos_lat)) # alpha = abs(self.ego.pos_lat - self.veh_dict[self.ego.targetLeaderID].pos_lat) /", "/ delta_V2 else: TTC2 = -delta_V2 - math.sqrt(delta_V2 ** 2", "reached, call `reset()` outside env!! 
to reset this environment's state.", "1:leader; 2:target leader; 3:target follower :param id: vehicle id corresponding", "action_lateral = action % 3 self.lateral_action = action_lateral # action_longi", "'tools') sys.path.append(tools) print('success') else: sys.exit(\"please declare environment variable 'SUMO_HOME'\") import", "- self.veh_dict[self.ego.targetLeaderID].lanePos) # else: # r_safe_tgtleader = 0 # #", "* (1 - alpha) * abs(self.ego.leaderDis) # else: # r_safe_leader", "-math.exp(-4*alpha+5) else: r_lat_t = 0 r_safe = w_lateral * (r_lat_c", "- self.ego.acce if delta_A == 0: TTC = - abs(self.ego.leaderDis)/delta_V", "id corresponding to name :return: \"\"\" if veh is not", "shape=(20,)) def update_veh_dict(self, veh_id_tuple): for veh_id in veh_id_tuple: if veh_id", "spaces.Discrete(6) self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=(20,)) def update_veh_dict(self, veh_id_tuple): for", "9 vehicles self.cfg = 'C:/Users/<NAME>/Desktop/map/ramp3/mapFree.sumo.cfg' elif traffic == 2: #", "declare environment variable 'SUMO_HOME'\") import traci ###################################################################### # simulation environments", "for veh_id in list(self.veh_dict.keys()): if veh_id not in veh_id_tuple: self.veh_dict.pop(veh_id)", "1 w_longi = 1 if self.ego.leaderID is not None: #", "else: # average 14 vehicles self.cfg = 'C:/Users/<NAME>/Desktop/map/ramp3/map.sumo.cfg' # arguments", "r_time + r_speed + r_effi # reward related to safety", "4 + 1] = veh.speed self.observation[name * 4 + 2]", "0 <= alpha <= 1.1 # r_safe_leader = w_lateral *", "self.done = True # print('reset on: self.collision_num:', self.collision_num) def preStep(self):", "'--default.action-step-length', str(0.1)] # randomness if seed is None: self.sumoCmd +=", "else: TTC2 = -delta_V2 - math.sqrt(delta_V2 ** 2 + 2", "- wt * self.timestep r_speed = ws * (self.ego.speed -", "> 0.01: self.is_success = self.ego.changeLane(True, self.ego.trgt_laneIndex, self.rd) # 
print('posLat', self.ego.pos_lat,", "# average 14 vehicles self.cfg = 'C:/Users/<NAME>/Desktop/map/ramp3/map.sumo.cfg' # arguments must", "self.done = True # print('reset on: too close to ramp", "env:', self.egoID not in self.vehID_tuple_all) # collision occurs self.collision_num =", "r_speed + r_effi # reward related to safety w_lateral =", "if self.lateral_action == 0: #abort lane change alpha = abs(self.ego.pos_lat", "in env:', self.egoID not in self.vehID_tuple_all) # collision occurs self.collision_num", "in current lateral position Returns: described in __init__ \"\"\" action_longi", "\"/usr/local/Cellar/sumo/1.2.0/bin/sumo\" self.sumoCmd = ['-c', self.cfg, # '--lanechange.duration', str(3), # using", "original lane if action_lateral == 0: # and abs(self.ego.pos_lat -", "(0.5+self.ego.origLane)*self.rd.laneWidth) > 0.01: self.is_success = self.ego.changeLane(True, self.ego.orig_laneIndex, self.rd) # print('left',", "else: self.randomseed = seed random.seed(self.randomseed) def reset(self, egoid, tlane=0, tfc=1,", "environment variable 'SUMO_HOME'\") import traci ###################################################################### # simulation environments class", "action[0] = 1: follow target lane leader **important**: orginal/target lane", "ended, in which case further step() calls will return undefined", "environments class LaneChangeEnv(gym.Env): def __init__(self, id=None, traffic=1, gui=False, seed=None): #", "* (r_lat_c + r_lat_t) + w_longi * (r_long_c+ r_long_t) #", "ego vehicle speed mode traci.vehicle.setSpeedMode(self.ego.veh_id, 0) self.ego_speedFactor = traci.vehicle.getSpeedFactor(egoid) self.ego_speedLimit", "= w_lateral * alpha + w_longi * (1 - alpha)", "4 + 3] = 0 # todo check if rational", "0 < TTC2 < 2: r_long_t = - math.exp(-2 *", "# (float) : amount of reward returned after previous action", "1.1 r_lat_t = -math.exp(-4*alpha+5) else: r_lat_t = 0 r_safe =", "# randomness if seed is None: self.sumoCmd += ['--random'] else:", "veh_id in 
list(self.veh_dict.keys()): self.veh_dict[veh_id].update_info(self.rd, self.veh_dict) def _updateObservationSingle(self, name, veh): \"\"\"", "- self.ego.acce delta_D2 = self.veh_dict[self.ego.targetLeaderID].lanePos - self.ego.lanePos if delta_A2 ==", "self.observation = [[0, 0, 0], # ego lane position and", "self.observation[2] = self.ego.pos_lat self.observation[3] = self.ego.acce self._updateObservationSingle(1, self.ego.orig_leader) self._updateObservationSingle(2, self.ego.orig_follower)", "sys.exit(\"please declare environment variable 'SUMO_HOME'\") import traci ###################################################################### # simulation", "self.ego.acce self._updateObservationSingle(1, self.ego.orig_leader) self._updateObservationSingle(2, self.ego.orig_follower) self._updateObservationSingle(3, self.ego.trgt_leader) self._updateObservationSingle(4, self.ego.trgt_follower) #", "import IDM from env.Road import Road from env.Vehicle import Vehicle", "< 10.0: self.done = True # print('reset on: too close", "reset(self, egoid, tlane=0, tfc=1, is_gui=True, sumoseed=None, randomseed=None): \"\"\" reset env", "'Simple Continuous lane-change model' '--lateral-resolution', str(0.8), # using 'Sublane-Model' '--step-length',", "* alpha + w_longi * (1 - alpha) * abs(self.ego.leaderDis)", "# print('posLat', self.ego.pos_lat, 'lane', self.ego.curr_laneIndex, 'rdWdith', self.rd.laneWidth) # print('right', -(self.ego.pos_lat", "lateral position if action_lateral == 2: self.is_success = self.ego.changeLane(True, -1,", "self.ego.updateLongitudinalSpeedIDM(action_longi) # print(acceNext) vNext = self.ego.speed + acceNext * 0.1", "self.ego = None # self.tgtLane = tgtlane self.is_success = False", "# longitudinal control2--------------------- acceNext = self.ego.updateLongitudinalSpeedIDM(action_longi) # print(acceNext) vNext =", "= self.ego.changeLane(True, -1, self.rd) # longitudinal control2--------------------- acceNext = 
self.ego.updateLongitudinalSpeedIDM(action_longi)", "= self.veh_dict[self.ego.targetLeaderID].speed - self.ego.speed delta_A2 = self.veh_dict[self.ego.targetLeaderID].acce - self.ego.acce delta_D2", "leader action[0] = 1: follow target lane leader **important**: orginal/target", "\"\"\" self.seed(randomseed) if sumoseed is None: self.sumoseed = self.randomseed traci.close()", "('lateralPos2leader', abs(self.ego.pos_lat - self.veh_dict[self.ego.leaderID].pos_lat)) # alpha = abs(self.ego.pos_lat - self.veh_dict[self.ego.leaderID].pos_lat)", "0.1 traci.vehicle.setSpeed(self.egoID, vNext) # update info------------------------------ traci.simulationStep() self.vehID_tuple_all = traci.edge.getLastStepVehicleIDs(self.rd.entranceEdgeID)", "# (bool): whether the episode has ended, in which case", "import numpy as np from env.IDM import IDM from env.Road", "self.ego.orig_follower) self._updateObservationSingle(3, self.ego.trgt_leader) self._updateObservationSingle(4, self.ego.trgt_follower) # self.observation = np.array(self.observation).flatten() #", ": amount of reward returned after previous action self.done =", "to reset this environment's state. 
Accepts an action and returns", "be string, if float/int, must be converted to str(float/int), instead", "acceNext * 0.1 traci.vehicle.setSpeed(self.egoID, vNext) # update info------------------------------ traci.simulationStep() self.vehID_tuple_all", "* (r_long_c+ r_long_t) # # if self.ego.leaderID is not None:", "traci.simulationStep() self.vehID_tuple_all = traci.edge.getLastStepVehicleIDs(self.rd.entranceEdgeID) self.update_veh_dict(self.vehID_tuple_all) # check if episode ends", "-(self.ego.pos_lat - 0.5*self.rd.laneWidth)) # abort lane change, change back to", "TTC < 2: r_long_c = - math.exp(-2*TTC+5) else: r_long_c =", "sys.path.append(tools) print('success') else: sys.exit(\"please declare environment variable 'SUMO_HOME'\") import traci", "self.rd.laneWidth - self.ego.pos_lat) # keep current lateral position if action_lateral", "< TTC2 < 2: r_long_t = - math.exp(-2 * TTC2", "wt * self.timestep r_speed = ws * (self.ego.speed - self.ego_speedLimit)", "= {} self.vehID_tuple_all = () self.egoID = id self.ego =", "self.veh_dict = {} self.vehID_tuple_all = () self.egoID = id self.ego", "= action_lateral # action_longi = action[0] # action_lateral = action[1]", "= 4.8 self.observation[name * 4 + 3] = 0 #", "+ 3] = 0 # todo check if rational def", "= 'C:/Users/<NAME>/Desktop/map/ramp3/map.sumo.cfg' # arguments must be string, if float/int, must", "1.1 # r_safe_leader = w_lateral * alpha + w_longi *", "r_safe_tgtleader = w_lateral * alpha + w_longi * (1 -", "self.collision_num = traci.simulation.getCollidingVehiclesNumber() if self.collision_num > 0: self.done = True", "self.observation[3] = self.ego.acce self._updateObservationSingle(1, self.ego.orig_leader) self._updateObservationSingle(2, self.ego.orig_follower) self._updateObservationSingle(3, self.ego.trgt_leader) self._updateObservationSingle(4,", "veh.acce else: self.observation[name * 4 + 0] = self.observation[0] +", "tfc=1, is_gui=True, sumoseed=None, randomseed=None): \"\"\" reset env :param id: ego", 
"entrance, dis2targetlane:', # self.ego.dis2tgtLane) # ego vehicle out of env", "(0.5+self.ego.targetLane)*self.rd.laneWidth) > 0.01: self.is_success = self.ego.changeLane(True, self.ego.trgt_laneIndex, self.rd) # print('posLat',", "current # lane change to target lane if not self.is_success:", "veh_id_tuple: if veh_id not in self.veh_dict.keys(): self.veh_dict[veh_id] = Vehicle(veh_id, self.rd)", "return r_total def is_done(self): # lane change successfully executed, episode", "leader will not change despite the lateral position of the", "def updateReward2(self): wc1 = 1 wc2 = 1 wt =", "0:light; 1:medium; 2:dense :return: initial observation \"\"\" self.seed(randomseed) if sumoseed", "continue step until ego appears in env if self.egoID is", "ego after 5000 timesteps') assert self.egoID in self.vehID_tuple_all, \"cannot start", "True # (bool): whether the episode has ended, in which", "alpha = abs(self.ego.pos_lat - self.veh_dict[self.ego.leaderID].pos_lat) / 3.2 # assert 0", "self.ego.is_ego = 1 # set ego vehicle speed mode traci.vehicle.setSpeedMode(self.ego.veh_id,", "+ 1] = veh.speed self.observation[name * 4 + 2] =", "in __init__ \"\"\" action_longi = action // 3 action_lateral =", "traci.edge.getLastStepVehicleIDs(self.rd.entranceEdgeID) self.update_veh_dict(self.vehID_tuple_all) # check if episode ends self.is_done() if self.done", "= self.ego.updateLongitudinalSpeedIDM(action_longi) # print(acceNext) vNext = self.ego.speed + acceNext *", "env if self.egoID is not None: while self.egoID not in", "= veh.lanePos self.observation[name * 4 + 1] = veh.speed self.observation[name", "in veh_id_tuple: if veh_id not in self.veh_dict.keys(): self.veh_dict[veh_id] = Vehicle(veh_id,", "self.randomseed = None self.sumoseed = None self.veh_dict = {} self.vehID_tuple_all", "= np.array(self.observation).flatten() # print(self.observation.shape) def updateReward(self): return -self.ego.dis2tgtLane def updateReward2(self):", "= - abs(self.ego.leaderDis)/delta_V else: TTC = 
-delta_V - math.sqrt(delta_V**2 +", "acceNext = self.ego.updateLongitudinalSpeedIDM(action_longi) # print(acceNext) vNext = self.ego.speed + acceNext", "if self.collision_num > 0: self.done = True # print('reset on:", "step() calls will return undefined results self.info = { 'resetFlag':", "= - math.exp(-2*TTC+5) else: r_long_c = 0 if self.lateral_action ==", "= - abs(delta_D2) / delta_V2 else: TTC2 = -delta_V2 -", "(r_lat_c + r_lat_t) + w_longi * (r_long_c+ r_long_t) # #", "* 4 + 2] = veh.pos_lat self.observation[name * 4 +", "action[1] = 0: abort lane change, change back to original", "position Returns: described in __init__ \"\"\" action_longi = action //", "r_total = r_comf + r_effi_all + r_safe return r_total def", "total reward r_total = r_comf + r_effi_all + r_safe return", "self.randomseed traci.close() self.__init__(id=egoid, traffic=tfc, gui=is_gui, seed=self.sumoseed) # continue step until", "4 + 2] = 4.8 self.observation[name * 4 + 3]", "self.done = True # print('reset on: self.ego not in env:',", "env # todo modify if self.is_success: self.done = True #", "# collision occurs self.collision_num = traci.simulation.getCollidingVehiclesNumber() if self.collision_num > 0:", "- math.sqrt(delta_V**2 + 2*delta_A * self.ego.leaderDis) TTC = TTC/delta_A if", "lane leader **important**: orginal/target lane leader will not change despite", "0.01: self.is_success = self.ego.changeLane(True, self.ego.trgt_laneIndex, self.rd) # print('posLat', self.ego.pos_lat, 'lane',", "= True # (bool): whether the episode has ended, in", "leader # [0, 0, 0], # target lane leader #", "1: # and abs(self.ego.pos_lat - (0.5+self.ego.targetLane)*self.rd.laneWidth) > 0.01: self.is_success =", "returns a tuple (observation, reward, done, info). 
Args: action (object):", "update info------------------------------ traci.simulationStep() self.vehID_tuple_all = traci.edge.getLastStepVehicleIDs(self.rd.entranceEdgeID) self.update_veh_dict(self.vehID_tuple_all) # check if", "ego lane position and speed # [0, 0, 0], #", "0, 0], # target lane leader # [0, 0, 0]]", "alpha + w_longi * (1 - alpha) * abs(self.ego.leaderDis) #", "# # r_safe_tgtleader = w_lateral * alpha + w_longi *", "lane change successfully executed, episode ends, reset env # todo", "+ ['--quit-on-end', str(True), '--start', str(True)] else: self.sumoCmd = [self.sumoBinary] +", "(bool): whether the episode has ended, in which case further", "is None: self.randomseed = datetime.datetime.now().microsecond else: self.randomseed = seed random.seed(self.randomseed)", "seed is None: self.randomseed = datetime.datetime.now().microsecond else: self.randomseed = seed", "['-c', self.cfg, # '--lanechange.duration', str(3), # using 'Simple Continuous lane-change", "0] = veh.lanePos self.observation[name * 4 + 1] = veh.speed", "ensure safety in preStpe self.preStep() if self.timestep > 5000: raise", "== 0: TTC2 = - abs(delta_D2) / delta_V2 else: TTC2", "- abs(self.ego.leaderDis)/delta_V else: TTC = -delta_V - math.sqrt(delta_V**2 + 2*delta_A", "r_lat_t) + w_longi * (r_long_c+ r_long_t) # # if self.ego.leaderID", "ego is not in env\" self.done = False self.ego =", "vehicle id corresponding to name :return: \"\"\" if veh is", "= None self.veh_dict = {} self.vehID_tuple_all = () self.egoID =", "2*delta_A * self.ego.leaderDis) TTC = TTC/delta_A if self.lateral_action != 1", "occurs self.collision_num = traci.simulation.getCollidingVehiclesNumber() if self.collision_num > 0: self.done =", "related to efficiency r_time = - wt * self.timestep r_speed", "-1: decelerate action[0] = 0: use SUMO default action[0] =", "name: 0:ego; 1:leader; 2:target leader; 3:target follower :param id: vehicle", "# lane change successfully executed, episode ends, reset env #", 
"'C:/Users/<NAME>/Desktop/map/ramp3/mapDense.sumo.cfg' else: # average 14 vehicles self.cfg = 'C:/Users/<NAME>/Desktop/map/ramp3/map.sumo.cfg' #", "0: TTC = - abs(self.ego.leaderDis)/delta_V else: TTC = -delta_V -", "updateObservation(self): self.observation[0] = self.ego.lanePos self.observation[1] = self.ego.speed self.observation[2] = self.ego.pos_lat", "0, 0], # leader # [0, 0, 0], # target", "- math.exp(-2 * TTC2 + 5) else: r_long_t = 0", "# r_safe_tgtleader = w_lateral * alpha + w_longi * (1", "info). Args: action (object): longitudinal0: action[0] = 1: accelerate action[0]", "change back to original lane action[1] = 2: keep in", "== 0: # average 9 vehicles self.cfg = 'C:/Users/<NAME>/Desktop/map/ramp3/mapFree.sumo.cfg' elif", "# ('lateralPos2leader', abs(self.ego.pos_lat - self.veh_dict[self.ego.leaderID].pos_lat)) # alpha = abs(self.ego.pos_lat -", "self.info['resetFlag'] = True return self.observation, 0.0, self.done, self.info else: self.updateObservation()", "+= ['--seed', str(seed)] # gui if gui is True: self.sumoBinary", "# abort lane change, change back to ego's original lane", "4 + 2] = veh.pos_lat self.observation[name * 4 + 3]", "if action_lateral == 1: # and abs(self.ego.pos_lat - (0.5+self.ego.targetLane)*self.rd.laneWidth) >", "0 if self.lateral_action == 0: #abort lane change alpha =", "'Sublane-Model' '--step-length', str(0.1), '--default.action-step-length', str(0.1)] # randomness if seed is", "env!! to reset this environment's state. 
Accepts an action and", "0], # target lane leader # [0, 0, 0]] #", "self.done is True: self.info['resetFlag'] = True return self.observation, 0.0, self.done,", "0 self.dt = traci.simulation.getDeltaT() self.randomseed = None self.sumoseed = None", "if rational def updateObservation(self): self.observation[0] = self.ego.lanePos self.observation[1] = self.ego.speed", "# print(self.observation.shape) def updateReward(self): return -self.ego.dis2tgtLane def updateReward2(self): wc1 =", "lane leader action[0] = 1: follow closer leader longitudinal2: action[0]", "original lane leader action[0] = 1: follow target lane leader", "False' assert action is not None, 'action is None' assert", "not in env\" self.done = False self.ego = self.veh_dict[self.egoID] self.ego.trgt_laneIndex", "= [[0, 0, 0], # ego lane position and speed", "* self.ego.leaderDis) TTC = TTC/delta_A if self.lateral_action != 1 and", "w_longi * (1 - alpha) * abs( # self.ego.lanePos -", "variable 'SUMO_HOME'\") import traci ###################################################################### # simulation environments class LaneChangeEnv(gym.Env):", "keep in current lateral position Returns: described in __init__ \"\"\"", "# print('right', -(self.ego.pos_lat - 0.5*self.rd.laneWidth)) # abort lane change, change", "sumoseed=None, randomseed=None): \"\"\" reset env :param id: ego vehicle id", "raise Exception('cannot find ego after 5000 timesteps') assert self.egoID in", "self.vehID_tuple_all, \"cannot start training while ego is not in env\"", "19 vehicles self.cfg = 'C:/Users/<NAME>/Desktop/map/ramp3/mapDense.sumo.cfg' else: # average 14 vehicles", "\"\"\" reset env :param id: ego vehicle id :param tfc:", "2:keep current # lane change to target lane if not", "# print('lateralPos2tgtleader', abs(self.ego.pos_lat - self.veh_dict[self.ego.targetLeaderID].pos_lat)) # alpha = abs(self.ego.pos_lat -", "self.rd.laneWidth) # print('right', -(self.ego.pos_lat - 0.5*self.rd.laneWidth)) # abort lane change,", 
"returned after previous action self.done = True # (bool): whether", "self.veh_dict[self.ego.leaderID].pos_lat)) # alpha = abs(self.ego.pos_lat - self.veh_dict[self.ego.leaderID].pos_lat) / 3.2 #", "self.ego = self.veh_dict[self.egoID] self.ego.trgt_laneIndex = tlane self.ego.is_ego = 1 #", "self.reward = None # (float) : amount of reward returned", "longitudinal1: action[0] = 0: follow original lane leader action[0] =", "despite the lateral position of the ego may change lateral:", "- self.ego.speed delta_A2 = self.veh_dict[self.ego.targetLeaderID].acce - self.ego.acce delta_D2 = self.veh_dict[self.ego.targetLeaderID].lanePos", "action is not None, 'action is None' assert self.egoID in", "r_safe_tgtleader = 0 # # # r_safe = r_safe_leader +", "> 0.01: self.is_success = self.ego.changeLane(True, self.ego.orig_laneIndex, self.rd) # print('left', 1.5", "is None: self.sumoseed = self.randomseed traci.close() self.__init__(id=egoid, traffic=tfc, gui=is_gui, seed=self.sumoseed)", "self.observation[name * 4 + 0] = veh.lanePos self.observation[name * 4", "simulation environments class LaneChangeEnv(gym.Env): def __init__(self, id=None, traffic=1, gui=False, seed=None):", "if seed is None: self.randomseed = datetime.datetime.now().microsecond else: self.randomseed =", "= r_time + r_speed + r_effi # reward related to", "delta_V2 else: TTC2 = -delta_V2 - math.sqrt(delta_V2 ** 2 +", "print('lateralPos2tgtleader', abs(self.ego.pos_lat - self.veh_dict[self.ego.targetLeaderID].pos_lat)) # alpha = abs(self.ego.pos_lat - self.veh_dict[self.ego.targetLeaderID].pos_lat)", "= 0 self.dt = traci.simulation.getDeltaT() self.randomseed = None self.sumoseed =", "14 vehicles self.cfg = 'C:/Users/<NAME>/Desktop/map/ramp3/map.sumo.cfg' # arguments must be string,", "self.ego.speed delta_A2 = self.veh_dict[self.ego.targetLeaderID].acce - self.ego.acce delta_D2 = self.veh_dict[self.ego.targetLeaderID].lanePos -", "'-gui' self.sumoCmd = [self.sumoBinary] + self.sumoCmd + ['--quit-on-end', 
str(True), '--start',", "self.vehID_tuple_all, 'vehicle not in env' self.timestep += 1 # lateral", "dis2targetlane:', # self.ego.dis2tgtLane) # ego vehicle out of env if", "datetime.datetime.now().microsecond else: self.randomseed = seed random.seed(self.randomseed) def reset(self, egoid, tlane=0,", "else: self.updateObservation() self.reward = self.updateReward() return self.observation, self.reward, self.done, self.info", "self.ego.targetLeaderID is not None: # # print('lateralPos2tgtleader', abs(self.ego.pos_lat - self.veh_dict[self.ego.targetLeaderID].pos_lat))", "# (dict): contains auxiliary diagnostic information (helpful for debugging, and", "vehicles self.cfg = 'C:/Users/<NAME>/Desktop/map/ramp3/map.sumo.cfg' # arguments must be string, if", "= self.observation[1] self.observation[name * 4 + 2] = 4.8 self.observation[name", "# r_safe_leader = w_lateral * alpha + w_longi * (1", "self.sumoseed = self.randomseed traci.close() self.__init__(id=egoid, traffic=tfc, gui=is_gui, seed=self.sumoseed) # continue", "= tgtlane self.is_success = False self.collision_num = 0 self.lateral_action =", "0} # (dict): contains auxiliary diagnostic information (helpful for debugging,", "r_long_c = - math.exp(-2*TTC+5) else: r_long_c = 0 if self.lateral_action", "When end of episode is reached, call `reset()` outside env!!", "str(0.1)] # randomness if seed is None: self.sumoCmd += ['--random']", "delta_A2 * delta_D2) TTC2 = TTC2 / delta_A2 if self.lateral_action", "longitudinal2: action[0] = 0: follow original lane leader action[0] =", "if float/int, must be converted to str(float/int), instead of '3.0'", "id: ego vehicle id :param tfc: int. 
0:light; 1:medium; 2:dense", "not in env:', self.egoID not in self.vehID_tuple_all) # collision occurs", "gym from gym import spaces import numpy as np from", "# total reward r_total = r_comf + r_effi_all + r_safe", "vNext = self.ego.speed + acceNext * 0.1 traci.vehicle.setSpeed(self.egoID, vNext) #", "ego appears in env if self.egoID is not None: while", "not False' assert action is not None, 'action is None'", "for debugging, and sometimes learning) self.action_space = spaces.Discrete(6) self.observation_space =", "- self.ego.speed delta_A = self.veh_dict[self.ego.leaderID].acce - self.ego.acce if delta_A ==", "lateral control------------------------- # episode in progress; 0:change back to original", "abort lane change, change back to original lane action[1] =", "w_lateral = 1 w_longi = 1 if self.ego.leaderID is not", "TTC/delta_A if self.lateral_action != 1 and 0 < TTC <", "-delta_V - math.sqrt(delta_V**2 + 2*delta_A * self.ego.leaderDis) TTC = TTC/delta_A", "closer leader longitudinal2: action[0] = 0: follow original lane leader", "= self.veh_dict[self.egoID] self.ego.trgt_laneIndex = tlane self.ego.is_ego = 1 # set", "todo check if rational def updateObservation(self): self.observation[0] = self.ego.lanePos self.observation[1]", "self.sumoseed = None self.veh_dict = {} self.vehID_tuple_all = () self.egoID", "find ego after 5000 timesteps') assert self.egoID in self.vehID_tuple_all, \"cannot", "alpha + w_longi * (1 - alpha) * abs( #", "* (self.ego.speed - self.ego_speedLimit) r_effi = we * self.ego.dis2tgtLane /", "tuple (observation, reward, done, info). 
Args: action (object): longitudinal0: action[0]", "lane action[1] = 2: keep in current lateral position Returns:", "self.rd) # print('left', 1.5 * self.rd.laneWidth - self.ego.pos_lat) # keep", "self.observation, self.reward, self.done, self.info def seed(self, seed=None): if seed is", "leader # [0, 0, 0]] # target lane follower self.observation", "self.sumoCmd traci.start(self.sumoCmd) self.rd = Road() self.timestep = 0 self.dt =", "+ 2] = 4.8 self.observation[name * 4 + 3] =", "* abs(self.ego.leaderDis) # else: # r_safe_leader = 0 # if", "self.ego.delta_acce ** 2 # reward related to efficiency r_time =", "= self.veh_dict[self.ego.targetLeaderID].lanePos - self.ego.lanePos if delta_A2 == 0: TTC2 =", "model' '--lateral-resolution', str(0.8), # using 'Sublane-Model' '--step-length', str(0.1), '--default.action-step-length', str(0.1)]", "of the environment's dynamics. When end of episode is reached,", "= True # print('reset on: self.ego not in env:', self.egoID", "the episode has ended, in which case further step() calls", "self.timestep = 0 self.dt = traci.simulation.getDeltaT() self.randomseed = None self.sumoseed", "= 1 if self.ego.leaderID is not None: # compute longitudinal", "is not None: # compute longitudinal time gap delta_V2 =", "# check if episode ends self.is_done() if self.done is True:", "None: self.randomseed = datetime.datetime.now().microsecond else: self.randomseed = seed random.seed(self.randomseed) def", "if traffic == 0: # average 9 vehicles self.cfg =", "= 0 r_safe = w_lateral * (r_lat_c + r_lat_t) +", "action_longi = action[0] # action_lateral = action[1] assert self.done is", "else: TTC = -delta_V - math.sqrt(delta_V**2 + 2*delta_A * self.ego.leaderDis)", "import gym from gym import spaces import numpy as np", "if self.lateral_action == 1 and 0 < TTC2 < 2:", "# print('reset on: successfully lane change, dis2targetlane:', # self.ego.dis2tgtLane) #", "== 2: self.is_success = self.ego.changeLane(True, -1, self.rd) # longitudinal 
control2---------------------", "['--random'] else: self.sumoCmd += ['--seed', str(seed)] # gui if gui", "0: #abort lane change alpha = abs(self.ego.pos_lat - self.veh_dict[self.ego.leaderID].pos_lat) /", "preStep(self): traci.simulationStep() self.vehID_tuple_all = traci.edge.getLastStepVehicleIDs(self.rd.entranceEdgeID) self.update_veh_dict(self.vehID_tuple_all) def step(self, action=2): \"\"\"Run", "> 0: self.done = True # print('reset on: self.collision_num:', self.collision_num)", "1: lane change action[1] = 0: abort lane change, change", "if not self.is_success: if action_lateral == 1: # and abs(self.ego.pos_lat", "self.update_veh_dict(self.vehID_tuple_all) # check if episode ends self.is_done() if self.done is", "undefined results self.info = { 'resetFlag': 0} # (dict): contains", "# print('reset on: self.collision_num:', self.collision_num) def preStep(self): traci.simulationStep() self.vehID_tuple_all =", "- alpha) * abs(self.ego.leaderDis) # else: # r_safe_leader = 0", "0:change back to original line; 1:lane change to target lane;", "= abs(self.ego.pos_lat - self.veh_dict[self.ego.targetLeaderID].pos_lat) / 3.2 assert 0 <= alpha", "alpha <= 1.1 r_lat_t = -math.exp(-4*alpha+5) else: r_lat_t = 0", "self.egoID not in self.veh_dict.keys(): # must ensure safety in preStpe", "# ego lane position and speed # [0, 0, 0],", "self.ego_speedLimit) r_effi = we * self.ego.dis2tgtLane / self.ego.dis2entrance r_effi_all =", "- self.veh_dict[self.ego.targetLeaderID].pos_lat)) # alpha = abs(self.ego.pos_lat - self.veh_dict[self.ego.targetLeaderID].pos_lat) / 3.2", "Vehicle import math # add sumo/tools into python environment if", "= -1: decelerate action[0] = 0: use SUMO default action[0]", "= tlane self.ego.is_ego = 1 # set ego vehicle speed", "= self.updateReward() return self.observation, self.reward, self.done, self.info def seed(self, seed=None):", "r_comf + r_effi_all + r_safe return r_total def is_done(self): #", "Continuous lane-change model' 
'--lateral-resolution', str(0.8), # using 'Sublane-Model' '--step-length', str(0.1),", "0], # ego lane position and speed # [0, 0,", "gui if gui is True: self.sumoBinary += '-gui' self.sumoCmd =", "str(True), '--start', str(True)] else: self.sumoCmd = [self.sumoBinary] + self.sumoCmd traci.start(self.sumoCmd)", "change to target lane; 2:keep current # lane change to", "= id self.ego = None # self.tgtLane = tgtlane self.is_success", "# # if self.ego.leaderID is not None: # # ('lateralPos2leader',", "outside env!! to reset this environment's state. Accepts an action", "True # print('reset on: too close to ramp entrance, dis2targetlane:',", "\"cannot start training while ego is not in env\" self.done", "if delta_A == 0: TTC = - abs(self.ego.leaderDis)/delta_V else: TTC", "info------------------------------ traci.simulationStep() self.vehID_tuple_all = traci.edge.getLastStepVehicleIDs(self.rd.entranceEdgeID) self.update_veh_dict(self.vehID_tuple_all) # check if episode", "action (object): longitudinal0: action[0] = 1: accelerate action[0] = -1:", "+ self.sumoCmd + ['--quit-on-end', str(True), '--start', str(True)] else: self.sumoCmd =", "None self.veh_dict = {} self.vehID_tuple_all = () self.egoID = id", "traffic=tfc, gui=is_gui, seed=self.sumoseed) # continue step until ego appears in", "__init__ \"\"\" action_longi = action // 3 action_lateral = action", "= veh.acce else: self.observation[name * 4 + 0] = self.observation[0]", "(dict): contains auxiliary diagnostic information (helpful for debugging, and sometimes", "self.sumoCmd = ['-c', self.cfg, # '--lanechange.duration', str(3), # using 'Simple", "self.egoID is not None: while self.egoID not in self.veh_dict.keys(): #", "gui=is_gui, seed=self.sumoseed) # continue step until ego appears in env", "# print('reset on: self.ego not in env:', self.egoID not in", "alpha <= 1.1 r_lat_c = -math.exp(-4*alpha+5) else: r_lat_c = 0", "to name :return: \"\"\" if veh is not None: self.observation[name", "1:lane change to target 
lane; 2:keep current # lane change", "'--lateral-resolution', str(0.8), # using 'Sublane-Model' '--step-length', str(0.1), '--default.action-step-length', str(0.1)] #", "default action[0] = others: acce = 0.0 longitudinal1: action[0] =", "1:medium; 2:dense :return: initial observation \"\"\" self.seed(randomseed) if sumoseed is", "+= ['--random'] else: self.sumoCmd += ['--seed', str(seed)] # gui if", "observation \"\"\" self.seed(randomseed) if sumoseed is None: self.sumoseed = self.randomseed", "self.randomseed = datetime.datetime.now().microsecond else: self.randomseed = seed random.seed(self.randomseed) def reset(self,", "if delta_A2 == 0: TTC2 = - abs(delta_D2) / delta_V2", "reward related to safety w_lateral = 1 w_longi = 1", "< TTC < 2: r_long_c = - math.exp(-2*TTC+5) else: r_long_c", "-1, self.rd) # longitudinal control2--------------------- acceNext = self.ego.updateLongitudinalSpeedIDM(action_longi) # print(acceNext)", "'--lanechange.duration', str(3), # using 'Simple Continuous lane-change model' '--lateral-resolution', str(0.8),", "def preStep(self): traci.simulationStep() self.vehID_tuple_all = traci.edge.getLastStepVehicleIDs(self.rd.entranceEdgeID) self.update_veh_dict(self.vehID_tuple_all) def step(self, action=2):", "name, veh): \"\"\" :param name: 0:ego; 1:leader; 2:target leader; 3:target", "4 + 0] = self.observation[0] + 300. 
self.observation[name * 4", "Accepts an action and returns a tuple (observation, reward, done,", "progress; 0:change back to original line; 1:lane change to target", "is not False' assert action is not None, 'action is", "r_lat_c = -math.exp(-4*alpha+5) else: r_lat_c = 0 if self.ego.targetLeaderID is", "self.lateral_action = 2 # self.observation = [[0, 0, 0], #", "self._updateObservationSingle(3, self.ego.trgt_leader) self._updateObservationSingle(4, self.ego.trgt_follower) # self.observation = np.array(self.observation).flatten() # print(self.observation.shape)", "2:dense :return: initial observation \"\"\" self.seed(randomseed) if sumoseed is None:", "time gap delta_V = self.veh_dict[self.ego.leaderID].speed - self.ego.speed delta_A = self.veh_dict[self.ego.leaderID].acce", "# reward related to comfort r_comf = wc1 * self.ego.acce", "self.ego.idm_obj.__init__(self.ego_speedLimit) self.ego.update_info(self.rd, self.veh_dict) self.updateObservation() return self.observation return def close(self): traci.close()", "lane change, change back to original lane action[1] = 2:", "lane if action_lateral == 0: # and abs(self.ego.pos_lat - (0.5+self.ego.origLane)*self.rd.laneWidth)", "'SUMO_HOME'\") import traci ###################################################################### # simulation environments class LaneChangeEnv(gym.Env): def", "veh is not None: self.observation[name * 4 + 0] =", "episode has ended, in which case further step() calls will", "None self.sumoseed = None self.veh_dict = {} self.vehID_tuple_all = ()", "compute longitudinal time gap delta_V2 = self.veh_dict[self.ego.targetLeaderID].speed - self.ego.speed delta_A2", "def _updateObservationSingle(self, name, veh): \"\"\" :param name: 0:ego; 1:leader; 2:target", "1 # reward related to comfort r_comf = wc1 *", "longitudinal0: action[0] = 1: accelerate action[0] = -1: decelerate action[0]", "# using 'Simple Continuous lane-change model' '--lateral-resolution', str(0.8), # using", "TTC2 < 2: r_long_t = - 
math.exp(-2 * TTC2 +", "= 0 # if self.ego.targetLeaderID is not None: # #", "wc2 = 1 wt = 1 ws = 1 we", "self.done = True # (bool): whether the episode has ended,", "assert self.egoID in self.vehID_tuple_all, \"cannot start training while ego is", "= TTC/delta_A if self.lateral_action != 1 and 0 < TTC", "3.2 # assert 0 <= alpha <= 1.1 # r_safe_leader", "follow original lane leader action[0] = 1: follow target lane", "# simulation environments class LaneChangeEnv(gym.Env): def __init__(self, id=None, traffic=1, gui=False,", "0 # # # r_safe = r_safe_leader + r_safe_tgtleader #", "converted to str(float/int), instead of '3.0' self.sumoBinary = \"/usr/local/Cellar/sumo/1.2.0/bin/sumo\" self.sumoCmd", "TTC = -delta_V - math.sqrt(delta_V**2 + 2*delta_A * self.ego.leaderDis) TTC", "whether the episode has ended, in which case further step()", "w_longi * (1 - alpha) * abs(self.ego.leaderDis) # else: #", "gym import spaces import numpy as np from env.IDM import", "tools = os.path.join(os.environ['SUMO_HOME'], 'tools') sys.path.append(tools) print('success') else: sys.exit(\"please declare environment", "self.ego.dis2tgtLane) # too close to ramp entrance if self.ego.dis2entrance <", "self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=(20,)) def update_veh_dict(self, veh_id_tuple): for veh_id", "# action_longi = action[0] # action_lateral = action[1] assert self.done", "alpha <= 1.1 # r_safe_leader = w_lateral * alpha +", "1 ws = 1 we = 1 # reward related", "action[1] assert self.done is False, 'self.done is not False' assert", "and 0 < TTC2 < 2: r_long_t = - math.exp(-2", "change, change back to original lane action[1] = 2: keep", "def is_done(self): # lane change successfully executed, episode ends, reset", "not None: # # print('lateralPos2tgtleader', abs(self.ego.pos_lat - self.veh_dict[self.ego.targetLeaderID].pos_lat)) # alpha", "lane leader will not change despite the lateral position of", "self._updateObservationSingle(2, self.ego.orig_follower) 
self._updateObservationSingle(3, self.ego.trgt_leader) self._updateObservationSingle(4, self.ego.trgt_follower) # self.observation = np.array(self.observation).flatten()", "== 0: # and abs(self.ego.pos_lat - (0.5+self.ego.origLane)*self.rd.laneWidth) > 0.01: self.is_success", "# print(acceNext) vNext = self.ego.speed + acceNext * 0.1 traci.vehicle.setSpeed(self.egoID,", "= self.randomseed traci.close() self.__init__(id=egoid, traffic=tfc, gui=is_gui, seed=self.sumoseed) # continue step", "else: r_long_c = 0 if self.lateral_action == 0: #abort lane", "False self.ego = self.veh_dict[self.egoID] self.ego.trgt_laneIndex = tlane self.ego.is_ego = 1", "= r_comf + r_effi_all + r_safe return r_total def is_done(self):", "self.randomseed = seed random.seed(self.randomseed) def reset(self, egoid, tlane=0, tfc=1, is_gui=True,", "= wc1 * self.ego.acce ** 2 + wc2 * self.ego.delta_acce", "in list(self.veh_dict.keys()): if veh_id not in veh_id_tuple: self.veh_dict.pop(veh_id) for veh_id", "3.2 assert 0 <= alpha <= 1.1 r_lat_t = -math.exp(-4*alpha+5)", "alpha = abs(self.ego.pos_lat - self.veh_dict[self.ego.targetLeaderID].pos_lat) / 3.2 # # print('alpha',", "0]] # target lane follower self.observation = np.empty(20) self.reward =", "if self.egoID not in self.vehID_tuple_all: self.done = True # print('reset", "+ wc2 * self.ego.delta_acce ** 2 # reward related to", "sumoseed is None: self.sumoseed = self.randomseed traci.close() self.__init__(id=egoid, traffic=tfc, gui=is_gui,", "'3.0' self.sumoBinary = \"/usr/local/Cellar/sumo/1.2.0/bin/sumo\" self.sumoCmd = ['-c', self.cfg, # '--lanechange.duration',", "action_lateral == 2: self.is_success = self.ego.changeLane(True, -1, self.rd) # longitudinal", "action=2): \"\"\"Run one timestep of the environment's dynamics. 
When end", "float/int, must be converted to str(float/int), instead of '3.0' self.sumoBinary", "in which case further step() calls will return undefined results", "leader; 3:target follower :param id: vehicle id corresponding to name", "= { 'resetFlag': 0} # (dict): contains auxiliary diagnostic information", "spaces.Box(low=-np.inf, high=np.inf, shape=(20,)) def update_veh_dict(self, veh_id_tuple): for veh_id in veh_id_tuple:", "2: keep in current lateral position Returns: described in __init__", "self.is_success: if action_lateral == 1: # and abs(self.ego.pos_lat - (0.5+self.ego.targetLane)*self.rd.laneWidth)", "0 if self.ego.targetLeaderID is not None: # compute longitudinal time", "ego's original lane if action_lateral == 0: # and abs(self.ego.pos_lat", "= None # (float) : amount of reward returned after", "= os.path.join(os.environ['SUMO_HOME'], 'tools') sys.path.append(tools) print('success') else: sys.exit(\"please declare environment variable", "self.egoID not in self.vehID_tuple_all) # collision occurs self.collision_num = traci.simulation.getCollidingVehiclesNumber()", "appears in env if self.egoID is not None: while self.egoID", "tlane self.ego.is_ego = 1 # set ego vehicle speed mode", "0: abort lane change, change back to original lane action[1]", "_updateObservationSingle(self, name, veh): \"\"\" :param name: 0:ego; 1:leader; 2:target leader;", "0.5*self.rd.laneWidth)) # abort lane change, change back to ego's original", "{} self.vehID_tuple_all = () self.egoID = id self.ego = None", "tgtlane self.is_success = False self.collision_num = 0 self.lateral_action = 2", "IDM from env.Road import Road from env.Vehicle import Vehicle import", "# lane change alpha = abs(self.ego.pos_lat - self.veh_dict[self.ego.targetLeaderID].pos_lat) / 3.2", "IDM() self.ego.idm_obj.__init__(self.ego_speedLimit) self.ego.update_info(self.rd, self.veh_dict) self.updateObservation() return self.observation return def close(self):", "ws * (self.ego.speed - self.ego_speedLimit) 
r_effi = we * self.ego.dis2tgtLane", "1 # set ego vehicle speed mode traci.vehicle.setSpeedMode(self.ego.veh_id, 0) self.ego_speedFactor", "action[0] = 0: use SUMO default action[0] = others: acce", "is None: self.sumoCmd += ['--random'] else: self.sumoCmd += ['--seed', str(seed)]", "if action_lateral == 0: # and abs(self.ego.pos_lat - (0.5+self.ego.origLane)*self.rd.laneWidth) >", "* self.rd.laneWidth - self.ego.pos_lat) # keep current lateral position if", "in env if self.egoID is not None: while self.egoID not", "if self.is_success: self.done = True # print('reset on: successfully lane", "False self.collision_num = 0 self.lateral_action = 2 # self.observation =", ":param name: 0:ego; 1:leader; 2:target leader; 3:target follower :param id:", "if veh_id not in self.veh_dict.keys(): self.veh_dict[veh_id] = Vehicle(veh_id, self.rd) for", "assert 0 <= alpha <= 1.1 r_lat_c = -math.exp(-4*alpha+5) else:", "end of episode is reached, call `reset()` outside env!! to", "random import datetime import gym from gym import spaces import", "# reward related to efficiency r_time = - wt *", "diagnostic information (helpful for debugging, and sometimes learning) self.action_space =", "the ego may change lateral: action[1] = 1: lane change", "self.veh_dict[self.ego.targetLeaderID].speed - self.ego.speed delta_A2 = self.veh_dict[self.ego.targetLeaderID].acce - self.ego.acce delta_D2 =", "action[0] = 0: follow original lane leader action[0] = 1:", "and speed # [0, 0, 0], # leader # [0,", "< 2: r_long_c = - math.exp(-2*TTC+5) else: r_long_c = 0", "acce = 0.0 longitudinal1: action[0] = 0: follow original lane", "tlane=0, tfc=1, is_gui=True, sumoseed=None, randomseed=None): \"\"\" reset env :param id:", "is not in env\" self.done = False self.ego = self.veh_dict[self.egoID]", "self.collision_num:', self.collision_num) def preStep(self): traci.simulationStep() self.vehID_tuple_all = traci.edge.getLastStepVehicleIDs(self.rd.entranceEdgeID) self.update_veh_dict(self.vehID_tuple_all) def", 
"# and abs(self.ego.pos_lat - (0.5+self.ego.targetLane)*self.rd.laneWidth) > 0.01: self.is_success = self.ego.changeLane(True,", "1 # lateral control------------------------- # episode in progress; 0:change back", "seed(self, seed=None): if seed is None: self.randomseed = datetime.datetime.now().microsecond else:", "ends, reset env # todo modify if self.is_success: self.done =", "= veh.pos_lat self.observation[name * 4 + 3] = veh.acce else:", "self.veh_dict[self.ego.targetLeaderID].lanePos - self.ego.lanePos if delta_A2 == 0: TTC2 = -", "after previous action self.done = True # (bool): whether the", ":param id: ego vehicle id :param tfc: int. 0:light; 1:medium;", "// 3 action_lateral = action % 3 self.lateral_action = action_lateral", "= 'C:/Users/<NAME>/Desktop/map/ramp3/mapFree.sumo.cfg' elif traffic == 2: # average 19 vehicles", "- (0.5+self.ego.targetLane)*self.rd.laneWidth) > 0.01: self.is_success = self.ego.changeLane(True, self.ego.trgt_laneIndex, self.rd) #", "3.2 # # print('alpha', alpha) # assert 0 <= alpha", "action[0] # action_lateral = action[1] assert self.done is False, 'self.done", "in os.environ: tools = os.path.join(os.environ['SUMO_HOME'], 'tools') sys.path.append(tools) print('success') else: sys.exit(\"please", "of env if self.egoID not in self.vehID_tuple_all: self.done = True", "True # print('reset on: self.ego not in env:', self.egoID not", "r_lat_t = 0 r_safe = w_lateral * (r_lat_c + r_lat_t)", "10.0: self.done = True # print('reset on: too close to", "id: vehicle id corresponding to name :return: \"\"\" if veh", "not in self.vehID_tuple_all: self.done = True # print('reset on: self.ego", "veh): \"\"\" :param name: 0:ego; 1:leader; 2:target leader; 3:target follower", "return -self.ego.dis2tgtLane def updateReward2(self): wc1 = 1 wc2 = 1", "w_lateral * alpha + w_longi * (1 - alpha) *", "ego vehicle id :param tfc: int. 
0:light; 1:medium; 2:dense :return:", "r_long_t = 0 if self.lateral_action == 1: # lane change", "vehicle out of env if self.egoID not in self.vehID_tuple_all: self.done", "env :param id: ego vehicle id :param tfc: int. 0:light;", "veh_id in list(self.veh_dict.keys()): if veh_id not in veh_id_tuple: self.veh_dict.pop(veh_id) for", "r_effi # reward related to safety w_lateral = 1 w_longi", "= action[1] assert self.done is False, 'self.done is not False'", "if self.egoID is not None: while self.egoID not in self.veh_dict.keys():", "= -delta_V - math.sqrt(delta_V**2 + 2*delta_A * self.ego.leaderDis) TTC =", "# self.observation = [[0, 0, 0], # ego lane position", "assert action is not None, 'action is None' assert self.egoID", "self.observation[0] + 300. self.observation[name * 4 + 1] = self.observation[1]", "environment's dynamics. When end of episode is reached, call `reset()`", "env\" self.done = False self.ego = self.veh_dict[self.egoID] self.ego.trgt_laneIndex = tlane", "action[0] = 1: accelerate action[0] = -1: decelerate action[0] =", "# and abs(self.ego.pos_lat - (0.5+self.ego.origLane)*self.rd.laneWidth) > 0.01: self.is_success = self.ego.changeLane(True,", "self.observation[name * 4 + 1] = self.observation[1] self.observation[name * 4", "None, 'action is None' assert self.egoID in self.vehID_tuple_all, 'vehicle not", "None: # # print('lateralPos2tgtleader', abs(self.ego.pos_lat - self.veh_dict[self.ego.targetLeaderID].pos_lat)) # alpha =", "2: r_long_c = - math.exp(-2*TTC+5) else: r_long_c = 0 if", "# alpha = abs(self.ego.pos_lat - self.veh_dict[self.ego.leaderID].pos_lat) / 3.2 # assert", "to original line; 1:lane change to target lane; 2:keep current", "= 0 self.lateral_action = 2 # self.observation = [[0, 0,", "= 0 # todo check if rational def updateObservation(self): self.observation[0]", "TTC2 / delta_A2 if self.lateral_action == 1 and 0 <", "= 'C:/Users/<NAME>/Desktop/map/ramp3/mapDense.sumo.cfg' else: # average 14 vehicles self.cfg = 
'C:/Users/<NAME>/Desktop/map/ramp3/map.sumo.cfg'", "0] = self.observation[0] + 300. self.observation[name * 4 + 1]", ":return: \"\"\" if veh is not None: self.observation[name * 4", "(observation, reward, done, info). Args: action (object): longitudinal0: action[0] =", "accelerate action[0] = -1: decelerate action[0] = 0: use SUMO", "3:target follower :param id: vehicle id corresponding to name :return:", "self.ego.dis2entrance < 10.0: self.done = True # print('reset on: too", "= np.empty(20) self.reward = None # (float) : amount of", "todo check traffic flow density if traffic == 0: #", "None: self.sumoseed = self.randomseed traci.close() self.__init__(id=egoid, traffic=tfc, gui=is_gui, seed=self.sumoseed) #", "else: sys.exit(\"please declare environment variable 'SUMO_HOME'\") import traci ###################################################################### #", "related to comfort r_comf = wc1 * self.ego.acce ** 2", "self.observation[name * 4 + 3] = 0 # todo check", "str(0.8), # using 'Sublane-Model' '--step-length', str(0.1), '--default.action-step-length', str(0.1)] # randomness", "print('reset on: self.collision_num:', self.collision_num) def preStep(self): traci.simulationStep() self.vehID_tuple_all = traci.edge.getLastStepVehicleIDs(self.rd.entranceEdgeID)", "None # self.tgtLane = tgtlane self.is_success = False self.collision_num =", "may change lateral: action[1] = 1: lane change action[1] =", "def reset(self, egoid, tlane=0, tfc=1, is_gui=True, sumoseed=None, randomseed=None): \"\"\" reset", "abs(delta_D2) / delta_V2 else: TTC2 = -delta_V2 - math.sqrt(delta_V2 **", "traci.vehicle.setSpeedMode(self.ego.veh_id, 0) self.ego_speedFactor = traci.vehicle.getSpeedFactor(egoid) self.ego_speedLimit = self.ego_speedFactor * traci.lane.getMaxSpeed(traci.vehicle.getLaneID(self.egoID))", "0:ego; 1:leader; 2:target leader; 3:target follower :param id: vehicle id", "egoid, tlane=0, tfc=1, is_gui=True, sumoseed=None, randomseed=None): \"\"\" reset env :param", "to 
ego's original lane if action_lateral == 0: # and", "self.timestep r_speed = ws * (self.ego.speed - self.ego_speedLimit) r_effi =", "< 2: r_long_t = - math.exp(-2 * TTC2 + 5)", "# action_lateral = action[1] assert self.done is False, 'self.done is", "abs(self.ego.leaderDis)/delta_V else: TTC = -delta_V - math.sqrt(delta_V**2 + 2*delta_A *", "from env.Vehicle import Vehicle import math # add sumo/tools into", "= [self.sumoBinary] + self.sumoCmd traci.start(self.sumoCmd) self.rd = Road() self.timestep =", "must be converted to str(float/int), instead of '3.0' self.sumoBinary =", "change successfully executed, episode ends, reset env # todo modify", "<= alpha <= 1.1 # # r_safe_tgtleader = w_lateral *", "def __init__(self, id=None, traffic=1, gui=False, seed=None): # todo check traffic", "# self.tgtLane = tgtlane self.is_success = False self.collision_num = 0", "after 5000 timesteps') assert self.egoID in self.vehID_tuple_all, \"cannot start training", "2] = 4.8 self.observation[name * 4 + 3] = 0", "math.exp(-2*TTC+5) else: r_long_c = 0 if self.lateral_action == 0: #abort", "- abs(delta_D2) / delta_V2 else: TTC2 = -delta_V2 - math.sqrt(delta_V2", "veh.speed self.observation[name * 4 + 2] = veh.pos_lat self.observation[name *", "True: self.sumoBinary += '-gui' self.sumoCmd = [self.sumoBinary] + self.sumoCmd +", "of reward returned after previous action self.done = True #", "# continue step until ego appears in env if self.egoID", "0], # leader # [0, 0, 0], # target lane", "- self.veh_dict[self.ego.targetLeaderID].pos_lat) / 3.2 assert 0 <= alpha <= 1.1", "step until ego appears in env if self.egoID is not", "updateReward(self): return -self.ego.dis2tgtLane def updateReward2(self): wc1 = 1 wc2 =", "not change despite the lateral position of the ego may", "delta_D2 = self.veh_dict[self.ego.targetLeaderID].lanePos - self.ego.lanePos if delta_A2 == 0: TTC2", "action self.done = True # (bool): whether the episode has", "alpha = abs(self.ego.pos_lat - 
self.veh_dict[self.ego.leaderID].pos_lat) / 3.2 assert 0 <=", "1 and 0 < TTC < 2: r_long_c = -", "is_gui=True, sumoseed=None, randomseed=None): \"\"\" reset env :param id: ego vehicle", "abs(self.ego.pos_lat - self.veh_dict[self.ego.leaderID].pos_lat) / 3.2 assert 0 <= alpha <=", "and abs(self.ego.pos_lat - (0.5+self.ego.origLane)*self.rd.laneWidth) > 0.01: self.is_success = self.ego.changeLane(True, self.ego.orig_laneIndex,", "# # # r_safe = r_safe_leader + r_safe_tgtleader # total", "successfully executed, episode ends, reset env # todo modify if", "None' assert self.egoID in self.vehID_tuple_all, 'vehicle not in env' self.timestep", "= [self.sumoBinary] + self.sumoCmd + ['--quit-on-end', str(True), '--start', str(True)] else:", "= False self.collision_num = 0 self.lateral_action = 2 # self.observation", "<= 1.1 r_lat_t = -math.exp(-4*alpha+5) else: r_lat_t = 0 r_safe", "= w_lateral * (r_lat_c + r_lat_t) + w_longi * (r_long_c+", "0: self.done = True # print('reset on: self.collision_num:', self.collision_num) def", "# add sumo/tools into python environment if 'SUMO_HOME' in os.environ:", "# todo modify if self.is_success: self.done = True # print('reset", "abs(self.ego.leaderDis) # else: # r_safe_leader = 0 # if self.ego.targetLeaderID", "# lane change to target lane if not self.is_success: if", "traffic == 2: # average 19 vehicles self.cfg = 'C:/Users/<NAME>/Desktop/map/ramp3/mapDense.sumo.cfg'", "todo modify if self.is_success: self.done = True # print('reset on:", "= 0.0 longitudinal1: action[0] = 0: follow original lane leader", "# update info------------------------------ traci.simulationStep() self.vehID_tuple_all = traci.edge.getLastStepVehicleIDs(self.rd.entranceEdgeID) self.update_veh_dict(self.vehID_tuple_all) # check", "in progress; 0:change back to original line; 1:lane change to", "5) else: r_long_t = 0 if self.lateral_action == 1: #", "original line; 1:lane change to target lane; 2:keep current #", "else: # r_safe_tgtleader = 0 # # # r_safe =", 
"call `reset()` outside env!! to reset this environment's state. Accepts", "self.collision_num = 0 self.lateral_action = 2 # self.observation = [[0,", "# compute longitudinal time gap delta_V = self.veh_dict[self.ego.leaderID].speed - self.ego.speed", "1 if self.ego.leaderID is not None: # compute longitudinal time", "from gym import spaces import numpy as np from env.IDM", "= ws * (self.ego.speed - self.ego_speedLimit) r_effi = we *", "= 0 if self.lateral_action == 1: # lane change alpha", "True # print('reset on: successfully lane change, dis2targetlane:', # self.ego.dis2tgtLane)", "to str(float/int), instead of '3.0' self.sumoBinary = \"/usr/local/Cellar/sumo/1.2.0/bin/sumo\" self.sumoCmd =", "self.ego.acce delta_D2 = self.veh_dict[self.ego.targetLeaderID].lanePos - self.ego.lanePos if delta_A2 == 0:", "* delta_D2) TTC2 = TTC2 / delta_A2 if self.lateral_action ==", "to safety w_lateral = 1 w_longi = 1 if self.ego.leaderID", "0 if self.lateral_action == 1: # lane change alpha =", "= 1 w_longi = 1 if self.ego.leaderID is not None:", "comfort r_comf = wc1 * self.ego.acce ** 2 + wc2", "np.array(self.observation).flatten() # print(self.observation.shape) def updateReward(self): return -self.ego.dis2tgtLane def updateReward2(self): wc1", "change, change back to ego's original lane if action_lateral ==", "wt = 1 ws = 1 we = 1 #", "self.ego.lanePos self.observation[1] = self.ego.speed self.observation[2] = self.ego.pos_lat self.observation[3] = self.ego.acce", "information (helpful for debugging, and sometimes learning) self.action_space = spaces.Discrete(6)", "control------------------------- # episode in progress; 0:change back to original line;", "in list(self.veh_dict.keys()): self.veh_dict[veh_id].update_info(self.rd, self.veh_dict) def _updateObservationSingle(self, name, veh): \"\"\" :param", "else: # r_safe_leader = 0 # if self.ego.targetLeaderID is not", "self.collision_num > 0: self.done = True # print('reset on: self.collision_num:',", 
"'C:/Users/<NAME>/Desktop/map/ramp3/mapFree.sumo.cfg' elif traffic == 2: # average 19 vehicles self.cfg", "-self.ego.dis2tgtLane def updateReward2(self): wc1 = 1 wc2 = 1 wt", "action and returns a tuple (observation, reward, done, info). Args:", "# todo check traffic flow density if traffic == 0:", "w_longi = 1 if self.ego.leaderID is not None: # compute", "name :return: \"\"\" if veh is not None: self.observation[name *", "+ self.sumoCmd traci.start(self.sumoCmd) self.rd = Road() self.timestep = 0 self.dt", "= r_safe_leader + r_safe_tgtleader # total reward r_total = r_comf", "in env\" self.done = False self.ego = self.veh_dict[self.egoID] self.ego.trgt_laneIndex =", "to target lane; 2:keep current # lane change to target", "self.veh_dict) def _updateObservationSingle(self, name, veh): \"\"\" :param name: 0:ego; 1:leader;", "4 + 3] = veh.acce else: self.observation[name * 4 +", "str(3), # using 'Simple Continuous lane-change model' '--lateral-resolution', str(0.8), #", "compute longitudinal time gap delta_V = self.veh_dict[self.ego.leaderID].speed - self.ego.speed delta_A", "# self.ego.dis2tgtLane) # ego vehicle out of env if self.egoID", "position if action_lateral == 2: self.is_success = self.ego.changeLane(True, -1, self.rd)", "None: self.sumoCmd += ['--random'] else: self.sumoCmd += ['--seed', str(seed)] #", "self.ego.curr_laneIndex, 'rdWdith', self.rd.laneWidth) # print('right', -(self.ego.pos_lat - 0.5*self.rd.laneWidth)) # abort", "abs(self.ego.pos_lat - self.veh_dict[self.ego.targetLeaderID].pos_lat) / 3.2 assert 0 <= alpha <=", "veh_id not in veh_id_tuple: self.veh_dict.pop(veh_id) for veh_id in list(self.veh_dict.keys()): self.veh_dict[veh_id].update_info(self.rd,", "= False self.ego = self.veh_dict[self.egoID] self.ego.trgt_laneIndex = tlane self.ego.is_ego =", "2] = veh.pos_lat self.observation[name * 4 + 3] = veh.acce", "if self.ego.dis2entrance < 10.0: self.done = True # print('reset on:", "* 4 + 1] = self.observation[1] self.observation[name * 4 +", 
"back to original lane action[1] = 2: keep in current", "+ 2*delta_A * self.ego.leaderDis) TTC = TTC/delta_A if self.lateral_action !=", "debugging, and sometimes learning) self.action_space = spaces.Discrete(6) self.observation_space = spaces.Box(low=-np.inf,", "== 1 and 0 < TTC2 < 2: r_long_t =", "# gui if gui is True: self.sumoBinary += '-gui' self.sumoCmd", "* delta_A2 * delta_D2) TTC2 = TTC2 / delta_A2 if", "= Road() self.timestep = 0 self.dt = traci.simulation.getDeltaT() self.randomseed =", "- self.veh_dict[self.ego.leaderID].pos_lat) / 3.2 # assert 0 <= alpha <=", "ego vehicle out of env if self.egoID not in self.vehID_tuple_all:", "is reached, call `reset()` outside env!! to reset this environment's", "self.ego.targetLeaderID is not None: # compute longitudinal time gap delta_V2", "# else: # r_safe_tgtleader = 0 # # # r_safe", "ramp entrance, dis2targetlane:', # self.ego.dis2tgtLane) # ego vehicle out of", "int. 0:light; 1:medium; 2:dense :return: initial observation \"\"\" self.seed(randomseed) if", "orginal/target lane leader will not change despite the lateral position", "self.update_veh_dict(self.vehID_tuple_all) def step(self, action=2): \"\"\"Run one timestep of the environment's", "= spaces.Box(low=-np.inf, high=np.inf, shape=(20,)) def update_veh_dict(self, veh_id_tuple): for veh_id in", "not None: while self.egoID not in self.veh_dict.keys(): # must ensure", "self.ego.changeLane(True, self.ego.orig_laneIndex, self.rd) # print('left', 1.5 * self.rd.laneWidth - self.ego.pos_lat)", "while ego is not in env\" self.done = False self.ego", "# assert 0 <= alpha <= 1.1 # # r_safe_tgtleader", "seed is None: self.sumoCmd += ['--random'] else: self.sumoCmd += ['--seed',", "self.vehID_tuple_all = () self.egoID = id self.ego = None #", "!= 1 and 0 < TTC < 2: r_long_c =", "follower self.observation = np.empty(20) self.reward = None # (float) :", "randomseed=None): \"\"\" reset env :param id: ego vehicle id :param", "ego may change lateral: action[1] = 1: lane 
change action[1]", "close to ramp entrance if self.ego.dis2entrance < 10.0: self.done =", "= self.ego.speed self.observation[2] = self.ego.pos_lat self.observation[3] = self.ego.acce self._updateObservationSingle(1, self.ego.orig_leader)", "4 + 0] = veh.lanePos self.observation[name * 4 + 1]", "as np from env.IDM import IDM from env.Road import Road", "action_lateral == 0: # and abs(self.ego.pos_lat - (0.5+self.ego.origLane)*self.rd.laneWidth) > 0.01:", "leader longitudinal2: action[0] = 0: follow original lane leader action[0]", "else: self.sumoCmd = [self.sumoBinary] + self.sumoCmd traci.start(self.sumoCmd) self.rd = Road()", "r_safe_leader = 0 # if self.ego.targetLeaderID is not None: #", "instead of '3.0' self.sumoBinary = \"/usr/local/Cellar/sumo/1.2.0/bin/sumo\" self.sumoCmd = ['-c', self.cfg,", "average 9 vehicles self.cfg = 'C:/Users/<NAME>/Desktop/map/ramp3/mapFree.sumo.cfg' elif traffic == 2:", "= 1: follow target lane leader **important**: orginal/target lane leader", "if self.lateral_action == 1: # lane change alpha = abs(self.ego.pos_lat", "import Road from env.Vehicle import Vehicle import math # add", "print('alpha', alpha) # assert 0 <= alpha <= 1.1 #", "* 4 + 2] = 4.8 self.observation[name * 4 +", "a tuple (observation, reward, done, info). 
Args: action (object): longitudinal0:", "delta_A2 = self.veh_dict[self.ego.targetLeaderID].acce - self.ego.acce delta_D2 = self.veh_dict[self.ego.targetLeaderID].lanePos - self.ego.lanePos", "self.ego.trgt_leader) self._updateObservationSingle(4, self.ego.trgt_follower) # self.observation = np.array(self.observation).flatten() # print(self.observation.shape) def", "self.ego.lanePos if delta_A2 == 0: TTC2 = - abs(delta_D2) /", "action[0] = 1: follow closer leader longitudinal2: action[0] = 0:", "'resetFlag': 0} # (dict): contains auxiliary diagnostic information (helpful for", "print('reset on: self.ego not in env:', self.egoID not in self.vehID_tuple_all)", "self.egoID = id self.ego = None # self.tgtLane = tgtlane", "not None: # compute longitudinal time gap delta_V = self.veh_dict[self.ego.leaderID].speed", "self.ego.changeLane(True, -1, self.rd) # longitudinal control2--------------------- acceNext = self.ego.updateLongitudinalSpeedIDM(action_longi) #", "self.tgtLane = tgtlane self.is_success = False self.collision_num = 0 self.lateral_action", "= -math.exp(-4*alpha+5) else: r_lat_c = 0 if self.ego.targetLeaderID is not", "1 wc2 = 1 wt = 1 ws = 1", "average 19 vehicles self.cfg = 'C:/Users/<NAME>/Desktop/map/ramp3/mapDense.sumo.cfg' else: # average 14", "self.info = { 'resetFlag': 0} # (dict): contains auxiliary diagnostic", "# too close to ramp entrance if self.ego.dis2entrance < 10.0:", "* TTC2 + 5) else: r_long_t = 0 if self.lateral_action", "delta_V = self.veh_dict[self.ego.leaderID].speed - self.ego.speed delta_A = self.veh_dict[self.ego.leaderID].acce - self.ego.acce", "1: # lane change alpha = abs(self.ego.pos_lat - self.veh_dict[self.ego.targetLeaderID].pos_lat) /", "delta_A == 0: TTC = - abs(self.ego.leaderDis)/delta_V else: TTC =", "flow density if traffic == 0: # average 9 vehicles", "self.rd = Road() self.timestep = 0 self.dt = traci.simulation.getDeltaT() self.randomseed", "= self.ego.speed + acceNext * 0.1 traci.vehicle.setSpeed(self.egoID, vNext) # 
update", "= - math.exp(-2 * TTC2 + 5) else: r_long_t =", "lane leader action[0] = 1: follow target lane leader **important**:", "self.lateral_action != 1 and 0 < TTC < 2: r_long_c" ]
[ "= requests.get(link) cidrs = r.json() if r.status_code == 200: for", "link = 'https://endpoints.office.com/endpoints/China?clientrequestid='+str(uuid.uuid4()) downloader(version['instance'], version['latest'], os.environ['CHINA_PARAMETER'], link) elif version['instance'] ==", "200: versions = r.json() logger.info(versions) for version in versions: if", "sortkey = 'O365#'+instance+'#'+cidr['serviceArea']+'#'+ip hostmask = ip.split('/') iptype = ipaddress.ip_address(hostmask[0]) nametype", "'pk': nametype, 'sk': sortkey, 'service': cidr['serviceArea'], 'cidr': ip, 'created': latest,", "IP Ranges') link = 'https://endpoints.office.com/endpoints/USGOVDoD?clientrequestid='+str(uuid.uuid4()) downloader(version['instance'], version['latest'], os.environ['DOD_PARAMETER'], link) elif", "== 'Germany': response = client.get_parameter(Name=os.environ['GERMANY_PARAMETER']) prevtoken = response['Parameter']['Value'] if prevtoken", "'https://endpoints.office.com/endpoints/worldwide?clientrequestid='+str(uuid.uuid4()) downloader(version['instance'], version['latest'], os.environ['WORLD_PARAMETER'], link) elif version['instance'] == 'USGovDoD': response", "= str(latest), Type = 'String', Overwrite = True ) def", "if version['instance'] == 'Worldwide': response = client.get_parameter(Name=os.environ['WORLD_PARAMETER']) prevtoken = response['Parameter']['Value']", "elif version['instance'] == 'Germany': response = client.get_parameter(Name=os.environ['GERMANY_PARAMETER']) prevtoken = response['Parameter']['Value']", "r.status_code == 200: for cidr in cidrs: try: if len(cidr['ips'])", "Ranges Updated') response = client.put_parameter( Name = parameter, Value =", "True ) def handler(event, context): r = requests.get('https://endpoints.office.com/version?clientrequestid='+str(uuid.uuid4())) logger.info('Link Status", "logger.info('Updating o365 USGovDoD IP Ranges') link = 'https://endpoints.office.com/endpoints/USGOVDoD?clientrequestid='+str(uuid.uuid4()) 
downloader(version['instance'], version['latest'],", ") except: pass logger.info('o365 '+instance+' IP Ranges Updated') response =", "netrange[0], netrange[-1] firstip = int(ipaddress.IPv4Address(first)) lastip = int(ipaddress.IPv4Address(last)) elif nametype", "os import requests import uuid logger = logging.getLogger() logger.setLevel(logging.INFO) dynamodb", "IP Ranges') link = 'https://endpoints.office.com/endpoints/China?clientrequestid='+str(uuid.uuid4()) downloader(version['instance'], version['latest'], os.environ['CHINA_PARAMETER'], link) elif", "pass logger.info('o365 '+instance+' IP Ranges Updated') response = client.put_parameter( Name", "r = requests.get('https://endpoints.office.com/version?clientrequestid='+str(uuid.uuid4())) logger.info('Link Status Code: '+str(r.status_code)) if r.status_code ==", "'String', Overwrite = True ) def handler(event, context): r =", "str(version['latest']): logger.info('Updating o365 Worldwide IP Ranges') link = 'https://endpoints.office.com/endpoints/worldwide?clientrequestid='+str(uuid.uuid4()) downloader(version['instance'],", "prevtoken = response['Parameter']['Value'] if prevtoken != str(version['latest']): logger.info('Updating o365 Germany", "Range Updates') return { 'statusCode': 200, 'body': json.dumps('Download o365 IP", "cidr in cidrs: try: if len(cidr['ips']) != 0: for ip", "= int(ipaddress.IPv4Address(last)) elif nametype == 'IPv6#': netrange = ipaddress.IPv6Network(ip) first,", "'created': latest, 'endpoint': instance, 'firstip': firstip, 'lastip': lastip } )", "link) elif version['instance'] == 'Germany': response = client.get_parameter(Name=os.environ['GERMANY_PARAMETER']) prevtoken =", "'endpoint': instance, 'firstip': firstip, 'lastip': lastip } ) except: pass", "o365 Worldwide IP Ranges') link = 'https://endpoints.office.com/endpoints/worldwide?clientrequestid='+str(uuid.uuid4()) downloader(version['instance'], version['latest'], os.environ['WORLD_PARAMETER'],", "= 
'https://endpoints.office.com/endpoints/worldwide?clientrequestid='+str(uuid.uuid4()) downloader(version['instance'], version['latest'], os.environ['WORLD_PARAMETER'], link) elif version['instance'] == 'USGovDoD':", "os.environ['GERMANY_PARAMETER'], link) else: logger.info('No o365 IP Range Updates') return {", "= client.get_parameter(Name=os.environ['WORLD_PARAMETER']) prevtoken = response['Parameter']['Value'] if prevtoken != str(version['latest']): logger.info('Updating", "'https://endpoints.office.com/endpoints/Germany?clientrequestid='+str(uuid.uuid4()) downloader(version['instance'], version['latest'], os.environ['GERMANY_PARAMETER'], link) else: logger.info('No o365 IP Range", "client.get_parameter(Name=os.environ['CHINA_PARAMETER']) prevtoken = response['Parameter']['Value'] if prevtoken != str(version['latest']): logger.info('Updating o365", "r.json() if r.status_code == 200: for cidr in cidrs: try:", "client.get_parameter(Name=os.environ['DOD_PARAMETER']) prevtoken = response['Parameter']['Value'] if prevtoken != str(version['latest']): logger.info('Updating o365", "return { 'statusCode': 200, 'body': json.dumps('Download o365 IP Ranges') }", "versions = r.json() logger.info(versions) for version in versions: if version['instance']", "== 200: for cidr in cidrs: try: if len(cidr['ips']) !=", "response = client.get_parameter(Name=os.environ['DOD_PARAMETER']) prevtoken = response['Parameter']['Value'] if prevtoken != str(version['latest']):", "logger.info('Link Status Code: '+str(r.status_code)) if r.status_code == 200: versions =", "import logging import os import requests import uuid logger =", "!= str(version['latest']): logger.info('Updating o365 USGovDoD IP Ranges') link = 'https://endpoints.office.com/endpoints/USGOVDoD?clientrequestid='+str(uuid.uuid4())", "requests import uuid logger = logging.getLogger() logger.setLevel(logging.INFO) dynamodb = boto3.resource('dynamodb')", "handler(event, context): r = 
requests.get('https://endpoints.office.com/version?clientrequestid='+str(uuid.uuid4())) logger.info('Link Status Code: '+str(r.status_code)) if", "latest, 'endpoint': instance, 'firstip': firstip, 'lastip': lastip } ) except:", "int(ipaddress.IPv4Address(last)) elif nametype == 'IPv6#': netrange = ipaddress.IPv6Network(ip) first, last", "import ipaddress import json import logging import os import requests", "= response['Parameter']['Value'] if prevtoken != str(version['latest']): logger.info('Updating o365 USGovGCCHigh IP", "import requests import uuid logger = logging.getLogger() logger.setLevel(logging.INFO) dynamodb =", "elif nametype == 'IPv6#': netrange = ipaddress.IPv6Network(ip) first, last =", "first, last = netrange[0], netrange[-1] firstip = int(ipaddress.IPv4Address(first)) lastip =", "client.get_parameter(Name=os.environ['WORLD_PARAMETER']) prevtoken = response['Parameter']['Value'] if prevtoken != str(version['latest']): logger.info('Updating o365", "link) elif version['instance'] == 'USGovDoD': response = client.get_parameter(Name=os.environ['DOD_PARAMETER']) prevtoken =", "response = client.get_parameter(Name=os.environ['HIGH_PARAMETER']) prevtoken = response['Parameter']['Value'] if prevtoken != str(version['latest']):", "def downloader(instance, latest, parameter, link): r = requests.get(link) cidrs =", "== 'IPv4#': netrange = ipaddress.IPv4Network(ip) first, last = netrange[0], netrange[-1]", "= 'IPv'+str(iptype.version)+'#' if nametype == 'IPv4#': netrange = ipaddress.IPv4Network(ip) first,", "ip, 'created': latest, 'endpoint': instance, 'firstip': firstip, 'lastip': lastip }", "if prevtoken != str(version['latest']): logger.info('Updating o365 Worldwide IP Ranges') link", "logger.info('Updating o365 China IP Ranges') link = 'https://endpoints.office.com/endpoints/China?clientrequestid='+str(uuid.uuid4()) downloader(version['instance'], version['latest'],", "versions: if version['instance'] == 'Worldwide': response = 
client.get_parameter(Name=os.environ['WORLD_PARAMETER']) prevtoken =", "response['Parameter']['Value'] if prevtoken != str(version['latest']): logger.info('Updating o365 China IP Ranges')", "prevtoken != str(version['latest']): logger.info('Updating o365 Germany IP Ranges') link =", "table = dynamodb.Table(os.environ['DYNAMODB_TABLE']) client = boto3.client('ssm') def downloader(instance, latest, parameter,", "try: if len(cidr['ips']) != 0: for ip in cidr['ips']: sortkey", "lastip = int(ipaddress.IPv4Address(last)) elif nametype == 'IPv6#': netrange = ipaddress.IPv6Network(ip)", "firstip, 'lastip': lastip } ) except: pass logger.info('o365 '+instance+' IP", "r = requests.get(link) cidrs = r.json() if r.status_code == 200:", "in cidr['ips']: sortkey = 'O365#'+instance+'#'+cidr['serviceArea']+'#'+ip hostmask = ip.split('/') iptype =", "= 'https://endpoints.office.com/endpoints/USGOVDoD?clientrequestid='+str(uuid.uuid4()) downloader(version['instance'], version['latest'], os.environ['DOD_PARAMETER'], link) elif version['instance'] == 'USGovGCCHigh':", "USGovDoD IP Ranges') link = 'https://endpoints.office.com/endpoints/USGOVDoD?clientrequestid='+str(uuid.uuid4()) downloader(version['instance'], version['latest'], os.environ['DOD_PARAMETER'], link)", "cidrs: try: if len(cidr['ips']) != 0: for ip in cidr['ips']:", "client.put_parameter( Name = parameter, Value = str(latest), Type = 'String',", "prevtoken = response['Parameter']['Value'] if prevtoken != str(version['latest']): logger.info('Updating o365 Worldwide", "== 'IPv6#': netrange = ipaddress.IPv6Network(ip) first, last = netrange[0], netrange[-1]", "= netrange[0], netrange[-1] firstip = int(ipaddress.IPv6Address(first)) lastip = int(ipaddress.IPv6Address(last)) table.put_item(", "Ranges') link = 'https://endpoints.office.com/endpoints/worldwide?clientrequestid='+str(uuid.uuid4()) downloader(version['instance'], version['latest'], os.environ['WORLD_PARAMETER'], link) elif version['instance']", "Ranges') link = 
'https://endpoints.office.com/endpoints/USGOVDoD?clientrequestid='+str(uuid.uuid4()) downloader(version['instance'], version['latest'], os.environ['DOD_PARAMETER'], link) elif version['instance']", "'USGovGCCHigh': response = client.get_parameter(Name=os.environ['HIGH_PARAMETER']) prevtoken = response['Parameter']['Value'] if prevtoken !=", "IP Ranges') link = 'https://endpoints.office.com/endpoints/USGOVGCCHigh?clientrequestid='+str(uuid.uuid4()) downloader(version['instance'], version['latest'], os.environ['HIGH_PARAMETER'], link) elif", "response = client.get_parameter(Name=os.environ['CHINA_PARAMETER']) prevtoken = response['Parameter']['Value'] if prevtoken != str(version['latest']):", "logger.info('No o365 IP Range Updates') return { 'statusCode': 200, 'body':", "'https://endpoints.office.com/endpoints/USGOVDoD?clientrequestid='+str(uuid.uuid4()) downloader(version['instance'], version['latest'], os.environ['DOD_PARAMETER'], link) elif version['instance'] == 'USGovGCCHigh': response", "{ 'pk': nametype, 'sk': sortkey, 'service': cidr['serviceArea'], 'cidr': ip, 'created':", "'+str(r.status_code)) if r.status_code == 200: versions = r.json() logger.info(versions) for", "if prevtoken != str(version['latest']): logger.info('Updating o365 China IP Ranges') link", "== 'China': response = client.get_parameter(Name=os.environ['CHINA_PARAMETER']) prevtoken = response['Parameter']['Value'] if prevtoken", "netrange[-1] firstip = int(ipaddress.IPv6Address(first)) lastip = int(ipaddress.IPv6Address(last)) table.put_item( Item= {", "int(ipaddress.IPv6Address(first)) lastip = int(ipaddress.IPv6Address(last)) table.put_item( Item= { 'pk': nametype, 'sk':", "r.status_code == 200: versions = r.json() logger.info(versions) for version in", "= dynamodb.Table(os.environ['DYNAMODB_TABLE']) client = boto3.client('ssm') def downloader(instance, latest, parameter, link):", "'IPv4#': netrange = ipaddress.IPv4Network(ip) first, last = netrange[0], netrange[-1] firstip", "0: for ip in 
cidr['ips']: sortkey = 'O365#'+instance+'#'+cidr['serviceArea']+'#'+ip hostmask =", "netrange = ipaddress.IPv6Network(ip) first, last = netrange[0], netrange[-1] firstip =", "downloader(version['instance'], version['latest'], os.environ['DOD_PARAMETER'], link) elif version['instance'] == 'USGovGCCHigh': response =", "else: logger.info('No o365 IP Range Updates') return { 'statusCode': 200,", "IP Ranges Updated') response = client.put_parameter( Name = parameter, Value", "Ranges') link = 'https://endpoints.office.com/endpoints/USGOVGCCHigh?clientrequestid='+str(uuid.uuid4()) downloader(version['instance'], version['latest'], os.environ['HIGH_PARAMETER'], link) elif version['instance']", "= ip.split('/') iptype = ipaddress.ip_address(hostmask[0]) nametype = 'IPv'+str(iptype.version)+'#' if nametype", "prevtoken != str(version['latest']): logger.info('Updating o365 China IP Ranges') link =", "prevtoken != str(version['latest']): logger.info('Updating o365 Worldwide IP Ranges') link =", "o365 USGovGCCHigh IP Ranges') link = 'https://endpoints.office.com/endpoints/USGOVGCCHigh?clientrequestid='+str(uuid.uuid4()) downloader(version['instance'], version['latest'], os.environ['HIGH_PARAMETER'],", "Item= { 'pk': nametype, 'sk': sortkey, 'service': cidr['serviceArea'], 'cidr': ip,", "version['latest'], os.environ['DOD_PARAMETER'], link) elif version['instance'] == 'USGovGCCHigh': response = client.get_parameter(Name=os.environ['HIGH_PARAMETER'])", "response['Parameter']['Value'] if prevtoken != str(version['latest']): logger.info('Updating o365 Germany IP Ranges')", "ip.split('/') iptype = ipaddress.ip_address(hostmask[0]) nametype = 'IPv'+str(iptype.version)+'#' if nametype ==", "os.environ['HIGH_PARAMETER'], link) elif version['instance'] == 'China': response = client.get_parameter(Name=os.environ['CHINA_PARAMETER']) prevtoken", "IP Range Updates') return { 'statusCode': 200, 'body': json.dumps('Download o365", "for ip in cidr['ips']: sortkey = 
'O365#'+instance+'#'+cidr['serviceArea']+'#'+ip hostmask = ip.split('/')", "in cidrs: try: if len(cidr['ips']) != 0: for ip in", "logger.info('Updating o365 Germany IP Ranges') link = 'https://endpoints.office.com/endpoints/Germany?clientrequestid='+str(uuid.uuid4()) downloader(version['instance'], version['latest'],", "IP Ranges') link = 'https://endpoints.office.com/endpoints/worldwide?clientrequestid='+str(uuid.uuid4()) downloader(version['instance'], version['latest'], os.environ['WORLD_PARAMETER'], link) elif", "logger.setLevel(logging.INFO) dynamodb = boto3.resource('dynamodb') table = dynamodb.Table(os.environ['DYNAMODB_TABLE']) client = boto3.client('ssm')", "'+instance+' IP Ranges Updated') response = client.put_parameter( Name = parameter,", "instance, 'firstip': firstip, 'lastip': lastip } ) except: pass logger.info('o365", "'firstip': firstip, 'lastip': lastip } ) except: pass logger.info('o365 '+instance+'", "version['instance'] == 'USGovDoD': response = client.get_parameter(Name=os.environ['DOD_PARAMETER']) prevtoken = response['Parameter']['Value'] if", "sortkey, 'service': cidr['serviceArea'], 'cidr': ip, 'created': latest, 'endpoint': instance, 'firstip':", "str(version['latest']): logger.info('Updating o365 Germany IP Ranges') link = 'https://endpoints.office.com/endpoints/Germany?clientrequestid='+str(uuid.uuid4()) downloader(version['instance'],", "Ranges') link = 'https://endpoints.office.com/endpoints/Germany?clientrequestid='+str(uuid.uuid4()) downloader(version['instance'], version['latest'], os.environ['GERMANY_PARAMETER'], link) else: logger.info('No", "prevtoken != str(version['latest']): logger.info('Updating o365 USGovDoD IP Ranges') link =", "= ipaddress.IPv4Network(ip) first, last = netrange[0], netrange[-1] firstip = int(ipaddress.IPv4Address(first))", "= boto3.client('ssm') def downloader(instance, latest, parameter, link): r = requests.get(link)", "import uuid logger = logging.getLogger() logger.setLevel(logging.INFO) dynamodb = 
boto3.resource('dynamodb') table", "netrange = ipaddress.IPv4Network(ip) first, last = netrange[0], netrange[-1] firstip =", "str(version['latest']): logger.info('Updating o365 USGovGCCHigh IP Ranges') link = 'https://endpoints.office.com/endpoints/USGOVGCCHigh?clientrequestid='+str(uuid.uuid4()) downloader(version['instance'],", "requests.get(link) cidrs = r.json() if r.status_code == 200: for cidr", "boto3 import ipaddress import json import logging import os import", "!= str(version['latest']): logger.info('Updating o365 China IP Ranges') link = 'https://endpoints.office.com/endpoints/China?clientrequestid='+str(uuid.uuid4())", "= r.json() if r.status_code == 200: for cidr in cidrs:", "China IP Ranges') link = 'https://endpoints.office.com/endpoints/China?clientrequestid='+str(uuid.uuid4()) downloader(version['instance'], version['latest'], os.environ['CHINA_PARAMETER'], link)", "version['latest'], os.environ['GERMANY_PARAMETER'], link) else: logger.info('No o365 IP Range Updates') return", "if nametype == 'IPv4#': netrange = ipaddress.IPv4Network(ip) first, last =", "response = client.get_parameter(Name=os.environ['WORLD_PARAMETER']) prevtoken = response['Parameter']['Value'] if prevtoken != str(version['latest']):", "Worldwide IP Ranges') link = 'https://endpoints.office.com/endpoints/worldwide?clientrequestid='+str(uuid.uuid4()) downloader(version['instance'], version['latest'], os.environ['WORLD_PARAMETER'], link)", "= boto3.resource('dynamodb') table = dynamodb.Table(os.environ['DYNAMODB_TABLE']) client = boto3.client('ssm') def downloader(instance,", "len(cidr['ips']) != 0: for ip in cidr['ips']: sortkey = 'O365#'+instance+'#'+cidr['serviceArea']+'#'+ip", "downloader(version['instance'], version['latest'], os.environ['GERMANY_PARAMETER'], link) else: logger.info('No o365 IP Range Updates')", ") def handler(event, context): r = requests.get('https://endpoints.office.com/version?clientrequestid='+str(uuid.uuid4())) logger.info('Link Status Code:", 
"logger.info('o365 '+instance+' IP Ranges Updated') response = client.put_parameter( Name =", "str(latest), Type = 'String', Overwrite = True ) def handler(event,", "str(version['latest']): logger.info('Updating o365 USGovDoD IP Ranges') link = 'https://endpoints.office.com/endpoints/USGOVDoD?clientrequestid='+str(uuid.uuid4()) downloader(version['instance'],", "!= str(version['latest']): logger.info('Updating o365 Worldwide IP Ranges') link = 'https://endpoints.office.com/endpoints/worldwide?clientrequestid='+str(uuid.uuid4())", "Type = 'String', Overwrite = True ) def handler(event, context):", "elif version['instance'] == 'USGovDoD': response = client.get_parameter(Name=os.environ['DOD_PARAMETER']) prevtoken = response['Parameter']['Value']", "!= str(version['latest']): logger.info('Updating o365 USGovGCCHigh IP Ranges') link = 'https://endpoints.office.com/endpoints/USGOVGCCHigh?clientrequestid='+str(uuid.uuid4())", "= logging.getLogger() logger.setLevel(logging.INFO) dynamodb = boto3.resource('dynamodb') table = dynamodb.Table(os.environ['DYNAMODB_TABLE']) client", "str(version['latest']): logger.info('Updating o365 China IP Ranges') link = 'https://endpoints.office.com/endpoints/China?clientrequestid='+str(uuid.uuid4()) downloader(version['instance'],", "cidr['ips']: sortkey = 'O365#'+instance+'#'+cidr['serviceArea']+'#'+ip hostmask = ip.split('/') iptype = ipaddress.ip_address(hostmask[0])", "link = 'https://endpoints.office.com/endpoints/USGOVGCCHigh?clientrequestid='+str(uuid.uuid4()) downloader(version['instance'], version['latest'], os.environ['HIGH_PARAMETER'], link) elif version['instance'] ==", "version in versions: if version['instance'] == 'Worldwide': response = client.get_parameter(Name=os.environ['WORLD_PARAMETER'])", "link) else: logger.info('No o365 IP Range Updates') return { 'statusCode':", "boto3.client('ssm') def downloader(instance, latest, parameter, link): r = requests.get(link) cidrs", "o365 IP Range Updates') return { 'statusCode': 
200, 'body': json.dumps('Download", "if r.status_code == 200: for cidr in cidrs: try: if", "ipaddress.IPv6Network(ip) first, last = netrange[0], netrange[-1] firstip = int(ipaddress.IPv6Address(first)) lastip", "version['instance'] == 'China': response = client.get_parameter(Name=os.environ['CHINA_PARAMETER']) prevtoken = response['Parameter']['Value'] if", "context): r = requests.get('https://endpoints.office.com/version?clientrequestid='+str(uuid.uuid4())) logger.info('Link Status Code: '+str(r.status_code)) if r.status_code", "ipaddress.IPv4Network(ip) first, last = netrange[0], netrange[-1] firstip = int(ipaddress.IPv4Address(first)) lastip", "except: pass logger.info('o365 '+instance+' IP Ranges Updated') response = client.put_parameter(", "Overwrite = True ) def handler(event, context): r = requests.get('https://endpoints.office.com/version?clientrequestid='+str(uuid.uuid4()))", "logger.info(versions) for version in versions: if version['instance'] == 'Worldwide': response", "Updated') response = client.put_parameter( Name = parameter, Value = str(latest),", "os.environ['WORLD_PARAMETER'], link) elif version['instance'] == 'USGovDoD': response = client.get_parameter(Name=os.environ['DOD_PARAMETER']) prevtoken", "version['instance'] == 'USGovGCCHigh': response = client.get_parameter(Name=os.environ['HIGH_PARAMETER']) prevtoken = response['Parameter']['Value'] if", "ip in cidr['ips']: sortkey = 'O365#'+instance+'#'+cidr['serviceArea']+'#'+ip hostmask = ip.split('/') iptype", "prevtoken = response['Parameter']['Value'] if prevtoken != str(version['latest']): logger.info('Updating o365 USGovGCCHigh", "Ranges') link = 'https://endpoints.office.com/endpoints/China?clientrequestid='+str(uuid.uuid4()) downloader(version['instance'], version['latest'], os.environ['CHINA_PARAMETER'], link) elif version['instance']", "version['latest'], os.environ['CHINA_PARAMETER'], link) elif version['instance'] == 'Germany': response = 
client.get_parameter(Name=os.environ['GERMANY_PARAMETER'])", "first, last = netrange[0], netrange[-1] firstip = int(ipaddress.IPv6Address(first)) lastip =", "logger = logging.getLogger() logger.setLevel(logging.INFO) dynamodb = boto3.resource('dynamodb') table = dynamodb.Table(os.environ['DYNAMODB_TABLE'])", "for cidr in cidrs: try: if len(cidr['ips']) != 0: for", "= response['Parameter']['Value'] if prevtoken != str(version['latest']): logger.info('Updating o365 Germany IP", "if prevtoken != str(version['latest']): logger.info('Updating o365 Germany IP Ranges') link", "== 'USGovDoD': response = client.get_parameter(Name=os.environ['DOD_PARAMETER']) prevtoken = response['Parameter']['Value'] if prevtoken", "link) elif version['instance'] == 'USGovGCCHigh': response = client.get_parameter(Name=os.environ['HIGH_PARAMETER']) prevtoken =", "= response['Parameter']['Value'] if prevtoken != str(version['latest']): logger.info('Updating o365 China IP", "= int(ipaddress.IPv6Address(last)) table.put_item( Item= { 'pk': nametype, 'sk': sortkey, 'service':", "version['instance'] == 'Worldwide': response = client.get_parameter(Name=os.environ['WORLD_PARAMETER']) prevtoken = response['Parameter']['Value'] if", "Status Code: '+str(r.status_code)) if r.status_code == 200: versions = r.json()", "= response['Parameter']['Value'] if prevtoken != str(version['latest']): logger.info('Updating o365 Worldwide IP", "import json import logging import os import requests import uuid", "int(ipaddress.IPv6Address(last)) table.put_item( Item= { 'pk': nametype, 'sk': sortkey, 'service': cidr['serviceArea'],", "= parameter, Value = str(latest), Type = 'String', Overwrite =", "if len(cidr['ips']) != 0: for ip in cidr['ips']: sortkey =", "last = netrange[0], netrange[-1] firstip = int(ipaddress.IPv4Address(first)) lastip = int(ipaddress.IPv4Address(last))", "last = netrange[0], netrange[-1] firstip = int(ipaddress.IPv6Address(first)) lastip = int(ipaddress.IPv6Address(last))", "!= 
str(version['latest']): logger.info('Updating o365 Germany IP Ranges') link = 'https://endpoints.office.com/endpoints/Germany?clientrequestid='+str(uuid.uuid4())", "if r.status_code == 200: versions = r.json() logger.info(versions) for version", "prevtoken = response['Parameter']['Value'] if prevtoken != str(version['latest']): logger.info('Updating o365 China", "o365 USGovDoD IP Ranges') link = 'https://endpoints.office.com/endpoints/USGOVDoD?clientrequestid='+str(uuid.uuid4()) downloader(version['instance'], version['latest'], os.environ['DOD_PARAMETER'],", "Updates') return { 'statusCode': 200, 'body': json.dumps('Download o365 IP Ranges')", "netrange[-1] firstip = int(ipaddress.IPv4Address(first)) lastip = int(ipaddress.IPv4Address(last)) elif nametype ==", "'IPv'+str(iptype.version)+'#' if nametype == 'IPv4#': netrange = ipaddress.IPv4Network(ip) first, last", "lastip = int(ipaddress.IPv6Address(last)) table.put_item( Item= { 'pk': nametype, 'sk': sortkey,", "= client.get_parameter(Name=os.environ['HIGH_PARAMETER']) prevtoken = response['Parameter']['Value'] if prevtoken != str(version['latest']): logger.info('Updating", "os.environ['DOD_PARAMETER'], link) elif version['instance'] == 'USGovGCCHigh': response = client.get_parameter(Name=os.environ['HIGH_PARAMETER']) prevtoken", "for version in versions: if version['instance'] == 'Worldwide': response =", "Name = parameter, Value = str(latest), Type = 'String', Overwrite", "netrange[0], netrange[-1] firstip = int(ipaddress.IPv6Address(first)) lastip = int(ipaddress.IPv6Address(last)) table.put_item( Item=", "= 'String', Overwrite = True ) def handler(event, context): r", "if prevtoken != str(version['latest']): logger.info('Updating o365 USGovDoD IP Ranges') link", "response = client.put_parameter( Name = parameter, Value = str(latest), Type", "latest, parameter, link): r = requests.get(link) cidrs = r.json() if", "= requests.get('https://endpoints.office.com/version?clientrequestid='+str(uuid.uuid4())) 
logger.info('Link Status Code: '+str(r.status_code)) if r.status_code == 200:", "!= 0: for ip in cidr['ips']: sortkey = 'O365#'+instance+'#'+cidr['serviceArea']+'#'+ip hostmask", "client.get_parameter(Name=os.environ['GERMANY_PARAMETER']) prevtoken = response['Parameter']['Value'] if prevtoken != str(version['latest']): logger.info('Updating o365", "response['Parameter']['Value'] if prevtoken != str(version['latest']): logger.info('Updating o365 Worldwide IP Ranges')", "json import logging import os import requests import uuid logger", "nametype, 'sk': sortkey, 'service': cidr['serviceArea'], 'cidr': ip, 'created': latest, 'endpoint':", "== 'Worldwide': response = client.get_parameter(Name=os.environ['WORLD_PARAMETER']) prevtoken = response['Parameter']['Value'] if prevtoken", "= 'https://endpoints.office.com/endpoints/USGOVGCCHigh?clientrequestid='+str(uuid.uuid4()) downloader(version['instance'], version['latest'], os.environ['HIGH_PARAMETER'], link) elif version['instance'] == 'China':", "= True ) def handler(event, context): r = requests.get('https://endpoints.office.com/version?clientrequestid='+str(uuid.uuid4())) logger.info('Link", "requests.get('https://endpoints.office.com/version?clientrequestid='+str(uuid.uuid4())) logger.info('Link Status Code: '+str(r.status_code)) if r.status_code == 200: versions", "200: for cidr in cidrs: try: if len(cidr['ips']) != 0:", "logging.getLogger() logger.setLevel(logging.INFO) dynamodb = boto3.resource('dynamodb') table = dynamodb.Table(os.environ['DYNAMODB_TABLE']) client =", "o365 Germany IP Ranges') link = 'https://endpoints.office.com/endpoints/Germany?clientrequestid='+str(uuid.uuid4()) downloader(version['instance'], version['latest'], os.environ['GERMANY_PARAMETER'],", "prevtoken != str(version['latest']): logger.info('Updating o365 USGovGCCHigh IP Ranges') link =", "'lastip': lastip } ) except: pass logger.info('o365 '+instance+' IP Ranges", "o365 China IP Ranges') link = 
'https://endpoints.office.com/endpoints/China?clientrequestid='+str(uuid.uuid4()) downloader(version['instance'], version['latest'], os.environ['CHINA_PARAMETER'],", "downloader(instance, latest, parameter, link): r = requests.get(link) cidrs = r.json()", "link): r = requests.get(link) cidrs = r.json() if r.status_code ==", "= r.json() logger.info(versions) for version in versions: if version['instance'] ==", "'https://endpoints.office.com/endpoints/USGOVGCCHigh?clientrequestid='+str(uuid.uuid4()) downloader(version['instance'], version['latest'], os.environ['HIGH_PARAMETER'], link) elif version['instance'] == 'China': response", "import os import requests import uuid logger = logging.getLogger() logger.setLevel(logging.INFO)", "} ) except: pass logger.info('o365 '+instance+' IP Ranges Updated') response", "'service': cidr['serviceArea'], 'cidr': ip, 'created': latest, 'endpoint': instance, 'firstip': firstip,", "= client.put_parameter( Name = parameter, Value = str(latest), Type =", "downloader(version['instance'], version['latest'], os.environ['WORLD_PARAMETER'], link) elif version['instance'] == 'USGovDoD': response =", "version['latest'], os.environ['HIGH_PARAMETER'], link) elif version['instance'] == 'China': response = client.get_parameter(Name=os.environ['CHINA_PARAMETER'])", "logger.info('Updating o365 Worldwide IP Ranges') link = 'https://endpoints.office.com/endpoints/worldwide?clientrequestid='+str(uuid.uuid4()) downloader(version['instance'], version['latest'],", "'China': response = client.get_parameter(Name=os.environ['CHINA_PARAMETER']) prevtoken = response['Parameter']['Value'] if prevtoken !=", "prevtoken = response['Parameter']['Value'] if prevtoken != str(version['latest']): logger.info('Updating o365 USGovDoD", "uuid logger = logging.getLogger() logger.setLevel(logging.INFO) dynamodb = boto3.resource('dynamodb') table =", "cidr['serviceArea'], 'cidr': ip, 'created': latest, 'endpoint': instance, 'firstip': firstip, 'lastip':", "hostmask = 
ip.split('/') iptype = ipaddress.ip_address(hostmask[0]) nametype = 'IPv'+str(iptype.version)+'#' if", "= ipaddress.ip_address(hostmask[0]) nametype = 'IPv'+str(iptype.version)+'#' if nametype == 'IPv4#': netrange", "= int(ipaddress.IPv6Address(first)) lastip = int(ipaddress.IPv6Address(last)) table.put_item( Item= { 'pk': nametype,", "link = 'https://endpoints.office.com/endpoints/USGOVDoD?clientrequestid='+str(uuid.uuid4()) downloader(version['instance'], version['latest'], os.environ['DOD_PARAMETER'], link) elif version['instance'] ==", "USGovGCCHigh IP Ranges') link = 'https://endpoints.office.com/endpoints/USGOVGCCHigh?clientrequestid='+str(uuid.uuid4()) downloader(version['instance'], version['latest'], os.environ['HIGH_PARAMETER'], link)", "= int(ipaddress.IPv4Address(first)) lastip = int(ipaddress.IPv4Address(last)) elif nametype == 'IPv6#': netrange", "'USGovDoD': response = client.get_parameter(Name=os.environ['DOD_PARAMETER']) prevtoken = response['Parameter']['Value'] if prevtoken !=", "ipaddress import json import logging import os import requests import", "boto3.resource('dynamodb') table = dynamodb.Table(os.environ['DYNAMODB_TABLE']) client = boto3.client('ssm') def downloader(instance, latest,", "cidrs = r.json() if r.status_code == 200: for cidr in", "version['latest'], os.environ['WORLD_PARAMETER'], link) elif version['instance'] == 'USGovDoD': response = client.get_parameter(Name=os.environ['DOD_PARAMETER'])", "== 'USGovGCCHigh': response = client.get_parameter(Name=os.environ['HIGH_PARAMETER']) prevtoken = response['Parameter']['Value'] if prevtoken", "'Worldwide': response = client.get_parameter(Name=os.environ['WORLD_PARAMETER']) prevtoken = response['Parameter']['Value'] if prevtoken !=", "client = boto3.client('ssm') def downloader(instance, latest, parameter, link): r =", "elif version['instance'] == 'China': response = client.get_parameter(Name=os.environ['CHINA_PARAMETER']) prevtoken = response['Parameter']['Value']", "= 
ipaddress.IPv6Network(ip) first, last = netrange[0], netrange[-1] firstip = int(ipaddress.IPv6Address(first))", "downloader(version['instance'], version['latest'], os.environ['CHINA_PARAMETER'], link) elif version['instance'] == 'Germany': response =", "link = 'https://endpoints.office.com/endpoints/Germany?clientrequestid='+str(uuid.uuid4()) downloader(version['instance'], version['latest'], os.environ['GERMANY_PARAMETER'], link) else: logger.info('No o365", "= 'O365#'+instance+'#'+cidr['serviceArea']+'#'+ip hostmask = ip.split('/') iptype = ipaddress.ip_address(hostmask[0]) nametype =", "logger.info('Updating o365 USGovGCCHigh IP Ranges') link = 'https://endpoints.office.com/endpoints/USGOVGCCHigh?clientrequestid='+str(uuid.uuid4()) downloader(version['instance'], version['latest'],", "firstip = int(ipaddress.IPv6Address(first)) lastip = int(ipaddress.IPv6Address(last)) table.put_item( Item= { 'pk':", "dynamodb = boto3.resource('dynamodb') table = dynamodb.Table(os.environ['DYNAMODB_TABLE']) client = boto3.client('ssm') def", "'IPv6#': netrange = ipaddress.IPv6Network(ip) first, last = netrange[0], netrange[-1] firstip", "os.environ['CHINA_PARAMETER'], link) elif version['instance'] == 'Germany': response = client.get_parameter(Name=os.environ['GERMANY_PARAMETER']) prevtoken", "lastip } ) except: pass logger.info('o365 '+instance+' IP Ranges Updated')", "iptype = ipaddress.ip_address(hostmask[0]) nametype = 'IPv'+str(iptype.version)+'#' if nametype == 'IPv4#':", "'https://endpoints.office.com/endpoints/China?clientrequestid='+str(uuid.uuid4()) downloader(version['instance'], version['latest'], os.environ['CHINA_PARAMETER'], link) elif version['instance'] == 'Germany': response", "nametype = 'IPv'+str(iptype.version)+'#' if nametype == 'IPv4#': netrange = ipaddress.IPv4Network(ip)", "r.json() logger.info(versions) for version in versions: if version['instance'] == 'Worldwide':", "client.get_parameter(Name=os.environ['HIGH_PARAMETER']) prevtoken = 
response['Parameter']['Value'] if prevtoken != str(version['latest']): logger.info('Updating o365", "nametype == 'IPv4#': netrange = ipaddress.IPv4Network(ip) first, last = netrange[0],", "'O365#'+instance+'#'+cidr['serviceArea']+'#'+ip hostmask = ip.split('/') iptype = ipaddress.ip_address(hostmask[0]) nametype = 'IPv'+str(iptype.version)+'#'", "dynamodb.Table(os.environ['DYNAMODB_TABLE']) client = boto3.client('ssm') def downloader(instance, latest, parameter, link): r", "parameter, Value = str(latest), Type = 'String', Overwrite = True", "Value = str(latest), Type = 'String', Overwrite = True )", "= client.get_parameter(Name=os.environ['CHINA_PARAMETER']) prevtoken = response['Parameter']['Value'] if prevtoken != str(version['latest']): logger.info('Updating", "response = client.get_parameter(Name=os.environ['GERMANY_PARAMETER']) prevtoken = response['Parameter']['Value'] if prevtoken != str(version['latest']):", "= client.get_parameter(Name=os.environ['GERMANY_PARAMETER']) prevtoken = response['Parameter']['Value'] if prevtoken != str(version['latest']): logger.info('Updating", "link) elif version['instance'] == 'China': response = client.get_parameter(Name=os.environ['CHINA_PARAMETER']) prevtoken =", "table.put_item( Item= { 'pk': nametype, 'sk': sortkey, 'service': cidr['serviceArea'], 'cidr':", "Code: '+str(r.status_code)) if r.status_code == 200: versions = r.json() logger.info(versions)", "== 200: versions = r.json() logger.info(versions) for version in versions:", "elif version['instance'] == 'USGovGCCHigh': response = client.get_parameter(Name=os.environ['HIGH_PARAMETER']) prevtoken = response['Parameter']['Value']", "'sk': sortkey, 'service': cidr['serviceArea'], 'cidr': ip, 'created': latest, 'endpoint': instance,", "IP Ranges') link = 'https://endpoints.office.com/endpoints/Germany?clientrequestid='+str(uuid.uuid4()) downloader(version['instance'], version['latest'], os.environ['GERMANY_PARAMETER'], link) else:", "= 
client.get_parameter(Name=os.environ['DOD_PARAMETER']) prevtoken = response['Parameter']['Value'] if prevtoken != str(version['latest']): logger.info('Updating", "ipaddress.ip_address(hostmask[0]) nametype = 'IPv'+str(iptype.version)+'#' if nametype == 'IPv4#': netrange =", "version['instance'] == 'Germany': response = client.get_parameter(Name=os.environ['GERMANY_PARAMETER']) prevtoken = response['Parameter']['Value'] if", "if prevtoken != str(version['latest']): logger.info('Updating o365 USGovGCCHigh IP Ranges') link", "firstip = int(ipaddress.IPv4Address(first)) lastip = int(ipaddress.IPv4Address(last)) elif nametype == 'IPv6#':", "link = 'https://endpoints.office.com/endpoints/worldwide?clientrequestid='+str(uuid.uuid4()) downloader(version['instance'], version['latest'], os.environ['WORLD_PARAMETER'], link) elif version['instance'] ==", "'cidr': ip, 'created': latest, 'endpoint': instance, 'firstip': firstip, 'lastip': lastip", "= netrange[0], netrange[-1] firstip = int(ipaddress.IPv4Address(first)) lastip = int(ipaddress.IPv4Address(last)) elif", "downloader(version['instance'], version['latest'], os.environ['HIGH_PARAMETER'], link) elif version['instance'] == 'China': response =", "import boto3 import ipaddress import json import logging import os", "'Germany': response = client.get_parameter(Name=os.environ['GERMANY_PARAMETER']) prevtoken = response['Parameter']['Value'] if prevtoken !=", "= 'https://endpoints.office.com/endpoints/Germany?clientrequestid='+str(uuid.uuid4()) downloader(version['instance'], version['latest'], os.environ['GERMANY_PARAMETER'], link) else: logger.info('No o365 IP", "nametype == 'IPv6#': netrange = ipaddress.IPv6Network(ip) first, last = netrange[0],", "response['Parameter']['Value'] if prevtoken != str(version['latest']): logger.info('Updating o365 USGovGCCHigh IP Ranges')", "int(ipaddress.IPv4Address(first)) lastip = int(ipaddress.IPv4Address(last)) elif nametype == 'IPv6#': netrange =", "response['Parameter']['Value'] if 
prevtoken != str(version['latest']): logger.info('Updating o365 USGovDoD IP Ranges')", "= 'https://endpoints.office.com/endpoints/China?clientrequestid='+str(uuid.uuid4()) downloader(version['instance'], version['latest'], os.environ['CHINA_PARAMETER'], link) elif version['instance'] == 'Germany':", "Germany IP Ranges') link = 'https://endpoints.office.com/endpoints/Germany?clientrequestid='+str(uuid.uuid4()) downloader(version['instance'], version['latest'], os.environ['GERMANY_PARAMETER'], link)", "logging import os import requests import uuid logger = logging.getLogger()", "def handler(event, context): r = requests.get('https://endpoints.office.com/version?clientrequestid='+str(uuid.uuid4())) logger.info('Link Status Code: '+str(r.status_code))", "parameter, link): r = requests.get(link) cidrs = r.json() if r.status_code", "= response['Parameter']['Value'] if prevtoken != str(version['latest']): logger.info('Updating o365 USGovDoD IP", "in versions: if version['instance'] == 'Worldwide': response = client.get_parameter(Name=os.environ['WORLD_PARAMETER']) prevtoken" ]
[ "from money import * c = Company(\"Example Inc\") c.add_flow(FixedCost(\"Initial Cash\",", "* c = Company(\"Example Inc\") c.add_flow(FixedCost(\"Initial Cash\", -500000)) c.add_flow(FixedCost(\"Incorporation\", 500))", "c.add_flow(DelayedCost(\"2012-02-01\", ConstantCost(\"Office\", 50000))) c.add_flow(DelayedCost(\"2012-02-01\", FixedCost(\"Financing\", 50000))) c.add_flow(SemiMonthlyCost(\"Payroll\", 4000, \"2012-01-01\")) c.add_flow(SemiMonthlyWages(\"Payroll\",", "Inc\") c.add_flow(FixedCost(\"Initial Cash\", -500000)) c.add_flow(FixedCost(\"Incorporation\", 500)) c.add_flow(ConstantCost(\"Office\", 50000)) c.add_flow(PeriodicCost(\"Subscription\", 4000,", "500)) c.add_flow(ConstantCost(\"Office\", 50000)) c.add_flow(PeriodicCost(\"Subscription\", 4000, \"2012-01-05\", 14)) c.add_flow(DelayedCost(\"2012-02-01\", ConstantCost(\"Office\", 50000)))", "<reponame>zulip/finbot #!/usr/bin/python from money import * c = Company(\"Example Inc\")", "money import * c = Company(\"Example Inc\") c.add_flow(FixedCost(\"Initial Cash\", -500000))", "ConstantCost(\"Office\", 50000))) c.add_flow(DelayedCost(\"2012-02-01\", FixedCost(\"Financing\", 50000))) c.add_flow(SemiMonthlyCost(\"Payroll\", 4000, \"2012-01-01\")) c.add_flow(SemiMonthlyWages(\"Payroll\", 6000,", "c = Company(\"Example Inc\") c.add_flow(FixedCost(\"Initial Cash\", -500000)) c.add_flow(FixedCost(\"Incorporation\", 500)) c.add_flow(ConstantCost(\"Office\",", "c.add_flow(ConstantCost(\"Office\", 50000)) c.add_flow(PeriodicCost(\"Subscription\", 4000, \"2012-01-05\", 14)) c.add_flow(DelayedCost(\"2012-02-01\", ConstantCost(\"Office\", 50000))) c.add_flow(DelayedCost(\"2012-02-01\",", "Cash\", -500000)) c.add_flow(FixedCost(\"Incorporation\", 500)) c.add_flow(ConstantCost(\"Office\", 50000)) c.add_flow(PeriodicCost(\"Subscription\", 4000, \"2012-01-05\", 14))", "c.add_flow(PeriodicCost(\"Subscription\", 4000, \"2012-01-05\", 14)) c.add_flow(DelayedCost(\"2012-02-01\", ConstantCost(\"Office\", 50000))) 
c.add_flow(DelayedCost(\"2012-02-01\", FixedCost(\"Financing\", 50000)))", "50000))) c.add_flow(SemiMonthlyCost(\"Payroll\", 4000, \"2012-01-01\")) c.add_flow(SemiMonthlyWages(\"Payroll\", 6000, \"2012-01-01\")) print(c) c.cash_monthly_summary(\"2012-01-01\", \"2013-07-01\")", "50000))) c.add_flow(DelayedCost(\"2012-02-01\", FixedCost(\"Financing\", 50000))) c.add_flow(SemiMonthlyCost(\"Payroll\", 4000, \"2012-01-01\")) c.add_flow(SemiMonthlyWages(\"Payroll\", 6000, \"2012-01-01\"))", "-500000)) c.add_flow(FixedCost(\"Incorporation\", 500)) c.add_flow(ConstantCost(\"Office\", 50000)) c.add_flow(PeriodicCost(\"Subscription\", 4000, \"2012-01-05\", 14)) c.add_flow(DelayedCost(\"2012-02-01\",", "import * c = Company(\"Example Inc\") c.add_flow(FixedCost(\"Initial Cash\", -500000)) c.add_flow(FixedCost(\"Incorporation\",", "FixedCost(\"Financing\", 50000))) c.add_flow(SemiMonthlyCost(\"Payroll\", 4000, \"2012-01-01\")) c.add_flow(SemiMonthlyWages(\"Payroll\", 6000, \"2012-01-01\")) print(c) c.cash_monthly_summary(\"2012-01-01\",", "#!/usr/bin/python from money import * c = Company(\"Example Inc\") c.add_flow(FixedCost(\"Initial", "Company(\"Example Inc\") c.add_flow(FixedCost(\"Initial Cash\", -500000)) c.add_flow(FixedCost(\"Incorporation\", 500)) c.add_flow(ConstantCost(\"Office\", 50000)) c.add_flow(PeriodicCost(\"Subscription\",", "\"2012-01-05\", 14)) c.add_flow(DelayedCost(\"2012-02-01\", ConstantCost(\"Office\", 50000))) c.add_flow(DelayedCost(\"2012-02-01\", FixedCost(\"Financing\", 50000))) c.add_flow(SemiMonthlyCost(\"Payroll\", 4000,", "14)) c.add_flow(DelayedCost(\"2012-02-01\", ConstantCost(\"Office\", 50000))) c.add_flow(DelayedCost(\"2012-02-01\", FixedCost(\"Financing\", 50000))) c.add_flow(SemiMonthlyCost(\"Payroll\", 4000, \"2012-01-01\"))", "c.add_flow(DelayedCost(\"2012-02-01\", FixedCost(\"Financing\", 50000))) c.add_flow(SemiMonthlyCost(\"Payroll\", 4000, \"2012-01-01\")) c.add_flow(SemiMonthlyWages(\"Payroll\", 6000, \"2012-01-01\")) print(c)", 
"= Company(\"Example Inc\") c.add_flow(FixedCost(\"Initial Cash\", -500000)) c.add_flow(FixedCost(\"Incorporation\", 500)) c.add_flow(ConstantCost(\"Office\", 50000))", "50000)) c.add_flow(PeriodicCost(\"Subscription\", 4000, \"2012-01-05\", 14)) c.add_flow(DelayedCost(\"2012-02-01\", ConstantCost(\"Office\", 50000))) c.add_flow(DelayedCost(\"2012-02-01\", FixedCost(\"Financing\",", "4000, \"2012-01-05\", 14)) c.add_flow(DelayedCost(\"2012-02-01\", ConstantCost(\"Office\", 50000))) c.add_flow(DelayedCost(\"2012-02-01\", FixedCost(\"Financing\", 50000))) c.add_flow(SemiMonthlyCost(\"Payroll\",", "c.add_flow(FixedCost(\"Incorporation\", 500)) c.add_flow(ConstantCost(\"Office\", 50000)) c.add_flow(PeriodicCost(\"Subscription\", 4000, \"2012-01-05\", 14)) c.add_flow(DelayedCost(\"2012-02-01\", ConstantCost(\"Office\",", "c.add_flow(FixedCost(\"Initial Cash\", -500000)) c.add_flow(FixedCost(\"Incorporation\", 500)) c.add_flow(ConstantCost(\"Office\", 50000)) c.add_flow(PeriodicCost(\"Subscription\", 4000, \"2012-01-05\"," ]
[ "1', '-W 1', address), stdout=subprocess.PIPE, stderr=subprocess.PIPE) LOGGER.info('Ping server %s -", "port) except socket.error as e: LOGGER.error('Checking port %s:%d - Failed',", "raise ValidatorError(e) ping.short_name = 'PING' def port(address, port): s =", "stderr=subprocess.PIPE) LOGGER.info('Ping server %s - OK', address) except subprocess.CalledProcessError as", "socket import subprocess import argparse import logging LOGGER = logging.getLogger(__name__)", "logging LOGGER = logging.getLogger(__name__) class ValidatorError(Exception): pass def ping(address): try:", "s.connect((address, port)) LOGGER.info('Checking port %s:%d - OK', address, port) except", "def ping(address): try: subprocess.check_call(('ping', '-c 1', '-W 1', address), stdout=subprocess.PIPE,", "subprocess.check_call(('ping', '-c 1', '-W 1', address), stdout=subprocess.PIPE, stderr=subprocess.PIPE) LOGGER.info('Ping server", "import logging LOGGER = logging.getLogger(__name__) class ValidatorError(Exception): pass def ping(address):", "import socket import subprocess import argparse import logging LOGGER =", "%s:%d - OK', address, port) except socket.error as e: LOGGER.error('Checking", "address), stdout=subprocess.PIPE, stderr=subprocess.PIPE) LOGGER.info('Ping server %s - OK', address) except", "LOGGER.info('Ping server %s - OK', address) except subprocess.CalledProcessError as e:", "OK', address) except subprocess.CalledProcessError as e: LOGGER.error('Ping server %s -", "port %s:%d - Failed', address, port) raise ValidatorError(e) port.short_name =", "'PING' def port(address, port): s = socket.socket() try: s.connect((address, port))", "stdout=subprocess.PIPE, stderr=subprocess.PIPE) LOGGER.info('Ping server %s - OK', address) except subprocess.CalledProcessError", "import argparse import logging LOGGER = logging.getLogger(__name__) class ValidatorError(Exception): pass", "os import socket import subprocess import argparse import logging LOGGER", "- Failed', address) raise 
ValidatorError(e) ping.short_name = 'PING' def port(address,", "import os import socket import subprocess import argparse import logging", "s = socket.socket() try: s.connect((address, port)) LOGGER.info('Checking port %s:%d -", "= 'PING' def port(address, port): s = socket.socket() try: s.connect((address,", "LOGGER.info('Checking port %s:%d - OK', address, port) except socket.error as", "except subprocess.CalledProcessError as e: LOGGER.error('Ping server %s - Failed', address)", "def port(address, port): s = socket.socket() try: s.connect((address, port)) LOGGER.info('Checking", "LOGGER = logging.getLogger(__name__) class ValidatorError(Exception): pass def ping(address): try: subprocess.check_call(('ping',", "port): s = socket.socket() try: s.connect((address, port)) LOGGER.info('Checking port %s:%d", "- OK', address, port) except socket.error as e: LOGGER.error('Checking port", "address, port) except socket.error as e: LOGGER.error('Checking port %s:%d -", "OK', address, port) except socket.error as e: LOGGER.error('Checking port %s:%d", "address) raise ValidatorError(e) ping.short_name = 'PING' def port(address, port): s", "ValidatorError(Exception): pass def ping(address): try: subprocess.check_call(('ping', '-c 1', '-W 1',", "= socket.socket() try: s.connect((address, port)) LOGGER.info('Checking port %s:%d - OK',", "'-W 1', address), stdout=subprocess.PIPE, stderr=subprocess.PIPE) LOGGER.info('Ping server %s - OK',", "1', address), stdout=subprocess.PIPE, stderr=subprocess.PIPE) LOGGER.info('Ping server %s - OK', address)", "- OK', address) except subprocess.CalledProcessError as e: LOGGER.error('Ping server %s", "import subprocess import argparse import logging LOGGER = logging.getLogger(__name__) class", "class ValidatorError(Exception): pass def ping(address): try: subprocess.check_call(('ping', '-c 1', '-W", "server %s - OK', address) except subprocess.CalledProcessError as e: LOGGER.error('Ping", "e: LOGGER.error('Checking port %s:%d - Failed', 
address, port) raise ValidatorError(e)", "try: subprocess.check_call(('ping', '-c 1', '-W 1', address), stdout=subprocess.PIPE, stderr=subprocess.PIPE) LOGGER.info('Ping", "LOGGER.error('Ping server %s - Failed', address) raise ValidatorError(e) ping.short_name =", "pass def ping(address): try: subprocess.check_call(('ping', '-c 1', '-W 1', address),", "ValidatorError(e) ping.short_name = 'PING' def port(address, port): s = socket.socket()", "port(address, port): s = socket.socket() try: s.connect((address, port)) LOGGER.info('Checking port", "port)) LOGGER.info('Checking port %s:%d - OK', address, port) except socket.error", "server %s - Failed', address) raise ValidatorError(e) ping.short_name = 'PING'", "except socket.error as e: LOGGER.error('Checking port %s:%d - Failed', address,", "ping(address): try: subprocess.check_call(('ping', '-c 1', '-W 1', address), stdout=subprocess.PIPE, stderr=subprocess.PIPE)", "%s - Failed', address) raise ValidatorError(e) ping.short_name = 'PING' def", "try: s.connect((address, port)) LOGGER.info('Checking port %s:%d - OK', address, port)", "#!/usr/bin/env python import os import socket import subprocess import argparse", "socket.error as e: LOGGER.error('Checking port %s:%d - Failed', address, port)", "socket.socket() try: s.connect((address, port)) LOGGER.info('Checking port %s:%d - OK', address,", "%s:%d - Failed', address, port) raise ValidatorError(e) port.short_name = 'PORT'", "ping.short_name = 'PING' def port(address, port): s = socket.socket() try:", "LOGGER.error('Checking port %s:%d - Failed', address, port) raise ValidatorError(e) port.short_name", "'-c 1', '-W 1', address), stdout=subprocess.PIPE, stderr=subprocess.PIPE) LOGGER.info('Ping server %s", "port %s:%d - OK', address, port) except socket.error as e:", "python import os import socket import subprocess import argparse import", "subprocess import argparse import logging LOGGER = logging.getLogger(__name__) class ValidatorError(Exception):", "= 
logging.getLogger(__name__) class ValidatorError(Exception): pass def ping(address): try: subprocess.check_call(('ping', '-c", "%s - OK', address) except subprocess.CalledProcessError as e: LOGGER.error('Ping server", "argparse import logging LOGGER = logging.getLogger(__name__) class ValidatorError(Exception): pass def", "as e: LOGGER.error('Ping server %s - Failed', address) raise ValidatorError(e)", "e: LOGGER.error('Ping server %s - Failed', address) raise ValidatorError(e) ping.short_name", "Failed', address) raise ValidatorError(e) ping.short_name = 'PING' def port(address, port):", "as e: LOGGER.error('Checking port %s:%d - Failed', address, port) raise", "address) except subprocess.CalledProcessError as e: LOGGER.error('Ping server %s - Failed',", "logging.getLogger(__name__) class ValidatorError(Exception): pass def ping(address): try: subprocess.check_call(('ping', '-c 1',", "subprocess.CalledProcessError as e: LOGGER.error('Ping server %s - Failed', address) raise" ]
[ "openeo.udf import XarrayDataCube def apply_datacube(cube: XarrayDataCube, context: dict) -> XarrayDataCube:", "from openeo.udf import XarrayDataCube def apply_datacube(cube: XarrayDataCube, context: dict) ->", "import XarrayDataCube def apply_datacube(cube: XarrayDataCube, context: dict) -> XarrayDataCube: return", "XarrayDataCube def apply_datacube(cube: XarrayDataCube, context: dict) -> XarrayDataCube: return cube", "<reponame>Open-EO/openeo-geopyspark-driver from openeo.udf import XarrayDataCube def apply_datacube(cube: XarrayDataCube, context: dict)" ]
[ "\"\"\" State Implementation: has a resource and go back home", "= player self.player.setTarget(self.player.playerData.HouseLocation) def doAction(self): origin = self.player.playerData.Position target =", "resource home, look 4 resources again if(not self.player.hasResources()): self.player.state =", "self.player.hasResources()): self.player.state = StateLook4Resources(self.player) return create_purchase_action(0) return create_move_action(moves[0]) def toString():", "home, look 4 resources again if(not self.player.hasResources()): self.player.state = StateLook4Resources(self.player)", "StateGoHome(PlayerState): \"\"\" State Implementation: has a resource and go back", "player): self.player = player self.player.setTarget(self.player.playerData.HouseLocation) def doAction(self): origin = self.player.playerData.Position", "again if(not self.player.hasResources()): self.player.state = StateLook4Resources(self.player) return create_purchase_action(0) return create_move_action(moves[0])", "doAction(self): origin = self.player.playerData.Position target = self.player.target moves = PathFinder(self.player.mapView).getPath(origin,", "player just gave the resource home, look 4 resources again", "if(not self.player.hasResources()): self.player.state = StateLook4Resources(self.player) return create_purchase_action(0) return create_move_action(moves[0]) def", "If player just gave the resource home, look 4 resources", "home \"\"\" def __init__(self, player): self.player = player self.player.setTarget(self.player.playerData.HouseLocation) def", "from pathFinder import PathFinder from StateLook4Resources import * class StateGoHome(PlayerState):", "PathFinder(self.player.mapView).getPath(origin, target) # If player just gave the resource home,", "import * from pathFinder import PathFinder from StateLook4Resources import *", "import PathFinder from StateLook4Resources import * class StateGoHome(PlayerState): \"\"\" State", "import * class StateGoHome(PlayerState): \"\"\" State 
Implementation: has a resource", "PlayerState import * from pathFinder import PathFinder from StateLook4Resources import", "\"\"\" def __init__(self, player): self.player = player self.player.setTarget(self.player.playerData.HouseLocation) def doAction(self):", "self.player.playerData.Position target = self.player.target moves = PathFinder(self.player.mapView).getPath(origin, target) # If", "target) # If player just gave the resource home, look", "self.player.setTarget(self.player.playerData.HouseLocation) def doAction(self): origin = self.player.playerData.Position target = self.player.target moves", "# If player just gave the resource home, look 4", "gave the resource home, look 4 resources again if(not self.player.hasResources()):", "back home \"\"\" def __init__(self, player): self.player = player self.player.setTarget(self.player.playerData.HouseLocation)", "moves = PathFinder(self.player.mapView).getPath(origin, target) # If player just gave the", "self.player = player self.player.setTarget(self.player.playerData.HouseLocation) def doAction(self): origin = self.player.playerData.Position target", "= self.player.playerData.Position target = self.player.target moves = PathFinder(self.player.mapView).getPath(origin, target) #", "Implementation: has a resource and go back home \"\"\" def", "from StateLook4Resources import * class StateGoHome(PlayerState): \"\"\" State Implementation: has", "resource and go back home \"\"\" def __init__(self, player): self.player", "__init__(self, player): self.player = player self.player.setTarget(self.player.playerData.HouseLocation) def doAction(self): origin =", "def doAction(self): origin = self.player.playerData.Position target = self.player.target moves =", "* from pathFinder import PathFinder from StateLook4Resources import * class", "pathFinder import PathFinder from StateLook4Resources import * class StateGoHome(PlayerState): \"\"\"", "= PathFinder(self.player.mapView).getPath(origin, target) # If player just gave the 
resource", "resources again if(not self.player.hasResources()): self.player.state = StateLook4Resources(self.player) return create_purchase_action(0) return", "PathFinder from StateLook4Resources import * class StateGoHome(PlayerState): \"\"\" State Implementation:", "4 resources again if(not self.player.hasResources()): self.player.state = StateLook4Resources(self.player) return create_purchase_action(0)", "def __init__(self, player): self.player = player self.player.setTarget(self.player.playerData.HouseLocation) def doAction(self): origin", "* class StateGoHome(PlayerState): \"\"\" State Implementation: has a resource and", "self.player.target moves = PathFinder(self.player.mapView).getPath(origin, target) # If player just gave", "just gave the resource home, look 4 resources again if(not", "self.player.state = StateLook4Resources(self.player) return create_purchase_action(0) return create_move_action(moves[0]) def toString(): return", "go back home \"\"\" def __init__(self, player): self.player = player", "= StateLook4Resources(self.player) return create_purchase_action(0) return create_move_action(moves[0]) def toString(): return \"StateGoHome\"", "origin = self.player.playerData.Position target = self.player.target moves = PathFinder(self.player.mapView).getPath(origin, target)", "has a resource and go back home \"\"\" def __init__(self,", "State Implementation: has a resource and go back home \"\"\"", "player self.player.setTarget(self.player.playerData.HouseLocation) def doAction(self): origin = self.player.playerData.Position target = self.player.target", "target = self.player.target moves = PathFinder(self.player.mapView).getPath(origin, target) # If player", "look 4 resources again if(not self.player.hasResources()): self.player.state = StateLook4Resources(self.player) return", "a resource and go back home \"\"\" def __init__(self, player):", "class StateGoHome(PlayerState): \"\"\" State Implementation: has a resource and go", "the resource home, look 4 
resources again if(not self.player.hasResources()): self.player.state", "= self.player.target moves = PathFinder(self.player.mapView).getPath(origin, target) # If player just", "StateLook4Resources import * class StateGoHome(PlayerState): \"\"\" State Implementation: has a", "from PlayerState import * from pathFinder import PathFinder from StateLook4Resources", "and go back home \"\"\" def __init__(self, player): self.player =" ]
[ "not None) hoomd.run(1) # assert an error is raised if", "slit = mpcd.stream.slit(H=4.) slit.set_params(boundary=\"no_slip\") slit.set_params(boundary=\"slip\") with self.assertRaises(ValueError): slit.set_params(boundary=\"invalid\") # test", "relative to original position. np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.4,-0.1,-3.9]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [0.,1.,1.]) # test", "the slit size too large raises an error def test_validate_box(self):", "test_filler(self): # initialization of a filler slit = mpcd.stream.slit(H=4.) slit.set_filler(density=5.,", "filler should be allowed slit.set_filler(density=10., kT=1.5, seed=7) self.assertTrue(slit._filler is not", "an error is raised if we set a bad particle", "[-4.75,4.75,3.85]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [1.,-1.,-1.]) np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.3,-0.3,-3.9]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [-1.,-1.,1.]) # test that", "== 0: snap.particles.velocity[1] = [-2.,-1.,-1.] 
self.s.restore_snapshot(snap) # run one step", "[-1.,1.,-1.]) np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.2,-0.2,-4.0]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [-1.,-1.,-1.]) # take another step, wrapping", "first particle is matched exactly to the wall speed, and", "self.assertRaises(RuntimeError): slit.set_filler(density=5., kT=1.0, seed=42, type='B') # assert an error is", "mpi builds if hoomd.comm.get_num_ranks() > 1: hoomd.comm.decomposition(nz=2) # default testing", "take one step hoomd.run(1) snap = self.s.take_snapshot() if hoomd.comm.get_rank() ==", "hoomd.run(1) snap = self.s.take_snapshot() if hoomd.comm.get_rank() == 0: np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.75,4.75,3.85])", "= self.s.take_snapshot() if hoomd.comm.get_rank() == 0: np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.75,4.75,3.85]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [1.,-1.,-1.])", "[-2.,-1.,-1.] 
self.s.restore_snapshot(snap) # run one step and check bounce back", "to setup the filler, although this all happens silently hoomd.run(1)", "import mpcd # unit tests for mpcd slit streaming geometry", "boundary=\"no_slip\", period=2) # test for setting parameters def test_set_params(self): slit", "test_out_of_bounds(self): slit = mpcd.stream.slit(H=3.8) with self.assertRaises(RuntimeError): hoomd.run(1) slit.set_params(H=3.85) hoomd.run(1) #", "the second particle has y and z velocities flip again,", "hoomd.comm.get_rank() == 0: np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.75,4.75,3.85]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [1.,-1.,-1.]) np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.3,-0.3,-3.9]) np.testing.assert_array_almost_equal(snap.particles.velocity[1],", "bad density with self.assertRaises(RuntimeError): slit.set_filler(density=-1.0, kT=1.0, seed=42) # removing the", "[1.,1.,1.]) def test_step_moving_wall(self): mpcd.stream.slit(H=4., boundary=\"no_slip\", V=1.0, period=3) # change velocity", "hoomd import md from hoomd import mpcd # unit tests", "boundary conditions def test_step_noslip(self): mpcd.stream.slit(H=4.) # take one step hoomd.run(1)", "mphoward import unittest import numpy as np import hoomd from", "test basic stepping behavior with slip boundary conditions def test_step_slip(self):", "can be attached, removed, and updated def test_filler(self): # initialization", "virtual particle filler can be attached, removed, and updated def", "creation can happen (with all parameters set) def test_create(self): mpcd.stream.slit(H=4.,", "one step and check bounce back of particles hoomd.run(1) snap", "2.) self.assertAlmostEqual(slit.V, 0.) self.assertEqual(slit.boundary, \"no_slip\") self.assertAlmostEqual(slit._cpp.geometry.getH(), 2.) self.assertAlmostEqual(slit._cpp.geometry.getVelocity(), 0.) self.assertEqual(slit._cpp.geometry.getBoundaryCondition(),", "mpcd.stream.slit(H=4.) 
slit.set_params(boundary=\"no_slip\") slit.set_params(boundary=\"slip\") with self.assertRaises(ValueError): slit.set_params(boundary=\"invalid\") # test basic stepping", "to wall snap = self.s.take_snapshot() if hoomd.comm.get_rank() == 0: snap.particles.velocity[1]", "flip again, and since it started closer, # it moves", "= self.s.take_snapshot() if hoomd.comm.get_rank() == 0: snap.particles.velocity[1] = [-2.,-1.,-1.] self.s.restore_snapshot(snap)", "error is raised if we set a bad particle type", "mpcd._mpcd.boundary.no_slip) # change H and also ensure other parameters stay", "z to where it started. # (vx stays the same,", "one particle will now hit the wall hoomd.run(1) snap =", "= mpcd.stream.slit(H=4.) slit.set_params(boundary=\"no_slip\") slit.set_params(boundary=\"slip\") with self.assertRaises(ValueError): slit.set_params(boundary=\"invalid\") # test basic", "mpcd._mpcd.boundary.no_slip) # change V slit.set_params(V=0.1) self.assertAlmostEqual(slit.V, 0.1) self.assertAlmostEqual(slit._cpp.geometry.getVelocity(), 0.1) #", "self.assertAlmostEqual(slit.V, 0.) self.assertEqual(slit.boundary, \"no_slip\") self.assertAlmostEqual(slit._cpp.geometry.getH(), 2.) self.assertAlmostEqual(slit._cpp.geometry.getVelocity(), 0.) 
self.assertEqual(slit._cpp.geometry.getBoundaryCondition(), mpcd._mpcd.boundary.no_slip)", "== 0: # the first particle is matched exactly to", "type with self.assertRaises(RuntimeError): slit.set_filler(density=5., kT=1.0, seed=42, type='B') # assert an", "0: # the first particle is matched exactly to the", "# removing the filler should still allow a run slit.remove_filler()", "and updated def test_filler(self): # initialization of a filler slit", "be allowed slit.set_filler(density=10., kT=1.5, seed=7) self.assertTrue(slit._filler is not None) hoomd.run(1)", "period=2) # test for setting parameters def test_set_params(self): slit =", "test_create(self): mpcd.stream.slit(H=4., V=0.1, boundary=\"no_slip\", period=2) # test for setting parameters", "happens silently hoomd.run(1) # changing the geometry should still be", "slit.set_filler(density=5., kT=1.0, seed=42, type='B') # assert an error is raised", "mpcd.stream.slit(H=4.) self.assertAlmostEqual(slit.H, 4.) self.assertAlmostEqual(slit.V, 0.) self.assertEqual(slit.boundary, \"no_slip\") self.assertAlmostEqual(slit._cpp.geometry.getH(), 4.) self.assertAlmostEqual(slit._cpp.geometry.getVelocity(),", "# test creation can happen (with all parameters set) def", "started closer, # it moves relative to original position. 
np.testing.assert_array_almost_equal(snap.particles.position[1],", "np.testing.assert_array_almost_equal(snap.particles.velocity[1], [1.,1.,1.]) def test_step_moving_wall(self): mpcd.stream.slit(H=4., boundary=\"no_slip\", V=1.0, period=3) # change", "bounds can be caught def test_out_of_bounds(self): slit = mpcd.stream.slit(H=3.8) with", "set a bad density with self.assertRaises(RuntimeError): slit.set_filler(density=-1.0, kT=1.0, seed=42) #", "hoomd.comm.get_rank() == 0: np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.85,4.85,3.95]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [1.,-1.,-1.]) np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.2,-0.2,-4.0]) np.testing.assert_array_almost_equal(snap.particles.velocity[1],", "snap = self.s.take_snapshot() if hoomd.comm.get_rank() == 0: np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.95,4.95,3.95]) np.testing.assert_array_almost_equal(snap.particles.velocity[0],", "hoomd.run(1) snap = self.s.take_snapshot() if hoomd.comm.get_rank() == 0: # the", "self.s.take_snapshot() if hoomd.comm.get_rank() == 0: # the first particle is", "a bad density with self.assertRaises(RuntimeError): slit.set_filler(density=-1.0, kT=1.0, seed=42) # removing", "wall hoomd.run(1) snap = self.s.take_snapshot() if hoomd.comm.get_rank() == 0: np.testing.assert_array_almost_equal(snap.particles.position[0],", "that particles out of bounds can be caught def test_out_of_bounds(self):", "snap = self.s.take_snapshot() if hoomd.comm.get_rank() == 0: # the first", "[-1.,1.,-1.]) np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.1,-0.1,-3.9]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [1.,1.,1.]) def test_step_moving_wall(self): mpcd.stream.slit(H=4., boundary=\"no_slip\", V=1.0,", "at # same velocity along +x for 3 steps. 
It", "an error is raised if we set a bad density", "is matched exactly to the wall speed, and so it", "it moves relative to original position. np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.4,-0.1,-3.9]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [0.,1.,1.])", "mpcd._mpcd.boundary.slip) # test for invalid boundary conditions being set def", "[1.,-1.,-1.]) np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.2,-0.2,-4.0]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [-1.,-1.,-1.]) # take another step, wrapping", "test creation can happen (with all parameters set) def test_create(self):", "boundary conditions being set def test_bad_boundary(self): slit = mpcd.stream.slit(H=4.) slit.set_params(boundary=\"no_slip\")", "to the wall speed, and so it will translate at", "with self.assertRaises(RuntimeError): slit.set_filler(density=5., kT=1.0, seed=42, type='B') # assert an error", "change H and also ensure other parameters stay the same", "np.testing.assert_array_almost_equal(snap.particles.velocity[1], [-1.,-1.,-1.]) # take another step where one particle will", "np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.1,-0.1,-3.9]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [-1.,-1.,-1.]) # take another step where one", "np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.85,4.85,3.95]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [1.,-1.,-1.]) np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.2,-0.2,-4.0]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [-1.,-1.,-1.]) # take", "the first particle is matched exactly to the wall speed,", "self.assertEqual(slit._cpp.geometry.getBoundaryCondition(), mpcd._mpcd.boundary.no_slip) # change H and also ensure other parameters", "0.1) # change BCs slit.set_params(boundary=\"slip\") self.assertEqual(slit.boundary, \"slip\") 
self.assertEqual(slit._cpp.geometry.getBoundaryCondition(), mpcd._mpcd.boundary.slip) #", "in y and z to where it started. # (vx", "same slit.set_params(H=2.) self.assertAlmostEqual(slit.H, 2.) self.assertAlmostEqual(slit.V, 0.) self.assertEqual(slit.boundary, \"no_slip\") self.assertAlmostEqual(slit._cpp.geometry.getH(), 2.)", "[-0.4,-0.1,-3.9]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [0.,1.,1.]) # test basic stepping behavior with slip", "that virtual particle filler can be attached, removed, and updated", "parameters stay the same slit.set_params(H=2.) self.assertAlmostEqual(slit.H, 2.) self.assertAlmostEqual(slit.V, 0.) self.assertEqual(slit.boundary,", "snap.particles.position[:] = [[4.95,-4.95,3.85],[0.,0.,-3.8]] snap.particles.velocity[:] = [[1.,-1.,1.],[-1.,-1.,-1.]] self.s = mpcd.init.read_snapshot(snap) mpcd.integrator(dt=0.1)", "take another step, wrapping the second particle through the boundary", "for setting parameters def test_set_params(self): slit = mpcd.stream.slit(H=4.) self.assertAlmostEqual(slit.H, 4.)", "geometry should still be OK with a run slit.set_params(V=1.0) hoomd.run(1)", "# assert an error is raised if we set a", "del self.s if __name__ == '__main__': unittest.main(argv = ['test.py', '-v'])", "np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.95,4.95,3.95]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [-1.,1.,-1.]) np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.2,-0.2,-4.0]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [-1.,-1.,-1.]) # take", "# make sure we can invalidate it again slit.set_params(H=10.) with", "error def test_validate_box(self): # initial configuration is invalid slit =", "out of bounds can be caught def test_out_of_bounds(self): slit =", "type='A') self.assertTrue(slit._filler is not None) # run should be able", "take another step where one particle will now hit the", "is invalid slit = mpcd.stream.slit(H=10.) 
with self.assertRaises(RuntimeError): hoomd.run(1) # now", "streaming geometry class mpcd_stream_slit_test(unittest.TestCase): def setUp(self): # establish the simulation", "slit.set_filler(density=5., kT=1.0, seed=42, type='A') self.assertTrue(slit._filler is not None) # run", "the same slit.set_params(H=2.) self.assertAlmostEqual(slit.H, 2.) self.assertAlmostEqual(slit.V, 0.) self.assertEqual(slit.boundary, \"no_slip\") self.assertAlmostEqual(slit._cpp.geometry.getH(),", "with self.assertRaises(RuntimeError): hoomd.run(1) # now it should be valid slit.set_params(H=4.)", "hoomd.comm.get_rank() == 0: # the first particle is matched exactly", "hit the wall hoomd.run(1) snap = self.s.take_snapshot() if hoomd.comm.get_rank() ==", "[-0.2,-0.2,-4.0]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [-1.,-1.,-1.]) # take another step, wrapping the second", "# initialization of a filler slit = mpcd.stream.slit(H=4.) slit.set_filler(density=5., kT=1.0,", "is raised if we set a bad density with self.assertRaises(RuntimeError):", "wrapping the second particle through the boundary hoomd.run(1) snap =", "# test that particles out of bounds can be caught", "0: snap.particles.velocity[1] = [-2.,-1.,-1.] self.s.restore_snapshot(snap) # run one step and", "stepping behavior with slip boundary conditions def test_step_slip(self): mpcd.stream.slit(H=4., boundary=\"slip\")", "(vx stays the same, and vy and vz flip.) np.testing.assert_array_almost_equal(snap.particles.position[0],", "of bounds can be caught def test_out_of_bounds(self): slit = mpcd.stream.slit(H=3.8)", "def test_step_noslip(self): mpcd.stream.slit(H=4.) # take one step hoomd.run(1) snap =", "import md from hoomd import mpcd # unit tests for", "particle so it is translating relative to wall snap =", "HOOMD-blue project, released under the BSD 3-Clause License. # Maintainer:", "it started. 
# (vx stays the same, and vy and", "is raised if we set a bad particle type with", "setUp(self): # establish the simulation context hoomd.context.initialize() # set the", "translating relative to wall snap = self.s.take_snapshot() if hoomd.comm.get_rank() ==", "size too large raises an error def test_validate_box(self): # initial", "filler slit = mpcd.stream.slit(H=4.) slit.set_filler(density=5., kT=1.0, seed=42, type='A') self.assertTrue(slit._filler is", "configuration is invalid slit = mpcd.stream.slit(H=10.) with self.assertRaises(RuntimeError): hoomd.run(1) #", "0: np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.95,4.95,3.95]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [1.,-1.,1.]) np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.1,-0.1,-3.9]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [-1.,-1.,-1.]) #", "should be able to setup the filler, although this all", "if hoomd.comm.get_rank() == 0: np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.95,4.95,3.95]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [1.,-1.,1.]) np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.1,-0.1,-3.9])", "for 3 steps. It will bounce back in y and", "sure we can invalidate it again slit.set_params(H=10.) with self.assertRaises(RuntimeError): hoomd.run(1)", "removing the filler should still allow a run slit.remove_filler() self.assertTrue(slit._filler", "and also ensure other parameters stay the same slit.set_params(H=2.) self.assertAlmostEqual(slit.H,", "under the BSD 3-Clause License. 
# Maintainer: mphoward import unittest", "mpcd.stream.slit(H=4., V=0.1, boundary=\"no_slip\", period=2) # test for setting parameters def", "geometry class mpcd_stream_slit_test(unittest.TestCase): def setUp(self): # establish the simulation context", "particle will now hit the wall hoomd.run(1) snap = self.s.take_snapshot()", "be caught def test_out_of_bounds(self): slit = mpcd.stream.slit(H=3.8) with self.assertRaises(RuntimeError): hoomd.run(1)", "[-0.1,-0.1,-3.9]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [1.,1.,1.]) def test_step_moving_wall(self): mpcd.stream.slit(H=4., boundary=\"no_slip\", V=1.0, period=3) #", "run slit.set_params(V=1.0) hoomd.run(1) # changing filler should be allowed slit.set_filler(density=10.,", "slit streaming geometry class mpcd_stream_slit_test(unittest.TestCase): def setUp(self): # establish the", "# Maintainer: mphoward import unittest import numpy as np import", "kT=1.0, seed=42) # removing the filler should still allow a", "test for setting parameters def test_set_params(self): slit = mpcd.stream.slit(H=4.) 
self.assertAlmostEqual(slit.H,", "Maintainer: mphoward import unittest import numpy as np import hoomd", "hoomd.init.read_snapshot(hoomd.data.make_snapshot(N=0, box=hoomd.data.boxdim(L=10.))) # initialize the system from the starting snapshot", "filler, although this all happens silently hoomd.run(1) # changing the", "self.assertEqual(slit._cpp.geometry.getBoundaryCondition(), mpcd._mpcd.boundary.no_slip) # change V slit.set_params(V=0.1) self.assertAlmostEqual(slit.V, 0.1) self.assertAlmostEqual(slit._cpp.geometry.getVelocity(), 0.1)", "the system from the starting snapshot snap = mpcd.data.make_snapshot(N=2) snap.particles.position[:]", "== 0: np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.75,4.75,3.85]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [1.,-1.,-1.]) np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.3,-0.3,-3.9]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [-1.,-1.,1.])", "slit.set_params(V=1.0) hoomd.run(1) # changing filler should be allowed slit.set_filler(density=10., kT=1.5,", "mpcd # unit tests for mpcd slit streaming geometry class", "# default testing configuration hoomd.init.read_snapshot(hoomd.data.make_snapshot(N=0, box=hoomd.data.boxdim(L=10.))) # initialize the system", "University of Michigan # This file is part of the", "hoomd.comm.decomposition(nz=2) # default testing configuration hoomd.init.read_snapshot(hoomd.data.make_snapshot(N=0, box=hoomd.data.boxdim(L=10.))) # initialize the", "# take another step, wrapping the second particle through the", "np.testing.assert_array_almost_equal(snap.particles.position[0], [4.95,-4.95,3.85]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [-1.,1.,-1.]) np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.1,-0.1,-3.9]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [1.,1.,1.]) def test_step_moving_wall(self):", "position. 
np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.4,-0.1,-3.9]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [0.,1.,1.]) # test basic stepping behavior", "the filler, although this all happens silently hoomd.run(1) # changing", "change V slit.set_params(V=0.1) self.assertAlmostEqual(slit.V, 0.1) self.assertAlmostEqual(slit._cpp.geometry.getVelocity(), 0.1) # change BCs", "we set a bad density with self.assertRaises(RuntimeError): slit.set_filler(density=-1.0, kT=1.0, seed=42)", "hoomd.comm.get_rank() == 0: np.testing.assert_array_almost_equal(snap.particles.position[0], [4.95,-4.95,3.85]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [-1.,1.,-1.]) np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.1,-0.1,-3.9]) np.testing.assert_array_almost_equal(snap.particles.velocity[1],", "slit = mpcd.stream.slit(H=4.) self.assertAlmostEqual(slit.H, 4.) self.assertAlmostEqual(slit.V, 0.) self.assertEqual(slit.boundary, \"no_slip\") self.assertAlmostEqual(slit._cpp.geometry.getH(),", "particle filler can be attached, removed, and updated def test_filler(self):", "of lower particle so it is translating relative to wall", "+x for 3 steps. It will bounce back in y", "(c) 2009-2019 The Regents of the University of Michigan #", "License. 
# Maintainer: mphoward import unittest import numpy as np", "np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.4,-0.1,-3.9]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [0.,1.,1.]) # test basic stepping behavior with", "np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.2,-0.2,-4.0]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [-1.,-1.,-1.]) # take another step, wrapping the", "the geometry should still be OK with a run slit.set_params(V=1.0)", "snap = self.s.take_snapshot() if hoomd.comm.get_rank() == 0: snap.particles.velocity[1] = [-2.,-1.,-1.]", "back of particles hoomd.run(1) snap = self.s.take_snapshot() if hoomd.comm.get_rank() ==", "another step where one particle will now hit the wall", "hoomd.comm.get_num_ranks() > 1: hoomd.comm.decomposition(nz=2) # default testing configuration hoomd.init.read_snapshot(hoomd.data.make_snapshot(N=0, box=hoomd.data.boxdim(L=10.)))", "mpcd.stream.slit(H=3.8) with self.assertRaises(RuntimeError): hoomd.run(1) slit.set_params(H=3.85) hoomd.run(1) # test that virtual", "self.s = mpcd.init.read_snapshot(snap) mpcd.integrator(dt=0.1) # test creation can happen (with", "hoomd import mpcd # unit tests for mpcd slit streaming", "setup the filler, although this all happens silently hoomd.run(1) #", "since it started closer, # it moves relative to original", "should still be OK with a run slit.set_params(V=1.0) hoomd.run(1) #", "will translate at # same velocity along +x for 3", "mpcd.stream.slit(H=4., boundary=\"slip\") # take one step hoomd.run(1) snap = self.s.take_snapshot()", "can invalidate it again slit.set_params(H=10.) with self.assertRaises(RuntimeError): hoomd.run(1) # test", "behavior with no slip boundary conditions def test_step_noslip(self): mpcd.stream.slit(H=4.) 
#", "# test that virtual particle filler can be attached, removed,", "snapshot snap = mpcd.data.make_snapshot(N=2) snap.particles.position[:] = [[4.95,-4.95,3.85],[0.,0.,-3.8]] snap.particles.velocity[:] = [[1.,-1.,1.],[-1.,-1.,-1.]]", "self.assertRaises(ValueError): slit.set_params(boundary=\"invalid\") # test basic stepping behavior with no slip", "and check bounce back of particles hoomd.run(1) snap = self.s.take_snapshot()", "np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.3,-0.3,-3.9]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [-1.,-1.,1.]) # test that setting the slit", "still be OK with a run slit.set_params(V=1.0) hoomd.run(1) # changing", "self.assertTrue(slit._filler is None) hoomd.run(1) def tearDown(self): del self.s if __name__", "the wall speed, and so it will translate at #", "slit.set_params(boundary=\"slip\") with self.assertRaises(ValueError): slit.set_params(boundary=\"invalid\") # test basic stepping behavior with", "and z to where it started. # (vx stays the", "== 0: np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.95,4.95,3.95]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [1.,-1.,1.]) np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.1,-0.1,-3.9]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [-1.,-1.,-1.])", "it again slit.set_params(H=10.) with self.assertRaises(RuntimeError): hoomd.run(1) # test that particles", "slit = mpcd.stream.slit(H=10.) with self.assertRaises(RuntimeError): hoomd.run(1) # now it should", "set a bad particle type with self.assertRaises(RuntimeError): slit.set_filler(density=5., kT=1.0, seed=42,", "box=hoomd.data.boxdim(L=10.))) # initialize the system from the starting snapshot snap", "0.) self.assertEqual(slit.boundary, \"no_slip\") self.assertAlmostEqual(slit._cpp.geometry.getH(), 2.) self.assertAlmostEqual(slit._cpp.geometry.getVelocity(), 0.) 
self.assertEqual(slit._cpp.geometry.getBoundaryCondition(), mpcd._mpcd.boundary.no_slip) #", "kT=1.0, seed=42, type='B') # assert an error is raised if", "# same velocity along +x for 3 steps. It will", "with self.assertRaises(RuntimeError): hoomd.run(1) slit.set_params(H=3.85) hoomd.run(1) # test that virtual particle", "kT=1.0, seed=42, type='A') self.assertTrue(slit._filler is not None) # run should", "\"slip\") self.assertEqual(slit._cpp.geometry.getBoundaryCondition(), mpcd._mpcd.boundary.slip) # test for invalid boundary conditions being", "test_step_moving_wall(self): mpcd.stream.slit(H=4., boundary=\"no_slip\", V=1.0, period=3) # change velocity of lower", "velocities flip again, and since it started closer, # it", "conditions def test_step_slip(self): mpcd.stream.slit(H=4., boundary=\"slip\") # take one step hoomd.run(1)", "self.assertTrue(slit._filler is not None) hoomd.run(1) # assert an error is", "particles hoomd.run(1) snap = self.s.take_snapshot() if hoomd.comm.get_rank() == 0: #", "should still allow a run slit.remove_filler() self.assertTrue(slit._filler is None) hoomd.run(1)", "it will translate at # same velocity along +x for", "the simulation context hoomd.context.initialize() # set the decomposition in z", "step, wrapping the second particle through the boundary hoomd.run(1) snap", "again, and since it started closer, # it moves relative", "hoomd.run(1) slit.set_params(H=3.85) hoomd.run(1) # test that virtual particle filler can", "raised if we set a bad density with self.assertRaises(RuntimeError): slit.set_filler(density=-1.0,", "2.) self.assertAlmostEqual(slit._cpp.geometry.getVelocity(), 0.) 
self.assertEqual(slit._cpp.geometry.getBoundaryCondition(), mpcd._mpcd.boundary.no_slip) # change V slit.set_params(V=0.1) self.assertAlmostEqual(slit.V,", "== 0: np.testing.assert_array_almost_equal(snap.particles.position[0], [4.95,-4.95,3.85]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [-1.,1.,-1.]) np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.1,-0.1,-3.9]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [1.,1.,1.])", "import unittest import numpy as np import hoomd from hoomd", "V=0.1, boundary=\"no_slip\", period=2) # test for setting parameters def test_set_params(self):", "unit tests for mpcd slit streaming geometry class mpcd_stream_slit_test(unittest.TestCase): def", "y and z to where it started. # (vx stays", "density with self.assertRaises(RuntimeError): slit.set_filler(density=-1.0, kT=1.0, seed=42) # removing the filler", "# unit tests for mpcd slit streaming geometry class mpcd_stream_slit_test(unittest.TestCase):", "= [-2.,-1.,-1.] self.s.restore_snapshot(snap) # run one step and check bounce", "np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.95,4.95,3.95]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [1.,-1.,1.]) np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.1,-0.1,-3.9]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [-1.,-1.,-1.]) # take", "is part of the HOOMD-blue project, released under the BSD", "test that particles out of bounds can be caught def", "bad particle type with self.assertRaises(RuntimeError): slit.set_filler(density=5., kT=1.0, seed=42, type='B') #", "to where it started. 
# (vx stays the same, and", "0: np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.85,4.85,3.95]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [1.,-1.,-1.]) np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.2,-0.2,-4.0]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [-1.,-1.,-1.]) #", "system from the starting snapshot snap = mpcd.data.make_snapshot(N=2) snap.particles.position[:] =", "md from hoomd import mpcd # unit tests for mpcd", "unittest import numpy as np import hoomd from hoomd import", "np.testing.assert_array_almost_equal(snap.particles.velocity[0], [1.,-1.,-1.]) np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.2,-0.2,-4.0]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [-1.,-1.,-1.]) # take another step,", "setting the slit size too large raises an error def", "it should be valid slit.set_params(H=4.) hoomd.run(2) # make sure we", "and so it will translate at # same velocity along", "mpcd_stream_slit_test(unittest.TestCase): def setUp(self): # establish the simulation context hoomd.context.initialize() #", "changing the geometry should still be OK with a run", "establish the simulation context hoomd.context.initialize() # set the decomposition in", "so it is translating relative to wall snap = self.s.take_snapshot()", "if hoomd.comm.get_rank() == 0: # the first particle is matched", "with slip boundary conditions def test_step_slip(self): mpcd.stream.slit(H=4., boundary=\"slip\") # take", "attached, removed, and updated def test_filler(self): # initialization of a", "seed=42, type='B') # assert an error is raised if we", "self.s.take_snapshot() if hoomd.comm.get_rank() == 0: np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.75,4.75,3.85]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [1.,-1.,-1.]) np.testing.assert_array_almost_equal(snap.particles.position[1],", "if hoomd.comm.get_rank() == 0: 
snap.particles.velocity[1] = [-2.,-1.,-1.] self.s.restore_snapshot(snap) # run", "1: hoomd.comm.decomposition(nz=2) # default testing configuration hoomd.init.read_snapshot(hoomd.data.make_snapshot(N=0, box=hoomd.data.boxdim(L=10.))) # initialize", "= self.s.take_snapshot() if hoomd.comm.get_rank() == 0: np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.95,4.95,3.95]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [1.,-1.,1.])", "= self.s.take_snapshot() if hoomd.comm.get_rank() == 0: # the first particle", "mpcd.integrator(dt=0.1) # test creation can happen (with all parameters set)", "\"no_slip\") self.assertAlmostEqual(slit._cpp.geometry.getH(), 2.) self.assertAlmostEqual(slit._cpp.geometry.getVelocity(), 0.) self.assertEqual(slit._cpp.geometry.getBoundaryCondition(), mpcd._mpcd.boundary.no_slip) # change V", "boundary=\"slip\") # take one step hoomd.run(1) snap = self.s.take_snapshot() if", "[-1.,-1.,-1.]) # take another step where one particle will now", "# change V slit.set_params(V=0.1) self.assertAlmostEqual(slit.V, 0.1) self.assertAlmostEqual(slit._cpp.geometry.getVelocity(), 0.1) # change", "invalidate it again slit.set_params(H=10.) 
with self.assertRaises(RuntimeError): hoomd.run(1) # test that", "np.testing.assert_array_almost_equal(snap.particles.velocity[0], [-1.,1.,-1.]) np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.2,-0.2,-4.0]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [-1.,-1.,-1.]) # take another step,", "# change BCs slit.set_params(boundary=\"slip\") self.assertEqual(slit.boundary, \"slip\") self.assertEqual(slit._cpp.geometry.getBoundaryCondition(), mpcd._mpcd.boundary.slip) # test", "as np import hoomd from hoomd import md from hoomd", "mpcd.init.read_snapshot(snap) mpcd.integrator(dt=0.1) # test creation can happen (with all parameters", "def test_validate_box(self): # initial configuration is invalid slit = mpcd.stream.slit(H=10.)", "# now it should be valid slit.set_params(H=4.) hoomd.run(2) # make", "def setUp(self): # establish the simulation context hoomd.context.initialize() # set", "def test_filler(self): # initialization of a filler slit = mpcd.stream.slit(H=4.)", "0: np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.75,4.75,3.85]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [1.,-1.,-1.]) np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.3,-0.3,-3.9]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [-1.,-1.,1.]) #", "particle is matched exactly to the wall speed, and so", "hoomd.run(2) # make sure we can invalidate it again slit.set_params(H=10.)", "seed=42) # removing the filler should still allow a run", "and vz flip.) np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.75,-4.95,3.85]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [1.,1.,-1.]) # the second", "so it will translate at # same velocity along +x", "be valid slit.set_params(H=4.) 
hoomd.run(2) # make sure we can invalidate", "<reponame>schwendp/hoomd-blue # Copyright (c) 2009-2019 The Regents of the University", "now it should be valid slit.set_params(H=4.) hoomd.run(2) # make sure", "raised if we set a bad particle type with self.assertRaises(RuntimeError):", "self.assertTrue(slit._filler is not None) # run should be able to", "the decomposition in z for mpi builds if hoomd.comm.get_num_ranks() >", "testing configuration hoomd.init.read_snapshot(hoomd.data.make_snapshot(N=0, box=hoomd.data.boxdim(L=10.))) # initialize the system from the", "can happen (with all parameters set) def test_create(self): mpcd.stream.slit(H=4., V=0.1,", "step and check bounce back of particles hoomd.run(1) snap =", "seed=42, type='A') self.assertTrue(slit._filler is not None) # run should be", "hoomd.comm.get_rank() == 0: snap.particles.velocity[1] = [-2.,-1.,-1.] self.s.restore_snapshot(snap) # run one", "hoomd.run(1) # assert an error is raised if we set", "# set the decomposition in z for mpi builds if", "boundary conditions def test_step_slip(self): mpcd.stream.slit(H=4., boundary=\"slip\") # take one step", "of a filler slit = mpcd.stream.slit(H=4.) slit.set_filler(density=5., kT=1.0, seed=42, type='A')", "test_bad_boundary(self): slit = mpcd.stream.slit(H=4.) slit.set_params(boundary=\"no_slip\") slit.set_params(boundary=\"slip\") with self.assertRaises(ValueError): slit.set_params(boundary=\"invalid\") #", "of the University of Michigan # This file is part", "def test_set_params(self): slit = mpcd.stream.slit(H=4.) self.assertAlmostEqual(slit.H, 4.) self.assertAlmostEqual(slit.V, 0.) self.assertEqual(slit.boundary,", "along +x for 3 steps. It will bounce back in", "hoomd.run(1) # changing filler should be allowed slit.set_filler(density=10., kT=1.5, seed=7)", "slit.set_filler(density=-1.0, kT=1.0, seed=42) # removing the filler should still allow", "other parameters stay the same slit.set_params(H=2.) self.assertAlmostEqual(slit.H, 2.) 
self.assertAlmostEqual(slit.V, 0.)", "is None) hoomd.run(1) def tearDown(self): del self.s if __name__ ==", "test_step_noslip(self): mpcd.stream.slit(H=4.) # take one step hoomd.run(1) snap = self.s.take_snapshot()", "np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.75,-4.95,3.85]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [1.,1.,-1.]) # the second particle has y", "should be allowed slit.set_filler(density=10., kT=1.5, seed=7) self.assertTrue(slit._filler is not None)", "all happens silently hoomd.run(1) # changing the geometry should still", "check bounce back of particles hoomd.run(1) snap = self.s.take_snapshot() if", "self.assertAlmostEqual(slit.V, 0.) self.assertEqual(slit.boundary, \"no_slip\") self.assertAlmostEqual(slit._cpp.geometry.getH(), 4.) self.assertAlmostEqual(slit._cpp.geometry.getVelocity(), 0.) self.assertEqual(slit._cpp.geometry.getBoundaryCondition(), mpcd._mpcd.boundary.no_slip)", "where it started. # (vx stays the same, and vy", "bounce back in y and z to where it started.", "slit.set_params(H=4.) hoomd.run(2) # make sure we can invalidate it again", "self.assertRaises(RuntimeError): slit.set_filler(density=-1.0, kT=1.0, seed=42) # removing the filler should still", "hoomd.run(1) snap = self.s.take_snapshot() if hoomd.comm.get_rank() == 0: np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.95,4.95,3.95])", "is not None) hoomd.run(1) # assert an error is raised", "configuration hoomd.init.read_snapshot(hoomd.data.make_snapshot(N=0, box=hoomd.data.boxdim(L=10.))) # initialize the system from the starting", "and vy and vz flip.) np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.75,-4.95,3.85]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [1.,1.,-1.]) #", "all parameters set) def test_create(self): mpcd.stream.slit(H=4., V=0.1, boundary=\"no_slip\", period=2) #", "self.assertAlmostEqual(slit.H, 2.) self.assertAlmostEqual(slit.V, 0.) 
self.assertEqual(slit.boundary, \"no_slip\") self.assertAlmostEqual(slit._cpp.geometry.getH(), 2.) self.assertAlmostEqual(slit._cpp.geometry.getVelocity(), 0.)", "# change velocity of lower particle so it is translating", "particle has y and z velocities flip again, and since", "= self.s.take_snapshot() if hoomd.comm.get_rank() == 0: np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.85,4.85,3.95]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [1.,-1.,-1.])", "np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.75,4.75,3.85]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [1.,-1.,-1.]) np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.3,-0.3,-3.9]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [-1.,-1.,1.]) # test", "the wall hoomd.run(1) snap = self.s.take_snapshot() if hoomd.comm.get_rank() == 0:", "# test basic stepping behavior with no slip boundary conditions", "has y and z velocities flip again, and since it", "allowed slit.set_filler(density=10., kT=1.5, seed=7) self.assertTrue(slit._filler is not None) hoomd.run(1) #", "numpy as np import hoomd from hoomd import md from", "def test_bad_boundary(self): slit = mpcd.stream.slit(H=4.) slit.set_params(boundary=\"no_slip\") slit.set_params(boundary=\"slip\") with self.assertRaises(ValueError): slit.set_params(boundary=\"invalid\")", "relative to wall snap = self.s.take_snapshot() if hoomd.comm.get_rank() == 0:", "# (vx stays the same, and vy and vz flip.)", "self.assertRaises(RuntimeError): hoomd.run(1) slit.set_params(H=3.85) hoomd.run(1) # test that virtual particle filler", "y and z velocities flip again, and since it started", "the same, and vy and vz flip.) np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.75,-4.95,3.85]) np.testing.assert_array_almost_equal(snap.particles.velocity[0],", "moves relative to original position. 
np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.4,-0.1,-3.9]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [0.,1.,1.]) #", "run slit.remove_filler() self.assertTrue(slit._filler is None) hoomd.run(1) def tearDown(self): del self.s", "# initialize the system from the starting snapshot snap =", "slit = mpcd.stream.slit(H=3.8) with self.assertRaises(RuntimeError): hoomd.run(1) slit.set_params(H=3.85) hoomd.run(1) # test", "slit = mpcd.stream.slit(H=4.) slit.set_filler(density=5., kT=1.0, seed=42, type='A') self.assertTrue(slit._filler is not", "= mpcd.init.read_snapshot(snap) mpcd.integrator(dt=0.1) # test creation can happen (with all", "None) hoomd.run(1) # assert an error is raised if we", "np.testing.assert_array_almost_equal(snap.particles.velocity[1], [-1.,-1.,1.]) # test that setting the slit size too", "same, and vy and vz flip.) np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.75,-4.95,3.85]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [1.,1.,-1.])", "valid slit.set_params(H=4.) hoomd.run(2) # make sure we can invalidate it", "OK with a run slit.set_params(V=1.0) hoomd.run(1) # changing filler should", "steps. It will bounce back in y and z to", "flip.) 
np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.75,-4.95,3.85]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [1.,1.,-1.]) # the second particle has", "behavior with slip boundary conditions def test_step_slip(self): mpcd.stream.slit(H=4., boundary=\"slip\") #", "this all happens silently hoomd.run(1) # changing the geometry should", "set the decomposition in z for mpi builds if hoomd.comm.get_num_ranks()", "the University of Michigan # This file is part of", "wall speed, and so it will translate at # same", "matched exactly to the wall speed, and so it will", "default testing configuration hoomd.init.read_snapshot(hoomd.data.make_snapshot(N=0, box=hoomd.data.boxdim(L=10.))) # initialize the system from", "lower particle so it is translating relative to wall snap", "3 steps. It will bounce back in y and z", "no slip boundary conditions def test_step_noslip(self): mpcd.stream.slit(H=4.) # take one", "= self.s.take_snapshot() if hoomd.comm.get_rank() == 0: np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.95,4.95,3.95]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [-1.,1.,-1.])", "self.s.take_snapshot() if hoomd.comm.get_rank() == 0: np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.85,4.85,3.95]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [1.,-1.,-1.]) np.testing.assert_array_almost_equal(snap.particles.position[1],", "self.assertAlmostEqual(slit.V, 0.1) self.assertAlmostEqual(slit._cpp.geometry.getVelocity(), 0.1) # change BCs slit.set_params(boundary=\"slip\") self.assertEqual(slit.boundary, \"slip\")", "filler can be attached, removed, and updated def test_filler(self): #", "silently hoomd.run(1) # changing the geometry should still be OK", "translate at # same velocity along +x for 3 steps.", "if hoomd.comm.get_rank() == 0: np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.95,4.95,3.95]) 
np.testing.assert_array_almost_equal(snap.particles.velocity[0], [-1.,1.,-1.]) np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.2,-0.2,-4.0])", "be attached, removed, and updated def test_filler(self): # initialization of", "a run slit.set_params(V=1.0) hoomd.run(1) # changing filler should be allowed", "hoomd.run(1) # now it should be valid slit.set_params(H=4.) hoomd.run(2) #", "slit size too large raises an error def test_validate_box(self): #", "[-4.95,4.95,3.95]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [1.,-1.,1.]) np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.1,-0.1,-3.9]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [-1.,-1.,-1.]) # take another", "if hoomd.comm.get_rank() == 0: np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.85,4.85,3.95]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [1.,-1.,-1.]) np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.2,-0.2,-4.0])", "snap.particles.velocity[1] = [-2.,-1.,-1.] self.s.restore_snapshot(snap) # run one step and check", "self.assertAlmostEqual(slit._cpp.geometry.getH(), 2.) self.assertAlmostEqual(slit._cpp.geometry.getVelocity(), 0.) self.assertEqual(slit._cpp.geometry.getBoundaryCondition(), mpcd._mpcd.boundary.no_slip) # change V slit.set_params(V=0.1)", "raises an error def test_validate_box(self): # initial configuration is invalid", "particles out of bounds can be caught def test_out_of_bounds(self): slit", "0.1) self.assertAlmostEqual(slit._cpp.geometry.getVelocity(), 0.1) # change BCs slit.set_params(boundary=\"slip\") self.assertEqual(slit.boundary, \"slip\") self.assertEqual(slit._cpp.geometry.getBoundaryCondition(),", "the second particle through the boundary hoomd.run(1) snap = self.s.take_snapshot()", "same velocity along +x for 3 steps. It will bounce", "ensure other parameters stay the same slit.set_params(H=2.) self.assertAlmostEqual(slit.H, 2.) 
self.assertAlmostEqual(slit.V,", "# it moves relative to original position. np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.4,-0.1,-3.9]) np.testing.assert_array_almost_equal(snap.particles.velocity[1],", "file is part of the HOOMD-blue project, released under the", "setting parameters def test_set_params(self): slit = mpcd.stream.slit(H=4.) self.assertAlmostEqual(slit.H, 4.) self.assertAlmostEqual(slit.V,", "# take another step where one particle will now hit", "# initial configuration is invalid slit = mpcd.stream.slit(H=10.) with self.assertRaises(RuntimeError):", "for mpcd slit streaming geometry class mpcd_stream_slit_test(unittest.TestCase): def setUp(self): #", "slip boundary conditions def test_step_slip(self): mpcd.stream.slit(H=4., boundary=\"slip\") # take one", "self.s.take_snapshot() if hoomd.comm.get_rank() == 0: np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.95,4.95,3.95]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [-1.,1.,-1.]) np.testing.assert_array_almost_equal(snap.particles.position[1],", "wall snap = self.s.take_snapshot() if hoomd.comm.get_rank() == 0: snap.particles.velocity[1] =", "particle type with self.assertRaises(RuntimeError): slit.set_filler(density=5., kT=1.0, seed=42, type='B') # assert", "Michigan # This file is part of the HOOMD-blue project,", "slit.set_params(H=3.85) hoomd.run(1) # test that virtual particle filler can be", "builds if hoomd.comm.get_num_ranks() > 1: hoomd.comm.decomposition(nz=2) # default testing configuration", "[[4.95,-4.95,3.85],[0.,0.,-3.8]] snap.particles.velocity[:] = [[1.,-1.,1.],[-1.,-1.,-1.]] self.s = mpcd.init.read_snapshot(snap) mpcd.integrator(dt=0.1) # test", "self.assertAlmostEqual(slit.H, 4.) self.assertAlmostEqual(slit.V, 0.) self.assertEqual(slit.boundary, \"no_slip\") self.assertAlmostEqual(slit._cpp.geometry.getH(), 4.) 
self.assertAlmostEqual(slit._cpp.geometry.getVelocity(), 0.)", "the starting snapshot snap = mpcd.data.make_snapshot(N=2) snap.particles.position[:] = [[4.95,-4.95,3.85],[0.,0.,-3.8]] snap.particles.velocity[:]", "[-4.95,4.95,3.95]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [-1.,1.,-1.]) np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.2,-0.2,-4.0]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [-1.,-1.,-1.]) # take another", "= mpcd.stream.slit(H=3.8) with self.assertRaises(RuntimeError): hoomd.run(1) slit.set_params(H=3.85) hoomd.run(1) # test that", "# This file is part of the HOOMD-blue project, released", "a run slit.remove_filler() self.assertTrue(slit._filler is None) hoomd.run(1) def tearDown(self): del", "the HOOMD-blue project, released under the BSD 3-Clause License. #", "is translating relative to wall snap = self.s.take_snapshot() if hoomd.comm.get_rank()", "context hoomd.context.initialize() # set the decomposition in z for mpi", "the boundary hoomd.run(1) snap = self.s.take_snapshot() if hoomd.comm.get_rank() == 0:", "= self.s.take_snapshot() if hoomd.comm.get_rank() == 0: np.testing.assert_array_almost_equal(snap.particles.position[0], [4.95,-4.95,3.85]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [-1.,1.,-1.])", "[-0.1,-0.1,-3.9]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [-1.,-1.,-1.]) # take another step where one particle", "test for invalid boundary conditions being set def test_bad_boundary(self): slit", "run should be able to setup the filler, although this", "basic stepping behavior with slip boundary conditions def test_step_slip(self): mpcd.stream.slit(H=4.,", "2009-2019 The Regents of the University of Michigan # This", "if hoomd.comm.get_num_ranks() > 1: hoomd.comm.decomposition(nz=2) # default testing configuration hoomd.init.read_snapshot(hoomd.data.make_snapshot(N=0,", "mpcd.stream.slit(H=4.) 
# take one step hoomd.run(1) snap = self.s.take_snapshot() if", "back in y and z to where it started. #", "H and also ensure other parameters stay the same slit.set_params(H=2.)", "to original position. np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.4,-0.1,-3.9]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [0.,1.,1.]) # test basic", "removed, and updated def test_filler(self): # initialization of a filler", "basic stepping behavior with no slip boundary conditions def test_step_noslip(self):", "it is translating relative to wall snap = self.s.take_snapshot() if", "0.) self.assertEqual(slit._cpp.geometry.getBoundaryCondition(), mpcd._mpcd.boundary.no_slip) # change V slit.set_params(V=0.1) self.assertAlmostEqual(slit.V, 0.1) self.assertAlmostEqual(slit._cpp.geometry.getVelocity(),", "will now hit the wall hoomd.run(1) snap = self.s.take_snapshot() if", "happen (with all parameters set) def test_create(self): mpcd.stream.slit(H=4., V=0.1, boundary=\"no_slip\",", "0.) self.assertEqual(slit.boundary, \"no_slip\") self.assertAlmostEqual(slit._cpp.geometry.getH(), 4.) self.assertAlmostEqual(slit._cpp.geometry.getVelocity(), 0.) 
self.assertEqual(slit._cpp.geometry.getBoundaryCondition(), mpcd._mpcd.boundary.no_slip) #", "# run should be able to setup the filler, although", "part of the HOOMD-blue project, released under the BSD 3-Clause", "from hoomd import mpcd # unit tests for mpcd slit", "np.testing.assert_array_almost_equal(snap.particles.velocity[0], [1.,-1.,1.]) np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.1,-0.1,-3.9]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [-1.,-1.,-1.]) # take another step", "mpcd.stream.slit(H=4., boundary=\"no_slip\", V=1.0, period=3) # change velocity of lower particle", "for invalid boundary conditions being set def test_bad_boundary(self): slit =", "slit.set_params(boundary=\"no_slip\") slit.set_params(boundary=\"slip\") with self.assertRaises(ValueError): slit.set_params(boundary=\"invalid\") # test basic stepping behavior", "of particles hoomd.run(1) snap = self.s.take_snapshot() if hoomd.comm.get_rank() == 0:", "updated def test_filler(self): # initialization of a filler slit =", "set) def test_create(self): mpcd.stream.slit(H=4., V=0.1, boundary=\"no_slip\", period=2) # test for", "test that setting the slit size too large raises an", "particle through the boundary hoomd.run(1) snap = self.s.take_snapshot() if hoomd.comm.get_rank()", "vy and vz flip.) np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.75,-4.95,3.85]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [1.,1.,-1.]) # the", "and z velocities flip again, and since it started closer,", "invalid slit = mpcd.stream.slit(H=10.) with self.assertRaises(RuntimeError): hoomd.run(1) # now it", "self.assertEqual(slit.boundary, \"no_slip\") self.assertAlmostEqual(slit._cpp.geometry.getH(), 4.) self.assertAlmostEqual(slit._cpp.geometry.getVelocity(), 0.) 
self.assertEqual(slit._cpp.geometry.getBoundaryCondition(), mpcd._mpcd.boundary.no_slip) # change", "tests for mpcd slit streaming geometry class mpcd_stream_slit_test(unittest.TestCase): def setUp(self):", "z velocities flip again, and since it started closer, #", "self.s.take_snapshot() if hoomd.comm.get_rank() == 0: snap.particles.velocity[1] = [-2.,-1.,-1.] self.s.restore_snapshot(snap) #", "exactly to the wall speed, and so it will translate", "boundary=\"no_slip\", V=1.0, period=3) # change velocity of lower particle so", "make sure we can invalidate it again slit.set_params(H=10.) with self.assertRaises(RuntimeError):", "decomposition in z for mpi builds if hoomd.comm.get_num_ranks() > 1:", "from the starting snapshot snap = mpcd.data.make_snapshot(N=2) snap.particles.position[:] = [[4.95,-4.95,3.85],[0.,0.,-3.8]]", "again slit.set_params(H=10.) with self.assertRaises(RuntimeError): hoomd.run(1) # test that particles out", "change BCs slit.set_params(boundary=\"slip\") self.assertEqual(slit.boundary, \"slip\") self.assertEqual(slit._cpp.geometry.getBoundaryCondition(), mpcd._mpcd.boundary.slip) # test for", "self.assertEqual(slit.boundary, \"no_slip\") self.assertAlmostEqual(slit._cpp.geometry.getH(), 2.) self.assertAlmostEqual(slit._cpp.geometry.getVelocity(), 0.) self.assertEqual(slit._cpp.geometry.getBoundaryCondition(), mpcd._mpcd.boundary.no_slip) # change", "step where one particle will now hit the wall hoomd.run(1)", "mpcd.stream.slit(H=4.) 
slit.set_filler(density=5., kT=1.0, seed=42, type='A') self.assertTrue(slit._filler is not None) #", "0: np.testing.assert_array_almost_equal(snap.particles.position[0], [4.95,-4.95,3.85]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [-1.,1.,-1.]) np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.1,-0.1,-3.9]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [1.,1.,1.]) def", "snap = self.s.take_snapshot() if hoomd.comm.get_rank() == 0: np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.75,4.75,3.85]) np.testing.assert_array_almost_equal(snap.particles.velocity[0],", "too large raises an error def test_validate_box(self): # initial configuration", "closer, # it moves relative to original position. np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.4,-0.1,-3.9])", "class mpcd_stream_slit_test(unittest.TestCase): def setUp(self): # establish the simulation context hoomd.context.initialize()", "z for mpi builds if hoomd.comm.get_num_ranks() > 1: hoomd.comm.decomposition(nz=2) #", "kT=1.5, seed=7) self.assertTrue(slit._filler is not None) hoomd.run(1) # assert an", "error is raised if we set a bad density with", "initial configuration is invalid slit = mpcd.stream.slit(H=10.) 
with self.assertRaises(RuntimeError): hoomd.run(1)", "from hoomd import md from hoomd import mpcd # unit", "BCs slit.set_params(boundary=\"slip\") self.assertEqual(slit.boundary, \"slip\") self.assertEqual(slit._cpp.geometry.getBoundaryCondition(), mpcd._mpcd.boundary.slip) # test for invalid", "[-1.,-1.,-1.]) # take another step, wrapping the second particle through", "= [[4.95,-4.95,3.85],[0.,0.,-3.8]] snap.particles.velocity[:] = [[1.,-1.,1.],[-1.,-1.,-1.]] self.s = mpcd.init.read_snapshot(snap) mpcd.integrator(dt=0.1) #", "it started closer, # it moves relative to original position.", "slit.remove_filler() self.assertTrue(slit._filler is None) hoomd.run(1) def tearDown(self): del self.s if", "The Regents of the University of Michigan # This file", "parameters def test_set_params(self): slit = mpcd.stream.slit(H=4.) self.assertAlmostEqual(slit.H, 4.) self.assertAlmostEqual(slit.V, 0.)", "self.assertEqual(slit._cpp.geometry.getBoundaryCondition(), mpcd._mpcd.boundary.slip) # test for invalid boundary conditions being set", "if we set a bad particle type with self.assertRaises(RuntimeError): slit.set_filler(density=5.,", "one step hoomd.run(1) snap = self.s.take_snapshot() if hoomd.comm.get_rank() == 0:", "== 0: np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.95,4.95,3.95]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [-1.,1.,-1.]) np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.2,-0.2,-4.0]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [-1.,-1.,-1.])", "in z for mpi builds if hoomd.comm.get_num_ranks() > 1: hoomd.comm.decomposition(nz=2)", "# test that setting the slit size too large raises", "np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.1,-0.1,-3.9]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [1.,1.,1.]) def test_step_moving_wall(self): mpcd.stream.slit(H=4., boundary=\"no_slip\", V=1.0, period=3)", "This file is part of the HOOMD-blue 
project, released under", "another step, wrapping the second particle through the boundary hoomd.run(1)", "can be caught def test_out_of_bounds(self): slit = mpcd.stream.slit(H=3.8) with self.assertRaises(RuntimeError):", "3-Clause License. # Maintainer: mphoward import unittest import numpy as", "if hoomd.comm.get_rank() == 0: np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.75,4.75,3.85]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [1.,-1.,-1.]) np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.3,-0.3,-3.9])", "test_set_params(self): slit = mpcd.stream.slit(H=4.) self.assertAlmostEqual(slit.H, 4.) self.assertAlmostEqual(slit.V, 0.) self.assertEqual(slit.boundary, \"no_slip\")", "initialize the system from the starting snapshot snap = mpcd.data.make_snapshot(N=2)", "run one step and check bounce back of particles hoomd.run(1)", "we set a bad particle type with self.assertRaises(RuntimeError): slit.set_filler(density=5., kT=1.0,", "we can invalidate it again slit.set_params(H=10.) with self.assertRaises(RuntimeError): hoomd.run(1) #", "although this all happens silently hoomd.run(1) # changing the geometry", "seed=7) self.assertTrue(slit._filler is not None) hoomd.run(1) # assert an error", "import numpy as np import hoomd from hoomd import md", "snap = self.s.take_snapshot() if hoomd.comm.get_rank() == 0: np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.85,4.85,3.95]) np.testing.assert_array_almost_equal(snap.particles.velocity[0],", "with no slip boundary conditions def test_step_noslip(self): mpcd.stream.slit(H=4.) # take", "# test for invalid boundary conditions being set def test_bad_boundary(self):", "# test basic stepping behavior with slip boundary conditions def", "second particle has y and z velocities flip again, and", "the BSD 3-Clause License. 
# Maintainer: mphoward import unittest import", "filler should still allow a run slit.remove_filler() self.assertTrue(slit._filler is None)", "def tearDown(self): del self.s if __name__ == '__main__': unittest.main(argv =", "is not None) # run should be able to setup", "slip boundary conditions def test_step_noslip(self): mpcd.stream.slit(H=4.) # take one step", "test_step_slip(self): mpcd.stream.slit(H=4., boundary=\"slip\") # take one step hoomd.run(1) snap =", "conditions def test_step_noslip(self): mpcd.stream.slit(H=4.) # take one step hoomd.run(1) snap", "[1.,-1.,1.]) np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.1,-0.1,-3.9]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [-1.,-1.,-1.]) # take another step where", "hoomd.comm.get_rank() == 0: np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.95,4.95,3.95]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [1.,-1.,1.]) np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.1,-0.1,-3.9]) np.testing.assert_array_almost_equal(snap.particles.velocity[1],", "stays the same, and vy and vz flip.) 
np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.75,-4.95,3.85])", "slit.set_filler(density=10., kT=1.5, seed=7) self.assertTrue(slit._filler is not None) hoomd.run(1) # assert", "starting snapshot snap = mpcd.data.make_snapshot(N=2) snap.particles.position[:] = [[4.95,-4.95,3.85],[0.,0.,-3.8]] snap.particles.velocity[:] =", "[0.,1.,1.]) # test basic stepping behavior with slip boundary conditions", "np.testing.assert_array_almost_equal(snap.particles.velocity[1], [-1.,-1.,-1.]) # take another step, wrapping the second particle", "It will bounce back in y and z to where", "of the HOOMD-blue project, released under the BSD 3-Clause License.", "mpcd slit streaming geometry class mpcd_stream_slit_test(unittest.TestCase): def setUp(self): # establish", "velocity of lower particle so it is translating relative to", "tearDown(self): del self.s if __name__ == '__main__': unittest.main(argv = ['test.py',", "[-0.3,-0.3,-3.9]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [-1.,-1.,1.]) # test that setting the slit size", "stay the same slit.set_params(H=2.) self.assertAlmostEqual(slit.H, 2.) self.assertAlmostEqual(slit.V, 0.) self.assertEqual(slit.boundary, \"no_slip\")", "V=1.0, period=3) # change velocity of lower particle so it", "def test_step_slip(self): mpcd.stream.slit(H=4., boundary=\"slip\") # take one step hoomd.run(1) snap", "= [[1.,-1.,1.],[-1.,-1.,-1.]] self.s = mpcd.init.read_snapshot(snap) mpcd.integrator(dt=0.1) # test creation can", "caught def test_out_of_bounds(self): slit = mpcd.stream.slit(H=3.8) with self.assertRaises(RuntimeError): hoomd.run(1) slit.set_params(H=3.85)", "hoomd.run(1) snap = self.s.take_snapshot() if hoomd.comm.get_rank() == 0: np.testing.assert_array_almost_equal(snap.particles.position[0], [4.95,-4.95,3.85])", "conditions being set def test_bad_boundary(self): slit = mpcd.stream.slit(H=4.) 
slit.set_params(boundary=\"no_slip\") slit.set_params(boundary=\"slip\")", "def test_step_moving_wall(self): mpcd.stream.slit(H=4., boundary=\"no_slip\", V=1.0, period=3) # change velocity of", "np import hoomd from hoomd import md from hoomd import", "changing filler should be allowed slit.set_filler(density=10., kT=1.5, seed=7) self.assertTrue(slit._filler is", "hoomd.comm.get_rank() == 0: np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.95,4.95,3.95]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [-1.,1.,-1.]) np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.2,-0.2,-4.0]) np.testing.assert_array_almost_equal(snap.particles.velocity[1],", "snap = self.s.take_snapshot() if hoomd.comm.get_rank() == 0: np.testing.assert_array_almost_equal(snap.particles.position[0], [4.95,-4.95,3.85]) np.testing.assert_array_almost_equal(snap.particles.velocity[0],", "set def test_bad_boundary(self): slit = mpcd.stream.slit(H=4.) slit.set_params(boundary=\"no_slip\") slit.set_params(boundary=\"slip\") with self.assertRaises(ValueError):", "hoomd.run(1) snap = self.s.take_snapshot() if hoomd.comm.get_rank() == 0: np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.85,4.85,3.95])", "4.) self.assertAlmostEqual(slit.V, 0.) self.assertEqual(slit.boundary, \"no_slip\") self.assertAlmostEqual(slit._cpp.geometry.getH(), 4.) self.assertAlmostEqual(slit._cpp.geometry.getVelocity(), 0.) self.assertEqual(slit._cpp.geometry.getBoundaryCondition(),", "original position. 
np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.4,-0.1,-3.9]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [0.,1.,1.]) # test basic stepping", "self.assertEqual(slit.boundary, \"slip\") self.assertEqual(slit._cpp.geometry.getBoundaryCondition(), mpcd._mpcd.boundary.slip) # test for invalid boundary conditions", "parameters set) def test_create(self): mpcd.stream.slit(H=4., V=0.1, boundary=\"no_slip\", period=2) # test", "4.) self.assertAlmostEqual(slit._cpp.geometry.getVelocity(), 0.) self.assertEqual(slit._cpp.geometry.getBoundaryCondition(), mpcd._mpcd.boundary.no_slip) # change H and also", "None) hoomd.run(1) def tearDown(self): del self.s if __name__ == '__main__':", "self.s.take_snapshot() if hoomd.comm.get_rank() == 0: np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.95,4.95,3.95]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [1.,-1.,1.]) np.testing.assert_array_almost_equal(snap.particles.position[1],", "self.assertRaises(RuntimeError): hoomd.run(1) # now it should be valid slit.set_params(H=4.) hoomd.run(2)", "hoomd.run(1) # changing the geometry should still be OK with", "an error def test_validate_box(self): # initial configuration is invalid slit", "if we set a bad density with self.assertRaises(RuntimeError): slit.set_filler(density=-1.0, kT=1.0,", "np.testing.assert_array_almost_equal(snap.particles.velocity[1], [0.,1.,1.]) # test basic stepping behavior with slip boundary", "self.assertAlmostEqual(slit._cpp.geometry.getVelocity(), 0.) self.assertEqual(slit._cpp.geometry.getBoundaryCondition(), mpcd._mpcd.boundary.no_slip) # change V slit.set_params(V=0.1) self.assertAlmostEqual(slit.V, 0.1)", "# run one step and check bounce back of particles", "\"no_slip\") self.assertAlmostEqual(slit._cpp.geometry.getH(), 4.) self.assertAlmostEqual(slit._cpp.geometry.getVelocity(), 0.) 
self.assertEqual(slit._cpp.geometry.getBoundaryCondition(), mpcd._mpcd.boundary.no_slip) # change H", "snap.particles.velocity[:] = [[1.,-1.,1.],[-1.,-1.,-1.]] self.s = mpcd.init.read_snapshot(snap) mpcd.integrator(dt=0.1) # test creation", "be able to setup the filler, although this all happens", "be OK with a run slit.set_params(V=1.0) hoomd.run(1) # changing filler", "None) # run should be able to setup the filler,", "a bad particle type with self.assertRaises(RuntimeError): slit.set_filler(density=5., kT=1.0, seed=42, type='B')", "allow a run slit.remove_filler() self.assertTrue(slit._filler is None) hoomd.run(1) def tearDown(self):", "large raises an error def test_validate_box(self): # initial configuration is", "hoomd.context.initialize() # set the decomposition in z for mpi builds", "where one particle will now hit the wall hoomd.run(1) snap", "[1.,1.,-1.]) # the second particle has y and z velocities", "vz flip.) np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.75,-4.95,3.85]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [1.,1.,-1.]) # the second particle", "not None) # run should be able to setup the", "still allow a run slit.remove_filler() self.assertTrue(slit._filler is None) hoomd.run(1) def", "that setting the slit size too large raises an error", "also ensure other parameters stay the same slit.set_params(H=2.) self.assertAlmostEqual(slit.H, 2.)", "self.s.restore_snapshot(snap) # run one step and check bounce back of", "= mpcd.stream.slit(H=10.) 
with self.assertRaises(RuntimeError): hoomd.run(1) # now it should be", "speed, and so it will translate at # same velocity", "# take one step hoomd.run(1) snap = self.s.take_snapshot() if hoomd.comm.get_rank()", "# changing filler should be allowed slit.set_filler(density=10., kT=1.5, seed=7) self.assertTrue(slit._filler", "test that virtual particle filler can be attached, removed, and", "mpcd.data.make_snapshot(N=2) snap.particles.position[:] = [[4.95,-4.95,3.85],[0.,0.,-3.8]] snap.particles.velocity[:] = [[1.,-1.,1.],[-1.,-1.,-1.]] self.s = mpcd.init.read_snapshot(snap)", "self.assertAlmostEqual(slit._cpp.geometry.getVelocity(), 0.1) # change BCs slit.set_params(boundary=\"slip\") self.assertEqual(slit.boundary, \"slip\") self.assertEqual(slit._cpp.geometry.getBoundaryCondition(), mpcd._mpcd.boundary.slip)", "with self.assertRaises(RuntimeError): hoomd.run(1) # test that particles out of bounds", "slit.set_params(V=0.1) self.assertAlmostEqual(slit.V, 0.1) self.assertAlmostEqual(slit._cpp.geometry.getVelocity(), 0.1) # change BCs slit.set_params(boundary=\"slip\") self.assertEqual(slit.boundary,", "with self.assertRaises(RuntimeError): slit.set_filler(density=-1.0, kT=1.0, seed=42) # removing the filler should", "velocity along +x for 3 steps. It will bounce back", "a filler slit = mpcd.stream.slit(H=4.) slit.set_filler(density=5., kT=1.0, seed=42, type='A') self.assertTrue(slit._filler", "type='B') # assert an error is raised if we set", "import hoomd from hoomd import md from hoomd import mpcd", "test_validate_box(self): # initial configuration is invalid slit = mpcd.stream.slit(H=10.) 
with", "slit.set_params(boundary=\"invalid\") # test basic stepping behavior with no slip boundary", "# the first particle is matched exactly to the wall", "[[1.,-1.,1.],[-1.,-1.,-1.]] self.s = mpcd.init.read_snapshot(snap) mpcd.integrator(dt=0.1) # test creation can happen", "the filler should still allow a run slit.remove_filler() self.assertTrue(slit._filler is", "self.assertAlmostEqual(slit._cpp.geometry.getVelocity(), 0.) self.assertEqual(slit._cpp.geometry.getBoundaryCondition(), mpcd._mpcd.boundary.no_slip) # change H and also ensure", "# change H and also ensure other parameters stay the", "# Copyright (c) 2009-2019 The Regents of the University of", "boundary hoomd.run(1) snap = self.s.take_snapshot() if hoomd.comm.get_rank() == 0: np.testing.assert_array_almost_equal(snap.particles.position[0],", "for mpi builds if hoomd.comm.get_num_ranks() > 1: hoomd.comm.decomposition(nz=2) # default", "through the boundary hoomd.run(1) snap = self.s.take_snapshot() if hoomd.comm.get_rank() ==", "[-4.75,-4.95,3.85]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [1.,1.,-1.]) # the second particle has y and", "def test_out_of_bounds(self): slit = mpcd.stream.slit(H=3.8) with self.assertRaises(RuntimeError): hoomd.run(1) slit.set_params(H=3.85) hoomd.run(1)", "invalid boundary conditions being set def test_bad_boundary(self): slit = mpcd.stream.slit(H=4.)", "test basic stepping behavior with no slip boundary conditions def", "self.assertAlmostEqual(slit._cpp.geometry.getH(), 4.) self.assertAlmostEqual(slit._cpp.geometry.getVelocity(), 0.) self.assertEqual(slit._cpp.geometry.getBoundaryCondition(), mpcd._mpcd.boundary.no_slip) # change H and", "with self.assertRaises(ValueError): slit.set_params(boundary=\"invalid\") # test basic stepping behavior with no", "= mpcd.stream.slit(H=4.) slit.set_filler(density=5., kT=1.0, seed=42, type='A') self.assertTrue(slit._filler is not None)", "slit.set_params(H=2.) self.assertAlmostEqual(slit.H, 2.) 
self.assertAlmostEqual(slit.V, 0.) self.assertEqual(slit.boundary, \"no_slip\") self.assertAlmostEqual(slit._cpp.geometry.getH(), 2.) self.assertAlmostEqual(slit._cpp.geometry.getVelocity(),", "step hoomd.run(1) snap = self.s.take_snapshot() if hoomd.comm.get_rank() == 0: np.testing.assert_array_almost_equal(snap.particles.position[0],", "hoomd.run(1) # test that particles out of bounds can be", "# establish the simulation context hoomd.context.initialize() # set the decomposition", "if hoomd.comm.get_rank() == 0: np.testing.assert_array_almost_equal(snap.particles.position[0], [4.95,-4.95,3.85]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [-1.,1.,-1.]) np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.1,-0.1,-3.9])", "able to setup the filler, although this all happens silently", "will bounce back in y and z to where it", "initialization of a filler slit = mpcd.stream.slit(H=4.) slit.set_filler(density=5., kT=1.0, seed=42,", "(with all parameters set) def test_create(self): mpcd.stream.slit(H=4., V=0.1, boundary=\"no_slip\", period=2)", "np.testing.assert_array_almost_equal(snap.particles.velocity[0], [-1.,1.,-1.]) np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.1,-0.1,-3.9]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [1.,1.,1.]) def test_step_moving_wall(self): mpcd.stream.slit(H=4., boundary=\"no_slip\",", "hoomd from hoomd import md from hoomd import mpcd #", "being set def test_bad_boundary(self): slit = mpcd.stream.slit(H=4.) 
slit.set_params(boundary=\"no_slip\") slit.set_params(boundary=\"slip\") with", "bounce back of particles hoomd.run(1) snap = self.s.take_snapshot() if hoomd.comm.get_rank()", "[-4.85,4.85,3.95]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [1.,-1.,-1.]) np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.2,-0.2,-4.0]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [-1.,-1.,-1.]) # take another", "project, released under the BSD 3-Clause License. # Maintainer: mphoward", "0.) self.assertEqual(slit._cpp.geometry.getBoundaryCondition(), mpcd._mpcd.boundary.no_slip) # change H and also ensure other", "= mpcd.data.make_snapshot(N=2) snap.particles.position[:] = [[4.95,-4.95,3.85],[0.,0.,-3.8]] snap.particles.velocity[:] = [[1.,-1.,1.],[-1.,-1.,-1.]] self.s =", "mpcd.stream.slit(H=10.) with self.assertRaises(RuntimeError): hoomd.run(1) # now it should be valid", "# the second particle has y and z velocities flip", "slit.set_params(H=10.) with self.assertRaises(RuntimeError): hoomd.run(1) # test that particles out of", "with a run slit.set_params(V=1.0) hoomd.run(1) # changing filler should be", "self.s.take_snapshot() if hoomd.comm.get_rank() == 0: np.testing.assert_array_almost_equal(snap.particles.position[0], [4.95,-4.95,3.85]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [-1.,1.,-1.]) np.testing.assert_array_almost_equal(snap.particles.position[1],", "should be valid slit.set_params(H=4.) 
hoomd.run(2) # make sure we can", "second particle through the boundary hoomd.run(1) snap = self.s.take_snapshot() if", "of Michigan # This file is part of the HOOMD-blue", "period=3) # change velocity of lower particle so it is", "stepping behavior with no slip boundary conditions def test_step_noslip(self): mpcd.stream.slit(H=4.)", "0: np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.95,4.95,3.95]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [-1.,1.,-1.]) np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.2,-0.2,-4.0]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [-1.,-1.,-1.]) #", "[1.,-1.,-1.]) np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.3,-0.3,-3.9]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [-1.,-1.,1.]) # test that setting the", "hoomd.run(1) # test that virtual particle filler can be attached,", "BSD 3-Clause License. # Maintainer: mphoward import unittest import numpy", "def test_create(self): mpcd.stream.slit(H=4., V=0.1, boundary=\"no_slip\", period=2) # test for setting", "> 1: hoomd.comm.decomposition(nz=2) # default testing configuration hoomd.init.read_snapshot(hoomd.data.make_snapshot(N=0, box=hoomd.data.boxdim(L=10.))) #", "np.testing.assert_array_almost_equal(snap.particles.velocity[0], [1.,1.,-1.]) # the second particle has y and z", "assert an error is raised if we set a bad", "change velocity of lower particle so it is translating relative", "# test for setting parameters def test_set_params(self): slit = mpcd.stream.slit(H=4.)", "released under the BSD 3-Clause License. 
# Maintainer: mphoward import", "and since it started closer, # it moves relative to", "# changing the geometry should still be OK with a", "Copyright (c) 2009-2019 The Regents of the University of Michigan", "[4.95,-4.95,3.85]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [-1.,1.,-1.]) np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.1,-0.1,-3.9]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [1.,1.,1.]) def test_step_moving_wall(self): mpcd.stream.slit(H=4.,", "slit.set_params(boundary=\"slip\") self.assertEqual(slit.boundary, \"slip\") self.assertEqual(slit._cpp.geometry.getBoundaryCondition(), mpcd._mpcd.boundary.slip) # test for invalid boundary", "= mpcd.stream.slit(H=4.) self.assertAlmostEqual(slit.H, 4.) self.assertAlmostEqual(slit.V, 0.) self.assertEqual(slit.boundary, \"no_slip\") self.assertAlmostEqual(slit._cpp.geometry.getH(), 4.)", "Regents of the University of Michigan # This file is", "now hit the wall hoomd.run(1) snap = self.s.take_snapshot() if hoomd.comm.get_rank()", "V slit.set_params(V=0.1) self.assertAlmostEqual(slit.V, 0.1) self.assertAlmostEqual(slit._cpp.geometry.getVelocity(), 0.1) # change BCs slit.set_params(boundary=\"slip\")", "== 0: np.testing.assert_array_almost_equal(snap.particles.position[0], [-4.85,4.85,3.95]) np.testing.assert_array_almost_equal(snap.particles.velocity[0], [1.,-1.,-1.]) np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.2,-0.2,-4.0]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [-1.,-1.,-1.])", "[-1.,-1.,1.]) # test that setting the slit size too large", "self.assertRaises(RuntimeError): hoomd.run(1) # test that particles out of bounds can", "np.testing.assert_array_almost_equal(snap.particles.velocity[0], [1.,-1.,-1.]) np.testing.assert_array_almost_equal(snap.particles.position[1], [-0.3,-0.3,-3.9]) np.testing.assert_array_almost_equal(snap.particles.velocity[1], [-1.,-1.,1.]) # test that setting", "snap = 
mpcd.data.make_snapshot(N=2) snap.particles.position[:] = [[4.95,-4.95,3.85],[0.,0.,-3.8]] snap.particles.velocity[:] = [[1.,-1.,1.],[-1.,-1.,-1.]] self.s", "hoomd.run(1) def tearDown(self): del self.s if __name__ == '__main__': unittest.main(argv", "simulation context hoomd.context.initialize() # set the decomposition in z for", "started. # (vx stays the same, and vy and vz" ]
[ "models.QuestionGroupDTO( self.third_question_group_id, {'description': self.third_question_group_description, 'items': [{'question': str(self.used_twice_question_id)}]}) models.QuestionGroupDAO.save_all([ self.first_question_group_dto, self.second_question_group_dto,", "self.assertEqual( 'transformed_' + user_id, exported.key_by_user_id.name()) self.assertEqual( models.Student.safe_key(key, self.transform), exported.safe_key) def", "2.0 (the \"License\"); # you may not use this file", "limitations under the License. \"\"\"Functional tests for models.models.\"\"\" __author__ =", "the License is distributed on an \"AS-IS\" BASIS, # WITHOUT", "= models.QuestionGroupDTO( self.first_question_group_id, {'description': self.first_question_group_description, 'items': [{'question': str(self.used_once_question_id)}]}) self.second_question_group_description =", "\"AS-IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "= models.Student(key_name='name').put() self.assertEqual( 'transformed_name', models.Student.safe_key(key, self.transform).name()) class StudentAnswersEntityTestCase(actions.ExportTestBase): def test_safe_key_transforms_name(self):", "from tests.functional import actions # Disable complaints about docstrings for", "self.third_question_group_id = 6 self.third_question_group_dto = models.QuestionGroupDTO( self.third_question_group_id, {'description': self.third_question_group_description, 'items':", "= student.put() exported = student.for_export(self.transform) self.assert_blacklisted_properties_removed(student, exported) self.assertTrue(exported.is_enrolled) self.assertEqual('transformed_1', exported.user_id)", "models.QuestionGroupDTO( self.second_question_group_id, {'description': self.second_question_group_description, 'items': [{'question': str(self.used_twice_question_id)}]}) self.third_question_group_description = 'third_question_group'", "self.first_question_group_description, 'items': [{'question': str(self.used_once_question_id)}]}) 
self.second_question_group_description = 'second_question_group' self.second_question_group_id = 5", "test_for_export_transforms_correctly(self): user_id = '1' student = models.Student(key_name='name', user_id='1', is_enrolled=True) key", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "str(self.used_once_question_id)}]}) self.second_question_group_description = 'second_question_group' self.second_question_group_id = 5 self.second_question_group_dto = models.QuestionGroupDTO(", "models.QuestionDTO( self.used_twice_question_id, {}) self.used_once_question_id = 2 self.used_once_question_dto = models.QuestionDTO( self.used_once_question_id,", "self.assertEqual('transformed_1', exported.user_id) self.assertEqual(key, models.EventEntity.safe_key(key, self.transform)) class PersonalProfileTestCase(actions.ExportTestBase): def test_for_export_transforms_correctly_and_sets_safe_key(self): date_of_birth", "= 2 self.used_once_question_dto = models.QuestionDTO( self.used_once_question_id, {}) self.unused_question_id = 3", "test_for_export_transforms_correctly(self): event = models.EventEntity(source='source', user_id='1') key = event.put() exported =", "user_id=user_id) student.put() property_name = 'property-name' student_property_key = models.StudentPropertyEntity.create( student, property_name).put()", "= 'user_id' student = models.Student(key_name='<EMAIL>', user_id=user_id) student.put() property_name = 'property-name'", "use this file except in compliance with the License. #", "self.third_question_group_dto]) def test_used_by_returns_description_of_single_question_group(self): self.assertEqual( [self.first_question_group_description], models.QuestionDAO.used_by(self.used_once_question_id)) def test_used_by_returns_descriptions_of_multiple_question_groups(self): self.assertEqual( [self.second_question_group_description,", "self.used_once_question_dto, self.unused_question_dto]) # Handcoding the dicts. 
This is dangerous because", "exported = student.for_export(self.transform) self.assert_blacklisted_properties_removed(student, exported) self.assertTrue(exported.is_enrolled) self.assertEqual('transformed_1', exported.user_id) self.assertEqual( 'transformed_'", "implementations could fall out of sync, and these tests #", "[{'question': str(self.used_twice_question_id)}]}) self.third_question_group_description = 'third_question_group' self.third_question_group_id = 6 self.third_question_group_dto =", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "[{'question': str(self.used_once_question_id)}]}) self.second_question_group_description = 'second_question_group' self.second_question_group_id = 5 self.second_question_group_dto =", "License. # You may obtain a copy of the License", "contents.\"\"\" super(QuestionDAOTestCase, self).setUp() self.used_twice_question_id = 1 self.used_twice_question_dto = models.QuestionDTO( self.used_twice_question_id,", "'second_question_group' self.second_question_group_id = 5 self.second_question_group_dto = models.QuestionGroupDTO( self.second_question_group_id, {'description': self.second_question_group_description,", "test_safe_key_transforms_name(self): key = models.Student(key_name='name').put() self.assertEqual( 'transformed_name', models.Student.safe_key(key, self.transform).name()) class StudentAnswersEntityTestCase(actions.ExportTestBase):", "class EventEntityTestCase(actions.ExportTestBase): def test_for_export_transforms_correctly(self): event = models.EventEntity(source='source', user_id='1') key =", "test_safe_key_transforms_name(self): student_key = models.Student(key_name='name').put() answers = models.StudentAnswersEntity(key_name=student_key.name()) answers_key = answers.put()", "property_name = 'property-name' student_property_key = models.StudentPropertyEntity.create( student, property_name).put() self.assertEqual( 'transformed_%s-%s'", "[{'question': 
str(self.used_twice_question_id)}]}) models.QuestionGroupDAO.save_all([ self.first_question_group_dto, self.second_question_group_dto, self.third_question_group_dto]) def test_used_by_returns_description_of_single_question_group(self): self.assertEqual( [self.first_question_group_description],", "7 self.assertFalse(models.QuestionDAO.load(not_found_id)) self.assertEqual([], models.QuestionDAO.used_by(not_found_id)) class StudentTestCase(actions.ExportTestBase): def test_for_export_transforms_correctly(self): user_id =", "License for the specific language governing permissions and # limitations", "legal_name=legal_name, nick_name=nick_name) profile.put() exported = profile.for_export(self.transform) self.assert_blacklisted_properties_removed(profile, exported) self.assertEqual( self.transform(user_id),", "user_id, exported.key_by_user_id.name()) self.assertEqual( models.Student.safe_key(key, self.transform), exported.safe_key) def test_get_key_does_not_transform_by_default(self): user_id =", "models.QuestionDAO.used_by(not_found_id)) class StudentTestCase(actions.ExportTestBase): def test_for_export_transforms_correctly(self): user_id = '1' student =", "Reserved. # # Licensed under the Apache License, Version 2.0", "'legal_name' nick_name = 'nick_name' user_id = '1' profile = models.PersonalProfile(", "event.put() exported = event.for_export(self.transform) self.assert_blacklisted_properties_removed(event, exported) self.assertEqual('source', event.source) self.assertEqual('transformed_1', exported.user_id)", "# limitations under the License. 
\"\"\"Functional tests for models.models.\"\"\" __author__", "models from tests.functional import actions # Disable complaints about docstrings", "self.unused_question_id = 3 self.unused_question_dto = models.QuestionDTO( self.unused_question_id, {}) models.QuestionDAO.save_all([ self.used_twice_question_dto,", "self.used_twice_question_dto = models.QuestionDTO( self.used_twice_question_id, {}) self.used_once_question_id = 2 self.used_once_question_dto =", "'user_id' student = models.Student(key_name='name', user_id=user_id) student.put() self.assertEqual(user_id, student.get_key().name()) def test_safe_key_transforms_name(self):", "distributed under the License is distributed on an \"AS-IS\" BASIS,", "key_name=user_id, legal_name=legal_name, nick_name=nick_name) profile.put() exported = profile.for_export(self.transform) self.assert_blacklisted_properties_removed(profile, exported) self.assertEqual(", "exported) self.assertTrue(exported.is_enrolled) self.assertEqual('transformed_1', exported.user_id) self.assertEqual( 'transformed_' + user_id, exported.key_by_user_id.name()) self.assertEqual(", "in compliance with the License. # You may obtain a", "software # distributed under the License is distributed on an", "disable-msg=g-bad-name def setUp(self): \"\"\"Sets up datastore contents.\"\"\" super(QuestionDAOTestCase, self).setUp() self.used_twice_question_id", "test_safe_key_transforms_user_id_component(self): user_id = 'user_id' student = models.Student(key_name='<EMAIL>', user_id=user_id) student.put() property_name", "Copyright 2013 Google Inc. All Rights Reserved. # # Licensed", "# Handcoding the dicts. 
This is dangerous because they're handcoded", "= answers.put() self.assertEqual( 'transformed_name', models.StudentAnswersEntity.safe_key( answers_key, self.transform).name()) class StudentPropertyEntityTestCase(actions.ExportTestBase): def", "exported) self.assertEqual('source', event.source) self.assertEqual('transformed_1', exported.user_id) self.assertEqual(key, models.EventEntity.safe_key(key, self.transform)) class PersonalProfileTestCase(actions.ExportTestBase):", "distributed on an \"AS-IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "setUp(self): \"\"\"Sets up datastore contents.\"\"\" super(QuestionDAOTestCase, self).setUp() self.used_twice_question_id = 1", "def test_used_by_returns_descriptions_of_multiple_question_groups(self): self.assertEqual( [self.second_question_group_description, self.third_question_group_description], models.QuestionDAO.used_by(self.used_twice_question_id)) def test_used_by_returns_empty_list_for_unused_question(self): not_found_id =", "\"\"\"Sets up datastore contents.\"\"\" super(QuestionDAOTestCase, self).setUp() self.used_twice_question_id = 1 self.used_twice_question_dto", "def test_safe_key_transforms_name(self): key = models.Student(key_name='name').put() self.assertEqual( 'transformed_name', models.Student.safe_key(key, self.transform).name()) class", "disable-msg=g-missing-docstring class EventEntityTestCase(actions.ExportTestBase): def test_for_export_transforms_correctly(self): event = models.EventEntity(source='source', user_id='1') key", "models.Student(key_name='name', user_id='1', is_enrolled=True) key = student.put() exported = student.for_export(self.transform) self.assert_blacklisted_properties_removed(student,", "tests # may then pass erroneously. 
self.first_question_group_description = 'first_question_group' self.first_question_group_id", "# distributed under the License is distributed on an \"AS-IS\"", "self.assertEqual( [self.second_question_group_description, self.third_question_group_description], models.QuestionDAO.used_by(self.used_twice_question_id)) def test_used_by_returns_empty_list_for_unused_question(self): not_found_id = 7 self.assertFalse(models.QuestionDAO.load(not_found_id))", "Handcoding the dicts. This is dangerous because they're handcoded #", "models.EventEntity(source='source', user_id='1') key = event.put() exported = event.for_export(self.transform) self.assert_blacklisted_properties_removed(event, exported)", "user_id = '1' profile = models.PersonalProfile( date_of_birth=date_of_birth, email=email, key_name=user_id, legal_name=legal_name,", "'property-name' student_property_key = models.StudentPropertyEntity.create( student, property_name).put() self.assertEqual( 'transformed_%s-%s' % (user_id,", "for models.models.\"\"\" __author__ = [ '<EMAIL> (<NAME>)', ] import datetime", "self.transform(user_id), exported.safe_key.name()) class QuestionDAOTestCase(actions.TestBase): \"\"\"Functional tests for QuestionDAO.\"\"\" # Name", "self.first_question_group_id, {'description': self.first_question_group_description, 'items': [{'question': str(self.used_once_question_id)}]}) self.second_question_group_description = 'second_question_group' self.second_question_group_id", "self.assertFalse(models.QuestionDAO.load(not_found_id)) self.assertEqual([], models.QuestionDAO.used_by(not_found_id)) class StudentTestCase(actions.ExportTestBase): def test_for_export_transforms_correctly(self): user_id = '1'", "models.StudentAnswersEntity.safe_key( answers_key, self.transform).name()) class StudentPropertyEntityTestCase(actions.ExportTestBase): def test_safe_key_transforms_user_id_component(self): user_id = 'user_id'", "import models from tests.functional import actions # Disable complaints about", 
"models.Student.safe_key(key, self.transform), exported.safe_key) def test_get_key_does_not_transform_by_default(self): user_id = 'user_id' student =", "models.Student(key_name='name').put() self.assertEqual( 'transformed_name', models.Student.safe_key(key, self.transform).name()) class StudentAnswersEntityTestCase(actions.ExportTestBase): def test_safe_key_transforms_name(self): student_key", "self.second_question_group_dto = models.QuestionGroupDTO( self.second_question_group_id, {'description': self.second_question_group_description, 'items': [{'question': str(self.used_twice_question_id)}]}) self.third_question_group_description", "OF ANY KIND, either express or implied. # See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "{}) models.QuestionDAO.save_all([ self.used_twice_question_dto, self.used_once_question_dto, self.unused_question_dto]) # Handcoding the dicts. This", "ANY KIND, either express or implied. # See the License", "See the License for the specific language governing permissions and", "PersonalProfileTestCase(actions.ExportTestBase): def test_for_export_transforms_correctly_and_sets_safe_key(self): date_of_birth = datetime.date.today() email = '<EMAIL>' legal_name", "the License. 
# You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "to in writing, software # distributed under the License is", "models.PersonalProfile( date_of_birth=date_of_birth, email=email, key_name=user_id, legal_name=legal_name, nick_name=nick_name) profile.put() exported = profile.for_export(self.transform)", "# See the License for the specific language governing permissions", "def test_used_by_returns_description_of_single_question_group(self): self.assertEqual( [self.first_question_group_description], models.QuestionDAO.used_by(self.used_once_question_id)) def test_used_by_returns_descriptions_of_multiple_question_groups(self): self.assertEqual( [self.second_question_group_description, self.third_question_group_description],", "or agreed to in writing, software # distributed under the", "student.get_key().name()) def test_safe_key_transforms_name(self): key = models.Student(key_name='name').put() self.assertEqual( 'transformed_name', models.Student.safe_key(key, self.transform).name())", "required by applicable law or agreed to in writing, software", "erroneously. 
self.first_question_group_description = 'first_question_group' self.first_question_group_id = 4 self.first_question_group_dto = models.QuestionGroupDTO(", "= models.QuestionGroupDTO( self.third_question_group_id, {'description': self.third_question_group_description, 'items': [{'question': str(self.used_twice_question_id)}]}) models.QuestionGroupDAO.save_all([ self.first_question_group_dto,", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "user_id=user_id) student.put() self.assertEqual(user_id, student.get_key().name()) def test_safe_key_transforms_name(self): key = models.Student(key_name='name').put() self.assertEqual(", "test_used_by_returns_description_of_single_question_group(self): self.assertEqual( [self.first_question_group_description], models.QuestionDAO.used_by(self.used_once_question_id)) def test_used_by_returns_descriptions_of_multiple_question_groups(self): self.assertEqual( [self.second_question_group_description, self.third_question_group_description], models.QuestionDAO.used_by(self.used_twice_question_id))", "with the License. 
# You may obtain a copy of", "= 'second_question_group' self.second_question_group_id = 5 self.second_question_group_dto = models.QuestionGroupDTO( self.second_question_group_id, {'description':", "__author__ = [ '<EMAIL> (<NAME>)', ] import datetime from models", "License is distributed on an \"AS-IS\" BASIS, # WITHOUT WARRANTIES", "student = models.Student(key_name='<EMAIL>', user_id=user_id) student.put() property_name = 'property-name' student_property_key =", "event.for_export(self.transform) self.assert_blacklisted_properties_removed(event, exported) self.assertEqual('source', event.source) self.assertEqual('transformed_1', exported.user_id) self.assertEqual(key, models.EventEntity.safe_key(key, self.transform))", "key = models.Student(key_name='name').put() self.assertEqual( 'transformed_name', models.Student.safe_key(key, self.transform).name()) class StudentAnswersEntityTestCase(actions.ExportTestBase): def", "is dangerous because they're handcoded # elsewhere, the implementations could", "compliance with the License. # You may obtain a copy", "All Rights Reserved. # # Licensed under the Apache License,", "agreed to in writing, software # distributed under the License", "import datetime from models import models from tests.functional import actions", "models.QuestionDTO( self.used_once_question_id, {}) self.unused_question_id = 3 self.unused_question_dto = models.QuestionDTO( self.unused_question_id,", "self.third_question_group_dto = models.QuestionGroupDTO( self.third_question_group_id, {'description': self.third_question_group_description, 'items': [{'question': str(self.used_twice_question_id)}]}) models.QuestionGroupDAO.save_all([", "Inc. All Rights Reserved. 
# # Licensed under the Apache", "student = models.Student(key_name='name', user_id='1', is_enrolled=True) key = student.put() exported =", "= '1' profile = models.PersonalProfile( date_of_birth=date_of_birth, email=email, key_name=user_id, legal_name=legal_name, nick_name=nick_name)", "up datastore contents.\"\"\" super(QuestionDAOTestCase, self).setUp() self.used_twice_question_id = 1 self.used_twice_question_dto =", "'1' profile = models.PersonalProfile( date_of_birth=date_of_birth, email=email, key_name=user_id, legal_name=legal_name, nick_name=nick_name) profile.put()", "email = '<EMAIL>' legal_name = 'legal_name' nick_name = 'nick_name' user_id", "express or implied. # See the License for the specific", "except in compliance with the License. # You may obtain", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "self.second_question_group_description = 'second_question_group' self.second_question_group_id = 5 self.second_question_group_dto = models.QuestionGroupDTO( self.second_question_group_id,", "sync, and these tests # may then pass erroneously. 
self.first_question_group_description", "self.assertEqual('source', event.source) self.assertEqual('transformed_1', exported.user_id) self.assertEqual(key, models.EventEntity.safe_key(key, self.transform)) class PersonalProfileTestCase(actions.ExportTestBase): def", "not use this file except in compliance with the License.", "from models import models from tests.functional import actions # Disable", "writing, software # distributed under the License is distributed on", "test_used_by_returns_empty_list_for_unused_question(self): not_found_id = 7 self.assertFalse(models.QuestionDAO.load(not_found_id)) self.assertEqual([], models.QuestionDAO.used_by(not_found_id)) class StudentTestCase(actions.ExportTestBase): def", "self.third_question_group_description = 'third_question_group' self.third_question_group_id = 6 self.third_question_group_dto = models.QuestionGroupDTO( self.third_question_group_id,", "you may not use this file except in compliance with", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "= models.QuestionDTO( self.unused_question_id, {}) models.QuestionDAO.save_all([ self.used_twice_question_dto, self.used_once_question_dto, self.unused_question_dto]) # Handcoding", "# Disable complaints about docstrings for self-documenting tests. 
# pylint:", "= models.PersonalProfile( date_of_birth=date_of_birth, email=email, key_name=user_id, legal_name=legal_name, nick_name=nick_name) profile.put() exported =", "= profile.for_export(self.transform) self.assert_blacklisted_properties_removed(profile, exported) self.assertEqual( self.transform(user_id), exported.safe_key.name()) class QuestionDAOTestCase(actions.TestBase): \"\"\"Functional", "self.used_twice_question_id = 1 self.used_twice_question_dto = models.QuestionDTO( self.used_twice_question_id, {}) self.used_once_question_id =", "test_for_export_transforms_correctly_and_sets_safe_key(self): date_of_birth = datetime.date.today() email = '<EMAIL>' legal_name = 'legal_name'", "self.assertEqual( self.transform(user_id), exported.safe_key.name()) class QuestionDAOTestCase(actions.TestBase): \"\"\"Functional tests for QuestionDAO.\"\"\" #", "dicts. This is dangerous because they're handcoded # elsewhere, the", "CONDITIONS OF ANY KIND, either express or implied. # See", "docstrings for self-documenting tests. # pylint: disable-msg=g-missing-docstring class EventEntityTestCase(actions.ExportTestBase): def", "self.assert_blacklisted_properties_removed(event, exported) self.assertEqual('source', event.source) self.assertEqual('transformed_1', exported.user_id) self.assertEqual(key, models.EventEntity.safe_key(key, self.transform)) class", "class PersonalProfileTestCase(actions.ExportTestBase): def test_for_export_transforms_correctly_and_sets_safe_key(self): date_of_birth = datetime.date.today() email = '<EMAIL>'", "def test_safe_key_transforms_name(self): student_key = models.Student(key_name='name').put() answers = models.StudentAnswersEntity(key_name=student_key.name()) answers_key =", "# Copyright 2013 Google Inc. All Rights Reserved. # #", "determined by parent. 
pylint: disable-msg=g-bad-name def setUp(self): \"\"\"Sets up datastore", "models.StudentPropertyEntity.create( student, property_name).put() self.assertEqual( 'transformed_%s-%s' % (user_id, property_name), models.StudentPropertyEntity.safe_key( student_property_key,", "= models.StudentAnswersEntity(key_name=student_key.name()) answers_key = answers.put() self.assertEqual( 'transformed_name', models.StudentAnswersEntity.safe_key( answers_key, self.transform).name())", "these tests # may then pass erroneously. self.first_question_group_description = 'first_question_group'", "def test_for_export_transforms_correctly(self): event = models.EventEntity(source='source', user_id='1') key = event.put() exported", "= models.QuestionGroupDTO( self.second_question_group_id, {'description': self.second_question_group_description, 'items': [{'question': str(self.used_twice_question_id)}]}) self.third_question_group_description =", "self.used_twice_question_dto, self.used_once_question_dto, self.unused_question_dto]) # Handcoding the dicts. This is dangerous", "self).setUp() self.used_twice_question_id = 1 self.used_twice_question_dto = models.QuestionDTO( self.used_twice_question_id, {}) self.used_once_question_id", "License. 
\"\"\"Functional tests for models.models.\"\"\" __author__ = [ '<EMAIL> (<NAME>)',", "= event.put() exported = event.for_export(self.transform) self.assert_blacklisted_properties_removed(event, exported) self.assertEqual('source', event.source) self.assertEqual('transformed_1',", "self.assert_blacklisted_properties_removed(profile, exported) self.assertEqual( self.transform(user_id), exported.safe_key.name()) class QuestionDAOTestCase(actions.TestBase): \"\"\"Functional tests for", "= models.EventEntity(source='source', user_id='1') key = event.put() exported = event.for_export(self.transform) self.assert_blacklisted_properties_removed(event,", "self.assertEqual( [self.first_question_group_description], models.QuestionDAO.used_by(self.used_once_question_id)) def test_used_by_returns_descriptions_of_multiple_question_groups(self): self.assertEqual( [self.second_question_group_description, self.third_question_group_description], models.QuestionDAO.used_by(self.used_twice_question_id)) def", "super(QuestionDAOTestCase, self).setUp() self.used_twice_question_id = 1 self.used_twice_question_dto = models.QuestionDTO( self.used_twice_question_id, {})", "= 'property-name' student_property_key = models.StudentPropertyEntity.create( student, property_name).put() self.assertEqual( 'transformed_%s-%s' %", "models.QuestionGroupDAO.save_all([ self.first_question_group_dto, self.second_question_group_dto, self.third_question_group_dto]) def test_used_by_returns_description_of_single_question_group(self): self.assertEqual( [self.first_question_group_description], models.QuestionDAO.used_by(self.used_once_question_id)) def", "complaints about docstrings for self-documenting tests. 
# pylint: disable-msg=g-missing-docstring class", "self.transform), exported.safe_key) def test_get_key_does_not_transform_by_default(self): user_id = 'user_id' student = models.Student(key_name='name',", "self.assertEqual( 'transformed_name', models.StudentAnswersEntity.safe_key( answers_key, self.transform).name()) class StudentPropertyEntityTestCase(actions.ExportTestBase): def test_safe_key_transforms_user_id_component(self): user_id", "= models.Student(key_name='<EMAIL>', user_id=user_id) student.put() property_name = 'property-name' student_property_key = models.StudentPropertyEntity.create(", "'user_id' student = models.Student(key_name='<EMAIL>', user_id=user_id) student.put() property_name = 'property-name' student_property_key", "then pass erroneously. self.first_question_group_description = 'first_question_group' self.first_question_group_id = 4 self.first_question_group_dto", "self.third_question_group_id, {'description': self.third_question_group_description, 'items': [{'question': str(self.used_twice_question_id)}]}) models.QuestionGroupDAO.save_all([ self.first_question_group_dto, self.second_question_group_dto, self.third_question_group_dto])", "test_get_key_does_not_transform_by_default(self): user_id = 'user_id' student = models.Student(key_name='name', user_id=user_id) student.put() self.assertEqual(user_id,", "OR CONDITIONS OF ANY KIND, either express or implied. #", "models.QuestionDAO.used_by(self.used_once_question_id)) def test_used_by_returns_descriptions_of_multiple_question_groups(self): self.assertEqual( [self.second_question_group_description, self.third_question_group_description], models.QuestionDAO.used_by(self.used_twice_question_id)) def test_used_by_returns_empty_list_for_unused_question(self): not_found_id", "QuestionDAO.\"\"\" # Name determined by parent. 
pylint: disable-msg=g-bad-name def setUp(self):", "def test_for_export_transforms_correctly(self): user_id = '1' student = models.Student(key_name='name', user_id='1', is_enrolled=True)", "they're handcoded # elsewhere, the implementations could fall out of", "self.used_once_question_dto = models.QuestionDTO( self.used_once_question_id, {}) self.unused_question_id = 3 self.unused_question_dto =", "self.first_question_group_dto = models.QuestionGroupDTO( self.first_question_group_id, {'description': self.first_question_group_description, 'items': [{'question': str(self.used_once_question_id)}]}) self.second_question_group_description", "'third_question_group' self.third_question_group_id = 6 self.third_question_group_dto = models.QuestionGroupDTO( self.third_question_group_id, {'description': self.third_question_group_description,", "'nick_name' user_id = '1' profile = models.PersonalProfile( date_of_birth=date_of_birth, email=email, key_name=user_id,", "'transformed_name', models.StudentAnswersEntity.safe_key( answers_key, self.transform).name()) class StudentPropertyEntityTestCase(actions.ExportTestBase): def test_safe_key_transforms_user_id_component(self): user_id =", "about docstrings for self-documenting tests. # pylint: disable-msg=g-missing-docstring class EventEntityTestCase(actions.ExportTestBase):", "\"\"\"Functional tests for models.models.\"\"\" __author__ = [ '<EMAIL> (<NAME>)', ]", "law or agreed to in writing, software # distributed under", "an \"AS-IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "2013 Google Inc. All Rights Reserved. # # Licensed under", "self.second_question_group_description, 'items': [{'question': str(self.used_twice_question_id)}]}) self.third_question_group_description = 'third_question_group' self.third_question_group_id = 6", "tests for QuestionDAO.\"\"\" # Name determined by parent. 
pylint: disable-msg=g-bad-name", "self.first_question_group_description = 'first_question_group' self.first_question_group_id = 4 self.first_question_group_dto = models.QuestionGroupDTO( self.first_question_group_id,", "def test_safe_key_transforms_user_id_component(self): user_id = 'user_id' student = models.Student(key_name='<EMAIL>', user_id=user_id) student.put()", "# may then pass erroneously. self.first_question_group_description = 'first_question_group' self.first_question_group_id =", "models.QuestionDTO( self.unused_question_id, {}) models.QuestionDAO.save_all([ self.used_twice_question_dto, self.used_once_question_dto, self.unused_question_dto]) # Handcoding the", "self.assertEqual( 'transformed_name', models.Student.safe_key(key, self.transform).name()) class StudentAnswersEntityTestCase(actions.ExportTestBase): def test_safe_key_transforms_name(self): student_key =", "2 self.used_once_question_dto = models.QuestionDTO( self.used_once_question_id, {}) self.unused_question_id = 3 self.unused_question_dto", "and # limitations under the License. \"\"\"Functional tests for models.models.\"\"\"", "= 'third_question_group' self.third_question_group_id = 6 self.third_question_group_dto = models.QuestionGroupDTO( self.third_question_group_id, {'description':", "def test_used_by_returns_empty_list_for_unused_question(self): not_found_id = 7 self.assertFalse(models.QuestionDAO.load(not_found_id)) self.assertEqual([], models.QuestionDAO.used_by(not_found_id)) class StudentTestCase(actions.ExportTestBase):", "for QuestionDAO.\"\"\" # Name determined by parent. pylint: disable-msg=g-bad-name def", "self.unused_question_id, {}) models.QuestionDAO.save_all([ self.used_twice_question_dto, self.used_once_question_dto, self.unused_question_dto]) # Handcoding the dicts.", "governing permissions and # limitations under the License. 
\"\"\"Functional tests", "may obtain a copy of the License at # #", "nick_name=nick_name) profile.put() exported = profile.for_export(self.transform) self.assert_blacklisted_properties_removed(profile, exported) self.assertEqual( self.transform(user_id), exported.safe_key.name())", "event.source) self.assertEqual('transformed_1', exported.user_id) self.assertEqual(key, models.EventEntity.safe_key(key, self.transform)) class PersonalProfileTestCase(actions.ExportTestBase): def test_for_export_transforms_correctly_and_sets_safe_key(self):", "1 self.used_twice_question_dto = models.QuestionDTO( self.used_twice_question_id, {}) self.used_once_question_id = 2 self.used_once_question_dto", "# pylint: disable-msg=g-missing-docstring class EventEntityTestCase(actions.ExportTestBase): def test_for_export_transforms_correctly(self): event = models.EventEntity(source='source',", "Disable complaints about docstrings for self-documenting tests. # pylint: disable-msg=g-missing-docstring", "self.second_question_group_id = 5 self.second_question_group_dto = models.QuestionGroupDTO( self.second_question_group_id, {'description': self.second_question_group_description, 'items':", "= models.StudentPropertyEntity.create( student, property_name).put() self.assertEqual( 'transformed_%s-%s' % (user_id, property_name), models.StudentPropertyEntity.safe_key(", "{'description': self.third_question_group_description, 'items': [{'question': str(self.used_twice_question_id)}]}) models.QuestionGroupDAO.save_all([ self.first_question_group_dto, self.second_question_group_dto, self.third_question_group_dto]) def", "may not use this file except in compliance with the", "because they're handcoded # elsewhere, the implementations could fall out", "could fall out of sync, and these tests # may", "self.assertEqual( models.Student.safe_key(key, self.transform), exported.safe_key) def test_get_key_does_not_transform_by_default(self): user_id = 'user_id' student", "answers_key = answers.put() 
self.assertEqual( 'transformed_name', models.StudentAnswersEntity.safe_key( answers_key, self.transform).name()) class StudentPropertyEntityTestCase(actions.ExportTestBase):", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "= 'user_id' student = models.Student(key_name='name', user_id=user_id) student.put() self.assertEqual(user_id, student.get_key().name()) def", "this file except in compliance with the License. # You", "6 self.third_question_group_dto = models.QuestionGroupDTO( self.third_question_group_id, {'description': self.third_question_group_description, 'items': [{'question': str(self.used_twice_question_id)}]})", "def test_for_export_transforms_correctly_and_sets_safe_key(self): date_of_birth = datetime.date.today() email = '<EMAIL>' legal_name =", "answers = models.StudentAnswersEntity(key_name=student_key.name()) answers_key = answers.put() self.assertEqual( 'transformed_name', models.StudentAnswersEntity.safe_key( answers_key,", "self.third_question_group_description, 'items': [{'question': str(self.used_twice_question_id)}]}) models.QuestionGroupDAO.save_all([ self.first_question_group_dto, self.second_question_group_dto, self.third_question_group_dto]) def test_used_by_returns_description_of_single_question_group(self):", "StudentTestCase(actions.ExportTestBase): def test_for_export_transforms_correctly(self): user_id = '1' student = models.Student(key_name='name', user_id='1',", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "# # Licensed under the Apache License, Version 2.0 (the", "Name determined by parent. pylint: disable-msg=g-bad-name def setUp(self): \"\"\"Sets up", "parent. pylint: disable-msg=g-bad-name def setUp(self): \"\"\"Sets up datastore contents.\"\"\" super(QuestionDAOTestCase,", "= '<EMAIL>' legal_name = 'legal_name' nick_name = 'nick_name' user_id =", "file except in compliance with the License. 
# You may", "user_id='1', is_enrolled=True) key = student.put() exported = student.for_export(self.transform) self.assert_blacklisted_properties_removed(student, exported)", "student.put() exported = student.for_export(self.transform) self.assert_blacklisted_properties_removed(student, exported) self.assertTrue(exported.is_enrolled) self.assertEqual('transformed_1', exported.user_id) self.assertEqual(", "[ '<EMAIL> (<NAME>)', ] import datetime from models import models", "datastore contents.\"\"\" super(QuestionDAOTestCase, self).setUp() self.used_twice_question_id = 1 self.used_twice_question_dto = models.QuestionDTO(", "self.assert_blacklisted_properties_removed(student, exported) self.assertTrue(exported.is_enrolled) self.assertEqual('transformed_1', exported.user_id) self.assertEqual( 'transformed_' + user_id, exported.key_by_user_id.name())", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "self.used_once_question_id = 2 self.used_once_question_dto = models.QuestionDTO( self.used_once_question_id, {}) self.unused_question_id =", "self.assertEqual(key, models.EventEntity.safe_key(key, self.transform)) class PersonalProfileTestCase(actions.ExportTestBase): def test_for_export_transforms_correctly_and_sets_safe_key(self): date_of_birth = datetime.date.today()", "= student.for_export(self.transform) self.assert_blacklisted_properties_removed(student, exported) self.assertTrue(exported.is_enrolled) self.assertEqual('transformed_1', exported.user_id) self.assertEqual( 'transformed_' +", "self.unused_question_dto = models.QuestionDTO( self.unused_question_id, {}) models.QuestionDAO.save_all([ self.used_twice_question_dto, self.used_once_question_dto, self.unused_question_dto]) #", "\"\"\"Functional tests for QuestionDAO.\"\"\" # Name determined by parent. 
pylint:", "] import datetime from models import models from tests.functional import", "self.assertEqual(user_id, student.get_key().name()) def test_safe_key_transforms_name(self): key = models.Student(key_name='name').put() self.assertEqual( 'transformed_name', models.Student.safe_key(key,", "may then pass erroneously. self.first_question_group_description = 'first_question_group' self.first_question_group_id = 4", "Google Inc. All Rights Reserved. # # Licensed under the", "under the License. \"\"\"Functional tests for models.models.\"\"\" __author__ = [", "student_property_key = models.StudentPropertyEntity.create( student, property_name).put() self.assertEqual( 'transformed_%s-%s' % (user_id, property_name),", "= models.Student(key_name='name').put() answers = models.StudentAnswersEntity(key_name=student_key.name()) answers_key = answers.put() self.assertEqual( 'transformed_name',", "permissions and # limitations under the License. \"\"\"Functional tests for", "'items': [{'question': str(self.used_once_question_id)}]}) self.second_question_group_description = 'second_question_group' self.second_question_group_id = 5 self.second_question_group_dto", "str(self.used_twice_question_id)}]}) models.QuestionGroupDAO.save_all([ self.first_question_group_dto, self.second_question_group_dto, self.third_question_group_dto]) def test_used_by_returns_description_of_single_question_group(self): self.assertEqual( [self.first_question_group_description], models.QuestionDAO.used_by(self.used_once_question_id))", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "test_used_by_returns_descriptions_of_multiple_question_groups(self): self.assertEqual( [self.second_question_group_description, self.third_question_group_description], models.QuestionDAO.used_by(self.used_twice_question_id)) def test_used_by_returns_empty_list_for_unused_question(self): not_found_id = 7", "{}) self.unused_question_id = 3 self.unused_question_dto = 
models.QuestionDTO( self.unused_question_id, {}) models.QuestionDAO.save_all([", "under the License is distributed on an \"AS-IS\" BASIS, #", "= 'legal_name' nick_name = 'nick_name' user_id = '1' profile =", "on an \"AS-IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "'items': [{'question': str(self.used_twice_question_id)}]}) self.third_question_group_description = 'third_question_group' self.third_question_group_id = 6 self.third_question_group_dto", "or implied. # See the License for the specific language", "Rights Reserved. # # Licensed under the Apache License, Version", "= 5 self.second_question_group_dto = models.QuestionGroupDTO( self.second_question_group_id, {'description': self.second_question_group_description, 'items': [{'question':", "self.used_twice_question_id, {}) self.used_once_question_id = 2 self.used_once_question_dto = models.QuestionDTO( self.used_once_question_id, {})", "key = student.put() exported = student.for_export(self.transform) self.assert_blacklisted_properties_removed(student, exported) self.assertTrue(exported.is_enrolled) self.assertEqual('transformed_1',", "self.assertEqual('transformed_1', exported.user_id) self.assertEqual( 'transformed_' + user_id, exported.key_by_user_id.name()) self.assertEqual( models.Student.safe_key(key, self.transform),", "KIND, either express or implied. 
# See the License for", "specific language governing permissions and # limitations under the License.", "= 4 self.first_question_group_dto = models.QuestionGroupDTO( self.first_question_group_id, {'description': self.first_question_group_description, 'items': [{'question':", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "(<NAME>)', ] import datetime from models import models from tests.functional", "# elsewhere, the implementations could fall out of sync, and", "profile.put() exported = profile.for_export(self.transform) self.assert_blacklisted_properties_removed(profile, exported) self.assertEqual( self.transform(user_id), exported.safe_key.name()) class", "import actions # Disable complaints about docstrings for self-documenting tests.", "self.transform).name()) class StudentPropertyEntityTestCase(actions.ExportTestBase): def test_safe_key_transforms_user_id_component(self): user_id = 'user_id' student =", "'transformed_name', models.Student.safe_key(key, self.transform).name()) class StudentAnswersEntityTestCase(actions.ExportTestBase): def test_safe_key_transforms_name(self): student_key = models.Student(key_name='name').put()", "self.third_question_group_description], models.QuestionDAO.used_by(self.used_twice_question_id)) def test_used_by_returns_empty_list_for_unused_question(self): not_found_id = 7 self.assertFalse(models.QuestionDAO.load(not_found_id)) self.assertEqual([], models.QuestionDAO.used_by(not_found_id))", "= [ '<EMAIL> (<NAME>)', ] import datetime from models import", "(the \"License\"); # you may not use this file except", "self-documenting tests. 
# pylint: disable-msg=g-missing-docstring class EventEntityTestCase(actions.ExportTestBase): def test_for_export_transforms_correctly(self): event", "# you may not use this file except in compliance", "self.second_question_group_dto, self.third_question_group_dto]) def test_used_by_returns_description_of_single_question_group(self): self.assertEqual( [self.first_question_group_description], models.QuestionDAO.used_by(self.used_once_question_id)) def test_used_by_returns_descriptions_of_multiple_question_groups(self): self.assertEqual(", "of sync, and these tests # may then pass erroneously.", "tests.functional import actions # Disable complaints about docstrings for self-documenting", "the License. \"\"\"Functional tests for models.models.\"\"\" __author__ = [ '<EMAIL>", "This is dangerous because they're handcoded # elsewhere, the implementations", "# # Unless required by applicable law or agreed to", "= event.for_export(self.transform) self.assert_blacklisted_properties_removed(event, exported) self.assertEqual('source', event.source) self.assertEqual('transformed_1', exported.user_id) self.assertEqual(key, models.EventEntity.safe_key(key,", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "fall out of sync, and these tests # may then", "Version 2.0 (the \"License\"); # you may not use this", "implied. 
# See the License for the specific language governing", "= models.QuestionDTO( self.used_twice_question_id, {}) self.used_once_question_id = 2 self.used_once_question_dto = models.QuestionDTO(", "under the Apache License, Version 2.0 (the \"License\"); # you", "answers.put() self.assertEqual( 'transformed_name', models.StudentAnswersEntity.safe_key( answers_key, self.transform).name()) class StudentPropertyEntityTestCase(actions.ExportTestBase): def test_safe_key_transforms_user_id_component(self):", "'items': [{'question': str(self.used_twice_question_id)}]}) models.QuestionGroupDAO.save_all([ self.first_question_group_dto, self.second_question_group_dto, self.third_question_group_dto]) def test_used_by_returns_description_of_single_question_group(self): self.assertEqual(", "for self-documenting tests. # pylint: disable-msg=g-missing-docstring class EventEntityTestCase(actions.ExportTestBase): def test_for_export_transforms_correctly(self):", "language governing permissions and # limitations under the License. 
\"\"\"Functional", "models.StudentAnswersEntity(key_name=student_key.name()) answers_key = answers.put() self.assertEqual( 'transformed_name', models.StudentAnswersEntity.safe_key( answers_key, self.transform).name()) class", "by applicable law or agreed to in writing, software #", "event = models.EventEntity(source='source', user_id='1') key = event.put() exported = event.for_export(self.transform)", "date_of_birth = datetime.date.today() email = '<EMAIL>' legal_name = 'legal_name' nick_name", "QuestionDAOTestCase(actions.TestBase): \"\"\"Functional tests for QuestionDAO.\"\"\" # Name determined by parent.", "self.second_question_group_id, {'description': self.second_question_group_description, 'items': [{'question': str(self.used_twice_question_id)}]}) self.third_question_group_description = 'third_question_group' self.third_question_group_id", "{'description': self.first_question_group_description, 'items': [{'question': str(self.used_once_question_id)}]}) self.second_question_group_description = 'second_question_group' self.second_question_group_id =", "{'description': self.second_question_group_description, 'items': [{'question': str(self.used_twice_question_id)}]}) self.third_question_group_description = 'third_question_group' self.third_question_group_id =", "class StudentPropertyEntityTestCase(actions.ExportTestBase): def test_safe_key_transforms_user_id_component(self): user_id = 'user_id' student = models.Student(key_name='<EMAIL>',", "self.assertEqual([], models.QuestionDAO.used_by(not_found_id)) class StudentTestCase(actions.ExportTestBase): def test_for_export_transforms_correctly(self): user_id = '1' student", "handcoded # elsewhere, the implementations could fall out of sync,", "= 'nick_name' user_id = '1' profile = models.PersonalProfile( date_of_birth=date_of_birth, email=email,", "pass erroneously. 
self.first_question_group_description = 'first_question_group' self.first_question_group_id = 4 self.first_question_group_dto =", "exported = event.for_export(self.transform) self.assert_blacklisted_properties_removed(event, exported) self.assertEqual('source', event.source) self.assertEqual('transformed_1', exported.user_id) self.assertEqual(key,", "= 6 self.third_question_group_dto = models.QuestionGroupDTO( self.third_question_group_id, {'description': self.third_question_group_description, 'items': [{'question':", "'<EMAIL> (<NAME>)', ] import datetime from models import models from", "tests. # pylint: disable-msg=g-missing-docstring class EventEntityTestCase(actions.ExportTestBase): def test_for_export_transforms_correctly(self): event =", "self.unused_question_dto]) # Handcoding the dicts. This is dangerous because they're", "class StudentTestCase(actions.ExportTestBase): def test_for_export_transforms_correctly(self): user_id = '1' student = models.Student(key_name='name',", "Unless required by applicable law or agreed to in writing,", "= models.Student(key_name='name', user_id='1', is_enrolled=True) key = student.put() exported = student.for_export(self.transform)", "self.used_once_question_id, {}) self.unused_question_id = 3 self.unused_question_dto = models.QuestionDTO( self.unused_question_id, {})", "the implementations could fall out of sync, and these tests", "self.first_question_group_dto, self.second_question_group_dto, self.third_question_group_dto]) def test_used_by_returns_description_of_single_question_group(self): self.assertEqual( [self.first_question_group_description], models.QuestionDAO.used_by(self.used_once_question_id)) def test_used_by_returns_descriptions_of_multiple_question_groups(self):", "exported.user_id) self.assertEqual(key, models.EventEntity.safe_key(key, self.transform)) class PersonalProfileTestCase(actions.ExportTestBase): def test_for_export_transforms_correctly_and_sets_safe_key(self): date_of_birth =", "the specific 
language governing permissions and # limitations under the", "datetime from models import models from tests.functional import actions #", "applicable law or agreed to in writing, software # distributed", "models.EventEntity.safe_key(key, self.transform)) class PersonalProfileTestCase(actions.ExportTestBase): def test_for_export_transforms_correctly_and_sets_safe_key(self): date_of_birth = datetime.date.today() email", "StudentAnswersEntityTestCase(actions.ExportTestBase): def test_safe_key_transforms_name(self): student_key = models.Student(key_name='name').put() answers = models.StudentAnswersEntity(key_name=student_key.name()) answers_key", "out of sync, and these tests # may then pass", "actions # Disable complaints about docstrings for self-documenting tests. #", "models.models.\"\"\" __author__ = [ '<EMAIL> (<NAME>)', ] import datetime from", "profile.for_export(self.transform) self.assert_blacklisted_properties_removed(profile, exported) self.assertEqual( self.transform(user_id), exported.safe_key.name()) class QuestionDAOTestCase(actions.TestBase): \"\"\"Functional tests", "and these tests # may then pass erroneously. 
self.first_question_group_description =", "in writing, software # distributed under the License is distributed", "{}) self.used_once_question_id = 2 self.used_once_question_dto = models.QuestionDTO( self.used_once_question_id, {}) self.unused_question_id", "date_of_birth=date_of_birth, email=email, key_name=user_id, legal_name=legal_name, nick_name=nick_name) profile.put() exported = profile.for_export(self.transform) self.assert_blacklisted_properties_removed(profile,", "models.Student.safe_key(key, self.transform).name()) class StudentAnswersEntityTestCase(actions.ExportTestBase): def test_safe_key_transforms_name(self): student_key = models.Student(key_name='name').put() answers", "class QuestionDAOTestCase(actions.TestBase): \"\"\"Functional tests for QuestionDAO.\"\"\" # Name determined by", "self.assertTrue(exported.is_enrolled) self.assertEqual('transformed_1', exported.user_id) self.assertEqual( 'transformed_' + user_id, exported.key_by_user_id.name()) self.assertEqual( models.Student.safe_key(key,", "EventEntityTestCase(actions.ExportTestBase): def test_for_export_transforms_correctly(self): event = models.EventEntity(source='source', user_id='1') key = event.put()", "3 self.unused_question_dto = models.QuestionDTO( self.unused_question_id, {}) models.QuestionDAO.save_all([ self.used_twice_question_dto, self.used_once_question_dto, self.unused_question_dto])", "= datetime.date.today() email = '<EMAIL>' legal_name = 'legal_name' nick_name =", "'transformed_' + user_id, exported.key_by_user_id.name()) self.assertEqual( models.Student.safe_key(key, self.transform), exported.safe_key) def test_get_key_does_not_transform_by_default(self):", "exported.key_by_user_id.name()) self.assertEqual( models.Student.safe_key(key, self.transform), exported.safe_key) def test_get_key_does_not_transform_by_default(self): user_id = 'user_id'", "models import models from tests.functional import actions # Disable complaints", "self.first_question_group_id = 4 
self.first_question_group_dto = models.QuestionGroupDTO( self.first_question_group_id, {'description': self.first_question_group_description, 'items':", "is_enrolled=True) key = student.put() exported = student.for_export(self.transform) self.assert_blacklisted_properties_removed(student, exported) self.assertTrue(exported.is_enrolled)", "License, Version 2.0 (the \"License\"); # you may not use", "exported = profile.for_export(self.transform) self.assert_blacklisted_properties_removed(profile, exported) self.assertEqual( self.transform(user_id), exported.safe_key.name()) class QuestionDAOTestCase(actions.TestBase):", "user_id = '1' student = models.Student(key_name='name', user_id='1', is_enrolled=True) key =", "# You may obtain a copy of the License at", "dangerous because they're handcoded # elsewhere, the implementations could fall", "user_id = 'user_id' student = models.Student(key_name='<EMAIL>', user_id=user_id) student.put() property_name =", "exported.safe_key) def test_get_key_does_not_transform_by_default(self): user_id = 'user_id' student = models.Student(key_name='name', user_id=user_id)", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "nick_name = 'nick_name' user_id = '1' profile = models.PersonalProfile( date_of_birth=date_of_birth,", "models.Student(key_name='<EMAIL>', user_id=user_id) student.put() property_name = 'property-name' student_property_key = models.StudentPropertyEntity.create( student,", "[self.second_question_group_description, self.third_question_group_description], models.QuestionDAO.used_by(self.used_twice_question_id)) def test_used_by_returns_empty_list_for_unused_question(self): not_found_id = 7 self.assertFalse(models.QuestionDAO.load(not_found_id)) self.assertEqual([],", "'first_question_group' self.first_question_group_id = 4 self.first_question_group_dto = models.QuestionGroupDTO( self.first_question_group_id, {'description': self.first_question_group_description,", "= models.QuestionDTO( 
self.used_once_question_id, {}) self.unused_question_id = 3 self.unused_question_dto = models.QuestionDTO(", "StudentPropertyEntityTestCase(actions.ExportTestBase): def test_safe_key_transforms_user_id_component(self): user_id = 'user_id' student = models.Student(key_name='<EMAIL>', user_id=user_id)", "[self.first_question_group_description], models.QuestionDAO.used_by(self.used_once_question_id)) def test_used_by_returns_descriptions_of_multiple_question_groups(self): self.assertEqual( [self.second_question_group_description, self.third_question_group_description], models.QuestionDAO.used_by(self.used_twice_question_id)) def test_used_by_returns_empty_list_for_unused_question(self):", "the License for the specific language governing permissions and #", "email=email, key_name=user_id, legal_name=legal_name, nick_name=nick_name) profile.put() exported = profile.for_export(self.transform) self.assert_blacklisted_properties_removed(profile, exported)", "Apache License, Version 2.0 (the \"License\"); # you may not", "either express or implied. 
# See the License for the", "'1' student = models.Student(key_name='name', user_id='1', is_enrolled=True) key = student.put() exported", "models.QuestionDAO.used_by(self.used_twice_question_id)) def test_used_by_returns_empty_list_for_unused_question(self): not_found_id = 7 self.assertFalse(models.QuestionDAO.load(not_found_id)) self.assertEqual([], models.QuestionDAO.used_by(not_found_id)) class", "elsewhere, the implementations could fall out of sync, and these", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "= 1 self.used_twice_question_dto = models.QuestionDTO( self.used_twice_question_id, {}) self.used_once_question_id = 2", "not_found_id = 7 self.assertFalse(models.QuestionDAO.load(not_found_id)) self.assertEqual([], models.QuestionDAO.used_by(not_found_id)) class StudentTestCase(actions.ExportTestBase): def test_for_export_transforms_correctly(self):", "= '1' student = models.Student(key_name='name', user_id='1', is_enrolled=True) key = student.put()", "student.for_export(self.transform) self.assert_blacklisted_properties_removed(student, exported) self.assertTrue(exported.is_enrolled) self.assertEqual('transformed_1', exported.user_id) self.assertEqual( 'transformed_' + user_id,", "tests for models.models.\"\"\" __author__ = [ '<EMAIL> (<NAME>)', ] import", "exported) self.assertEqual( self.transform(user_id), exported.safe_key.name()) class QuestionDAOTestCase(actions.TestBase): \"\"\"Functional tests for QuestionDAO.\"\"\"", "= 'first_question_group' self.first_question_group_id = 4 self.first_question_group_dto = models.QuestionGroupDTO( self.first_question_group_id, {'description':", "exported.safe_key.name()) class QuestionDAOTestCase(actions.TestBase): \"\"\"Functional tests for QuestionDAO.\"\"\" # Name determined", "datetime.date.today() email = '<EMAIL>' legal_name = 'legal_name' nick_name = 'nick_name'", "= models.Student(key_name='name', user_id=user_id) student.put() self.assertEqual(user_id, 
student.get_key().name()) def test_safe_key_transforms_name(self): key =", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "student, property_name).put() self.assertEqual( 'transformed_%s-%s' % (user_id, property_name), models.StudentPropertyEntity.safe_key( student_property_key, self.transform).name())", "by parent. pylint: disable-msg=g-bad-name def setUp(self): \"\"\"Sets up datastore contents.\"\"\"", "models.QuestionGroupDTO( self.first_question_group_id, {'description': self.first_question_group_description, 'items': [{'question': str(self.used_once_question_id)}]}) self.second_question_group_description = 'second_question_group'", "# Name determined by parent. pylint: disable-msg=g-bad-name def setUp(self): \"\"\"Sets", "key = event.put() exported = event.for_export(self.transform) self.assert_blacklisted_properties_removed(event, exported) self.assertEqual('source', event.source)", "self.transform)) class PersonalProfileTestCase(actions.ExportTestBase): def test_for_export_transforms_correctly_and_sets_safe_key(self): date_of_birth = datetime.date.today() email =", "= 3 self.unused_question_dto = models.QuestionDTO( self.unused_question_id, {}) models.QuestionDAO.save_all([ self.used_twice_question_dto, self.used_once_question_dto,", "answers_key, self.transform).name()) class StudentPropertyEntityTestCase(actions.ExportTestBase): def test_safe_key_transforms_user_id_component(self): user_id = 'user_id' student", "student = models.Student(key_name='name', user_id=user_id) student.put() self.assertEqual(user_id, student.get_key().name()) def test_safe_key_transforms_name(self): key", "5 self.second_question_group_dto = models.QuestionGroupDTO( self.second_question_group_id, {'description': self.second_question_group_description, 'items': [{'question': str(self.used_twice_question_id)}]})", "pylint: disable-msg=g-bad-name def setUp(self): \"\"\"Sets up datastore contents.\"\"\" super(QuestionDAOTestCase, self).setUp()", "is 
distributed on an \"AS-IS\" BASIS, # WITHOUT WARRANTIES OR", "\"License\"); # you may not use this file except in", "the dicts. This is dangerous because they're handcoded # elsewhere,", "'<EMAIL>' legal_name = 'legal_name' nick_name = 'nick_name' user_id = '1'", "models.Student(key_name='name', user_id=user_id) student.put() self.assertEqual(user_id, student.get_key().name()) def test_safe_key_transforms_name(self): key = models.Student(key_name='name').put()", "def test_get_key_does_not_transform_by_default(self): user_id = 'user_id' student = models.Student(key_name='name', user_id=user_id) student.put()", "legal_name = 'legal_name' nick_name = 'nick_name' user_id = '1' profile", "# Unless required by applicable law or agreed to in", "exported.user_id) self.assertEqual( 'transformed_' + user_id, exported.key_by_user_id.name()) self.assertEqual( models.Student.safe_key(key, self.transform), exported.safe_key)", "student_key = models.Student(key_name='name').put() answers = models.StudentAnswersEntity(key_name=student_key.name()) answers_key = answers.put() self.assertEqual(", "student.put() property_name = 'property-name' student_property_key = models.StudentPropertyEntity.create( student, property_name).put() self.assertEqual(", "+ user_id, exported.key_by_user_id.name()) self.assertEqual( models.Student.safe_key(key, self.transform), exported.safe_key) def test_get_key_does_not_transform_by_default(self): user_id", "models.QuestionDAO.save_all([ self.used_twice_question_dto, self.used_once_question_dto, self.unused_question_dto]) # Handcoding the dicts. 
This is", "str(self.used_twice_question_id)}]}) self.third_question_group_description = 'third_question_group' self.third_question_group_id = 6 self.third_question_group_dto = models.QuestionGroupDTO(", "def setUp(self): \"\"\"Sets up datastore contents.\"\"\" super(QuestionDAOTestCase, self).setUp() self.used_twice_question_id =", "self.transform).name()) class StudentAnswersEntityTestCase(actions.ExportTestBase): def test_safe_key_transforms_name(self): student_key = models.Student(key_name='name').put() answers =", "user_id = 'user_id' student = models.Student(key_name='name', user_id=user_id) student.put() self.assertEqual(user_id, student.get_key().name())", "You may obtain a copy of the License at #", "pylint: disable-msg=g-missing-docstring class EventEntityTestCase(actions.ExportTestBase): def test_for_export_transforms_correctly(self): event = models.EventEntity(source='source', user_id='1')", "user_id='1') key = event.put() exported = event.for_export(self.transform) self.assert_blacklisted_properties_removed(event, exported) self.assertEqual('source',", "4 self.first_question_group_dto = models.QuestionGroupDTO( self.first_question_group_id, {'description': self.first_question_group_description, 'items': [{'question': str(self.used_once_question_id)}]})", "profile = models.PersonalProfile( date_of_birth=date_of_birth, email=email, key_name=user_id, legal_name=legal_name, nick_name=nick_name) profile.put() exported", "the Apache License, Version 2.0 (the \"License\"); # you may", "models.Student(key_name='name').put() answers = models.StudentAnswersEntity(key_name=student_key.name()) answers_key = answers.put() self.assertEqual( 'transformed_name', models.StudentAnswersEntity.safe_key(", "= 7 self.assertFalse(models.QuestionDAO.load(not_found_id)) self.assertEqual([], models.QuestionDAO.used_by(not_found_id)) class StudentTestCase(actions.ExportTestBase): def test_for_export_transforms_correctly(self): user_id", "student.put() self.assertEqual(user_id, 
student.get_key().name()) def test_safe_key_transforms_name(self): key = models.Student(key_name='name').put() self.assertEqual( 'transformed_name',", "class StudentAnswersEntityTestCase(actions.ExportTestBase): def test_safe_key_transforms_name(self): student_key = models.Student(key_name='name').put() answers = models.StudentAnswersEntity(key_name=student_key.name())" ]
[ "the waveform (Hz) frame_time (float, optional): Duration of a frame", "can be detected (Hz) (Default: ``85``). freq_high (int, optional): Highest", "on both sides so that the :math:`t`-th frame is centered", "factor of the second, elementwise. \"\"\" mask = (a[0] >", "# Centered windowed pad_length = (win_length - 1) // 2", "1, will compute DB to power. If 0.5, will compute", "_combine_max(half, best) indices = best[1] # Add back minimal lag", "def complex_norm( complex_tensor: Tensor, power: float = 1.0 ) ->", "time) sample_rate (int): The sample rate of the waveform (Hz)", "signal encoded with values from 0 to quantization_channels - 1.", "complex_tensor[..., 0]) def magphase( complex_tensor: Tensor, power: float = 1.0", "Expand batch shape = x_db.size() packed_channels = shape[-3] if x_db.dim()", "f_sp # Fill in the log-scale part min_log_hz = 1000.0", "= torch.rand(1) * mask_param min_value = torch.rand(1) * (specgram.size(axis) -", "be 2 dimensional. See also ```channels_first```. sample_rate (int): Sample rate", "(n_frame). \"\"\" if pad > 0: # TODO add \"with", "triangles zero = torch.zeros(1) down_slopes = (-1.0 * slopes[:, :-2])", "from ``uniform(0, mask_param)``, and ``v_0`` from ``uniform(0, max_v - v)``.", "frames near the ending edge won't be snipped, so that", "filter banks (fb matrix) of size (``n_freqs``, ``n_mels``) meaning number", "to x the number of filterbanks. Each column is a", "torch.cat([phase_0, phase[..., :-1]], dim=-1) phase_acc = torch.cumsum(phase, -1) mag =", "Setting this to 0 recovers the original Griffin-Lim method. Values", "Apr. 1984. Args: specgram (Tensor): A magnitude-only STFT spectrogram of", "incomplete frames near the ending edge won't be snipped, so", "function and median smoothing. Args: waveform (Tensor): Tensor of audio", "f_sp logstep = math.log(6.4) / 27.0 if freq >= min_log_hz:", "number of frequencies to highlight/apply to x the number of", "normalized cross-correlation function and median smoothing. 
Args: waveform (Tensor): Tensor", "True, compression: Optional[float] = None, encoding: Optional[str] = None, bits_per_sample:", "f_min) / f_sp # Fill in the log-scale part min_log_hz", "(Optional[str]): If 'slaney', divide the triangular mel weights by the", "Each column is a filterbank so that assuming there is", "x (Tensor): Input tensor before being converted to power/amplitude scale.", "b: Tuple[Tensor, Tensor], thresh: float = 0.99 ) -> Tuple[Tensor,", "= \\frac{\\sum_{n=b_i}^{b_i + N-1} w(n) w(m+n)}{\\sqrt{E(b_i) E(m+b_i)}}, where :math:`\\phi_i(m)` is", "Returns: Tensor: Masked spectrogram of dimensions (channel, freq, time) \"\"\"", "(float, optional): Frame shift in milliseconds. (default: 10.0) min_f0 (float,", "to what an online decoder would see in the first", "dtype=complex_specgrams.dtype) alphas = time_steps % 1.0 phase_0 = angle(complex_specgrams[..., :1,", "/ f_diff[1:] # (n_freqs, n_mels) fb = torch.max(zero, torch.min(down_slopes, up_slopes))", "tensor input. Args: complex_tensor (Tensor): Tensor shape of `(..., complex=2)`", "def create_fb_matrix( n_freqs: int, f_min: float, f_max: float, n_mels: int,", "variance = variance / window_frames variance -= ((cur_sum ** 2)", "final version of the features, which is the default. (default:", "(float): Sample rate of `waveform`. frame_length (float, optional): Frame length", "last_window_start, :] cur_sum -= frame_to_remove if norm_vars: cur_sumsq -= (frame_to_remove", "McFee, Brian, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>. 
\"librosa:", "\"htk\": return 700.0 * (10.0**(mels / 2595.0) - 1.0) #", "in any dimension indices = torch.nn.functional.pad( indices, (pad_length, 0), mode=\"constant\",", "as _mod_utils import torchaudio __all__ = [ \"spectrogram\", \"griffinlim\", \"amplitude_to_DB\",", "= torch.zeros(num_channels, num_feats, dtype=dtype, device=device) cmn_waveform = torch.zeros( num_channels, num_frames,", "# randomly initialize the phase batch, freq, frames = specgram.size()", "= list(waveform.size()) waveform = waveform.reshape([-1] + shape[-1:]) nccf = _compute_nccf(waveform,", "torch.linspace( >>> 0, math.pi * hop_length, freq)[..., None] >>> x", "False, norm_vars: bool = False, ) -> Tensor: r\"\"\" Apply", "padding in any dimension indices = torch.nn.functional.pad( indices, (pad_length, 0),", "min_f0, max_f0, soft_min_f0, penalty_factor, lowpass_cutoff, resample_frequency, delta_pitch, nccf_ballast, lowpass_filter_width, upsample_filter_width,", "to Mels. Args: freqs (float): Frequencies in Hz mel_scale (str,", "Tensor: \"\"\"Convert mel bin numbers to frequencies. Args: mels (Tensor):", "FO change. (default: 0.1) lowpass_cutoff (float, optional): Cutoff frequency for", "= False, recompute_frame: int = 500, snip_edges: bool = True,", "extent possible, modulo end effects). If false, window is to", "twice lowpass-cutoff. (default: 4000) delta_pitch( float, optional): Smallest relative change", ") -> Tensor: r\"\"\"Detect pitch frequency. It is implemented using", "min_value[..., None, None] mask_end = (min_value + value)[..., None, None]", "if window_start < 0: window_start = 0 if last_window_start ==", "- 1.0) / mu return x def complex_norm( complex_tensor: Tensor,", "Implementation ported from `librosa`. * [1] McFee, Brian, <NAME>, <NAME>,", "Optional[int], rand_init: bool ) -> Tensor: r\"\"\"Compute waveform from a", "over the given window. \"\"\" # Centered windowed pad_length =", "stft. 
n_iter (int): Number of iteration for phase recovery process.", "min_log_hz) / logstep return mels def _mel_to_hz(mels: Tensor, mel_scale: str", "Fourier bins, and time is the number of window hops", "the padding method used when :attr:`center` is ``True``. Default: ``\"reflect\"``", "optional): Lowest frequency that can be detected (Hz) (Default: ``85``).", "of signal window (Tensor): Window tensor that is applied/multiplied to", "Otherwise, they have dimension ``[time, channel]``. compression (float): Used for", "x_mu = ((x_mu + 1) / 2 * mu +", "Minimum f0, applied in soft way, must not exceed min-f0", "frequencies mel_scale (str, optional): Scale to use: ``htk`` or ``slaney``.", "if norm_vars: cur_sumsq += (frame_to_add ** 2) window_frames = window_end", "(float, optional): Cost factor for FO change. (default: 0.1) lowpass_cutoff", "normalized: warnings.warn( \"The argument normalized is not used in Griffin-Lim,", ">= mask_start) & (mask < mask_end), mask_value) specgrams = specgrams.transpose(axis,", "(n_freqs, n_mels) fb = torch.max(zero, torch.min(down_slopes, up_slopes)) if norm is", "resample_frequency: float = 4000, delta_pitch: float = 0.005, nccf_ballast: float", "waveform.reshape(-1, shape[-1]) # default values are consistent with librosa.core.spectrum._spectrogram spec_f", "scale. 
Args: x (Tensor): Input tensor before being converted to", "\"\"\" assert momentum < 1, 'momentum={} > 1 can be", "+ 1) * (2 * n + 1) / 3", "last_window_end = window_end cmn_waveform[:, t, :] = waveform[:, t, :]", "false) Returns: Tensor: Tensor of freq of dimension (..., frame)", "n (c_{t+n} - c_{t-n})}{2 \\sum_{n=1}^{\\text{N}} n^2} where :math:`d_t` is the", "math.pi * torch.rand(batch, freq, frames) else: angles = torch.zeros(batch, freq,", "/ min_log_hz) / logstep return mels def _mel_to_hz(mels: Tensor, mel_scale:", "Tensor: The transformation matrix, to be right-multiplied to row-wise data", "import module_utils as _mod_utils import torchaudio __all__ = [ \"spectrogram\",", "= torch.zeros(1) down_slopes = (-1.0 * slopes[:, :-2]) / f_diff[:-1]", "``3400``). Returns: Tensor: Tensor of freq of dimension (..., frame)", "of the complex tensor \"\"\" mag = complex_norm(complex_tensor, power) phase", "= io.BytesIO() torchaudio.backend.sox_io_backend.save(bytes, waveform, sample_rate, channels_first, compression, format, encoding, bits_per_sample", "Returns: Tensor: Pitch feature. Shape: ``(batch, frames 2)`` where the", "is applied/multiplied to each frame/window n_fft (int): Size of FFT", "(win_length - 1) // 2 # \"replicate\" padding in any", "energy, 2 for power, etc. normalized (bool): Whether to normalize", "def _hz_to_mel(freq: float, mel_scale: str = \"htk\") -> float: r\"\"\"Convert", "cur_sum = torch.zeros(num_channels, num_feats, dtype=dtype, device=device) cur_sumsq = torch.zeros(num_channels, num_feats,", "Librosa all_freqs = torch.linspace(0, sample_rate // 2, n_freqs) # calculate", "mu-law companding. For more info see the `Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_", "and have the form `(batch, channel, freq, time)`. multiplier (float):", "int, momentum: float, length: Optional[int], rand_init: bool ) -> Tensor:", "(int): Window size. 
(Default: ``n_fft``) power (float): Exponent for the", "dtype=dtype).repeat(specgram.shape[1], 1, 1) output = torch.nn.functional.conv1d(specgram, kernel, groups=specgram.shape[1]) / denom", "> last_window_start: frame_to_remove = waveform[:, last_window_start, :] cur_sum -= frame_to_remove", "create_dct( n_mfcc: int, n_mels: int, norm: Optional[str] ) -> Tensor:", "<NAME>, <NAME> and <NAME> 2014 IEEE International Conference on Acoustics,", "before being converted to decibel scale. Input should take the", "```channels_first```. sample_rate (int): Sample rate of the audio waveform. format", "false, ignored if center==true (int, default = 100) center (bool,", "None ) -> Tensor: r\"\"\"Turn a spectrogram from the power/amplitude", "complex=2)` rate (float): Speed-up factor phase_advance (Tensor): Expected phase advance", "on mu-law companding. For more info see the `Wikipedia Entry", ") -> Tensor: r\"\"\" Apply median smoothing to the 1D", "to decibel scale. Input should take the form `(..., freq,", "frame) \"\"\" input_shape = waveform.shape num_frames, num_feats = input_shape[-2:] waveform", "``5``) mode (str, optional): Mode parameter passed to padding (Default:", "(default: 400.0) soft_min_f0 (float, optional): Minimum f0, applied in soft", "waveform def amplitude_to_DB( x: Tensor, multiplier: float, amin: float, db_multiplier:", "spectrogram(s) before being converted to decibel scale. Input should take", "= 5, mode: str = \"replicate\" ) -> Tensor: r\"\"\"Compute", "1.0) / mu return x def complex_norm( complex_tensor: Tensor, power:", "hop between STFT windows win_length (int): Window size Returns: Tensor:", "based on mu-law companding. 
For more info see the `Wikipedia", "and so may return different values for an audio clip", "* (mels[log_t] - min_log_mel)) return freqs def create_fb_matrix( n_freqs: int,", "delta2 = compute_deltas(delta) \"\"\" device = specgram.device dtype = specgram.dtype", "num_channels, num_frames, num_feats, dtype=dtype, device=device) for t in range(num_frames): window_start", "channels_first (bool): When True, both the input and output Tensor", "cur_sumsq variance = variance / window_frames variance -= ((cur_sum **", "bin conversion matrix. Args: n_freqs (int): Number of frequencies to", "complex spectrum is returned instead. normalized (bool): Whether to normalize", "# calculate mel freq bins m_min = _hz_to_mel(f_min, mel_scale=mel_scale) m_max", "Convert indices to frequency EPSILON = 10 ** (-9) freq", ")` \"\"\" # Replace by torch.norm once issue is fixed", "phase + phase_advance phase = torch.cat([phase_0, phase[..., :-1]], dim=-1) phase_acc", "Scale to use: ``htk`` or ``slaney``. (Default: ``htk``) Returns: mels", "pack batch shape = specgram.size() specgram = specgram.reshape([-1] + list(shape[-2:]))", "freq_low (int, optional): Lowest frequency that can be detected (Hz)", "last_window_end: frame_to_add = waveform[:, last_window_end, :] cur_sum += frame_to_add if", "Returns: freqs (Tensor): Mels converted in Hz \"\"\" if mel_scale", "``rate``. Args: complex_specgrams (Tensor): Dimension of `(..., freq, time, complex=2)`", "torch.nn.functional.conv1d(specgram, kernel, groups=specgram.shape[1]) / denom # unpack batch output =", "When True, both the input and output Tensor have dimension", "None and norm != \"slaney\": raise ValueError(\"norm must be one", "signal energy. Relevant if ``frames_per_chunk > 0``. (default: 500) snip_edges", "<NAME>, \"Signal estimation from modified short-time Fourier transform,\" IEEE Trans.", "Kaldi. 
Args: waveform (Tensor): The input waveform of shape `(...,", "Tuple import torch from torch import Tensor from torchaudio._internal import", "phase - 2 * math.pi * torch.round(phase / (2 *", "change. (default: 0.1) lowpass_cutoff (float, optional): Cutoff frequency for LowPass", "freqs[log_t] = min_log_hz * torch.exp(logstep * (mels[log_t] - min_log_mel)) return", "Scale to use: ``htk`` or ``slaney``. (Default: ``htk``) Returns: freqs", "+ 1) / 3 specgram = torch.nn.functional.pad(specgram, (n, n), mode=mode)", "initialize the phase batch, freq, frames = specgram.size() if rand_init:", "or 'slaney'\") # freq bins # Equivalent filterbank construction by", "imag_stretch], dim=-1) # unpack batch complex_specgrams_stretch = complex_specgrams_stretch.reshape(shape[:-3] + complex_specgrams_stretch.shape[1:])", "device=device) cmn_waveform = torch.zeros( num_channels, num_frames, num_feats, dtype=dtype, device=device) for", "-- not the final version of the features, which is", "(bool, optional): If this is set to false, the incomplete", "Tensor from torchaudio._internal import module_utils as _mod_utils import torchaudio __all__", "``length`` parameter if given. \"\"\" assert momentum < 1, 'momentum={}", "Optional[float], normalized: bool, center: bool = True, pad_mode: str =", "return mag, phase def phase_vocoder( complex_specgrams: Tensor, rate: float, phase_advance:", "r\"\"\"Convert Hz to Mels. Args: freqs (float): Frequencies in Hz", "frames = specgram.size() if rand_init: angles = 2 * math.pi", "if momentum: angles = angles - tprev.mul_(momentum / (1 +", "Optional[int] = None, ) -> Tensor: r\"\"\" Apply codecs as", "shape[-1]) result = torch.ops.torchaudio.kaldi_ComputeKaldiPitch( waveform, sample_rate, frame_length, frame_shift, min_f0, max_f0,", "not center: if window_end > t: window_end = max(t +", "of dimension (..., freq, frames) where freq is ``n_fft //", "clip split into snippets vs. a full clip. Args: x", "quantization_channels - 1. 
Args: x (Tensor): Input tensor quantization_channels (int):", "http://en.wikipedia.org/wiki/Discrete_cosine_transform#DCT-II n = torch.arange(float(n_mels)) k = torch.arange(float(n_mfcc)).unsqueeze(1) dct = torch.cos(math.pi", "= int(math.ceil(sample_rate * frame_time)) waveform_length = waveform.size()[-1] num_of_frames = int(math.ceil(waveform_length", "NCCF. Reference: - A pitch extraction algorithm tuned for automatic", "the frame-shift. This makes different types of features give the", "Create broadcastable mask mask_start = min_value[..., None, None] mask_end =", "# Store the previous iterate tprev = rebuilt # Invert", "<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>. \"librosa: Audio and", "str = \"htk\") -> Tensor: \"\"\"Convert mel bin numbers to", "a signal encoded with values from 0 to quantization_channels -", "norm is not None and norm == \"slaney\": # Slaney-style", "cross-correlation function and median smoothing. Args: waveform (Tensor): Tensor of", "0.1, lowpass_cutoff: float = 1000, resample_frequency: float = 4000, delta_pitch:", "``n_fft // 2 + 1`` bins hop_length (int): Length of", "normalization). (Default: ``None``) mel_scale (str, optional): Scale to use: ``htk``", "* a[0] + ~mask * b[0] indices = mask *", "else: variance = cur_sumsq variance = variance / window_frames variance", "= True, pad_mode: str = \"reflect\", onesided: bool = True", "> 2 else 1 x_db = x_db.reshape(-1, packed_channels, shape[-2], shape[-1])", "Output tensor in power/amplitude scale. \"\"\" return ref * torch.pow(torch.pow(10.0,", "where freq is ``n_fft // 2 + 1``. 
window (Tensor):", "Per batch example masking specgrams = specgrams.transpose(axis, -1) specgrams.masked_fill_((mask >=", "this warning, \" \"please use `normalized=False`.\") # pack batch shape", "of mel filterbanks norm (str or None): Norm to use", "float = 1.0 ) -> Tensor: r\"\"\"Compute the norm of", "5, max_frames_latency: int = 0, frames_per_chunk: int = 0, simulate_first_pass_online:", "int = 0, frames_per_chunk: int = 0, simulate_first_pass_online: bool =", "\"detect_pitch_frequency\", \"DB_to_amplitude\", \"mu_law_encoding\", \"mu_law_decoding\", \"complex_norm\", \"angle\", \"magphase\", \"phase_vocoder\", 'mask_along_axis', 'mask_along_axis_iid',", "dim=-1) \\ .to(dtype=specgram.dtype, device=specgram.device) specgram = specgram.unsqueeze(-1).expand_as(angles) # And initialize", "Used for formats other than WAV. For mor details see", "info see the `Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_ This algorithm assumes the", "form of augmentation. Args: waveform (Tensor): Audio data. Must be", "def create_dct( n_mfcc: int, n_mels: int, norm: Optional[str] ) ->", "For more details see :py:func:`torchaudio.backend.sox_io_backend.save`. bits_per_sample (int, optional): Changes the", "shape (``n_mels``, ``n_mfcc``), normalized depending on norm. Args: n_mfcc (int):", "Returns: Tensor: Complex Specgrams Stretch with dimension of `(..., freq,", "torch.sin(phase_acc) complex_specgrams_stretch = torch.stack([real_stretch, imag_stretch], dim=-1) # unpack batch complex_specgrams_stretch", "be one of \"htk\" or \"slaney\".') if mel_scale == \"htk\":", "from indices ``[v_0, v_0 + v)``, where ``v`` is sampled", "(specgram.size(axis) - value) mask_start = (min_value.long()).squeeze() mask_end = (min_value.long() +", "of `(..., complex=2)` power (float): Power of the norm. 
(Default:", "dct.t() def mu_law_encoding( x: Tensor, quantization_channels: int ) -> Tensor:", "= mask * a[1] + ~mask * b[1] return values,", "= min_log_mel + math.log(freq / min_log_hz) / logstep return mels", "frequency, 2 -> time) Returns: Tensor: Masked spectrogram of dimensions", "is the NCCF at frame :math:`i` with lag :math:`m`, :math:`w`", "the original Griffin-Lim method. Values near 1 can lead to", "# size (n_mfcc, n_mels) if norm is None: dct *=", "normalize variance to one. (bool, default = false) Returns: Tensor:", "t, :] *= variance cmn_waveform = cmn_waveform.view(input_shape[:-2] + (num_frames, num_feats))", "pp. 2494-2498, doi: 10.1109/ICASSP.2014.6854049. \"\"\" shape = waveform.shape waveform =", "all the lags is very close to the first half", "2]) \"\"\" # pack batch shape = complex_specgrams.size() complex_specgrams =", "complex_specgrams = torch.nn.functional.pad(complex_specgrams, [0, 0, 0, 2]) # (new_bins, freq,", "1: specgram[:, mask_start:mask_end] = mask_value elif axis == 2: specgram[:,", "freq def sliding_window_cmn( waveform: Tensor, cmn_window: int = 600, min_cmn_window:", "specgram (Tensor): Tensor of audio of dimension (..., freq, time)", "complex_specgrams.index_select(-2, (time_steps + 1).long()) angle_0 = angle(complex_specgrams_0) angle_1 = angle(complex_specgrams_1)", "length=length) # unpack batch waveform = waveform.reshape(shape[:-2] + waveform.shape[-1:]) return", "\"replicate\" padding in any dimension indices = torch.nn.functional.pad( indices, (pad_length,", "we down-sample the signal to. Must be more than twice", "optional): The window length used for computing delta (Default: ``5``)", "return indices def _median_smoothing( indices: Tensor, win_length: int ) ->", "of the norm. (Default: `1.0`). Returns: Tensor: Power of the", "0) e.g., 1 for energy, 2 for power, etc. 
normalized", ") -> Tensor: r\"\"\"Compute delta coefficients of a tensor, usually", "1 return indices def _median_smoothing( indices: Tensor, win_length: int )", "window_start window_start = 0 if not center: if window_end >", "will compute DB to power. If 0.5, will compute DB", "matrix, to be right-multiplied to row-wise data of size (``n_mels``,", "\"\"\" # Centered windowed pad_length = (win_length - 1) //", ".to(dtype=specgram.dtype, device=specgram.device) specgram = specgram.unsqueeze(-1).expand_as(angles) # And initialize the previous", "r\"\"\"Compute waveform from a linear scale magnitude spectrogram using the", "Returns: mels (float): Frequency in Mels \"\"\" if mel_scale not", "scale min_log_hz = 1000.0 min_log_mel = (min_log_hz - f_min) /", "2 window_end = window_start + cmn_window else: window_start = t", "int, frame_time: float = 10 ** (-2), win_length: int =", "sampled from ``uniform(0, mask_param)``, and ``v_0`` from ``uniform(0, max_v -", "axis == 2: specgram[:, :, mask_start:mask_end] = mask_value else: raise", "not the final version of the features, which is the", "Apply a mask along ``axis``. Mask will be applied from", "device=device) else: variance = cur_sumsq variance = variance / window_frames", "= _median_smoothing(indices, win_length) # Convert indices to frequency EPSILON =", "= waveform[:, last_window_end, :] cur_sum += frame_to_add if norm_vars: cur_sumsq", "axis != 3: raise ValueError('Only Frequency and Time masking are", "float: r\"\"\"Convert Hz to Mels. Args: freqs (float): Frequencies in", "freq_high: int = 3400, ) -> Tensor: r\"\"\"Detect pitch frequency.", "of the expected output. 
rand_init (bool): Initializes phase randomly if", "_hz_to_mel(freq: float, mel_scale: str = \"htk\") -> float: r\"\"\"Convert Hz", "0.1) lowpass_cutoff (float, optional): Cutoff frequency for LowPass filter (Hz)", "x_db def DB_to_amplitude( x: Tensor, ref: float, power: float )", "in range(num_frames): window_start = 0 window_end = 0 if center:", "bit depth for the supported formats. For more details see", "mask_along_axis( specgram: Tensor, mask_param: int, mask_value: float, axis: int )", "[indices[..., pad_length].unsqueeze(-1)], dim=-1) roll = indices.unfold(-1, win_length, 1) values, _", "pitch by a factor of ``rate``. Args: complex_specgrams (Tensor): Dimension", "Dimension of (freq, 1) Returns: Tensor: Complex Specgrams Stretch with", "1 for energy, 2 for power, etc. If None, then", "p=2, dim=-1) norm_1 = torch.norm(complex_specgrams_1, p=2, dim=-1) phase = angle_1", "\"magphase\", \"phase_vocoder\", 'mask_along_axis', 'mask_along_axis_iid', 'sliding_window_cmn', \"spectral_centroid\", \"apply_codec\", ] def spectrogram(", "num_frames: window_start -= (window_end - num_frames) window_end = num_frames if", "\"\"\" if mel_scale not in ['slaney', 'htk']: raise ValueError('mel_scale should", "= torch.nn.functional.pad(waveform, (0, p)) # Compute lags output_lag = []", "there is a matrix A of size (..., ``n_freqs``), the", "with values between 0 and quantization_channels - 1 and returns", "the 14th python in science conference, pp. 18-25. 2015. *", "``axis``. Mask will be applied from indices ``[v_0, v_0 +", "freqs (Tensor): Mels converted in Hz \"\"\" if mel_scale not", "``htk`` or ``slaney``. 
(Default: ``htk``) Returns: mels (float): Frequency in", "1`` bins hop_length (int): Length of hop between STFT windows.", "/ float(n_mels) * (n + 0.5) * k) # size", ">>> rate = 1.3 # Speed up by 30% >>>", "+ list(shape[-3:])) time_steps = torch.arange(0, complex_specgrams.size(-2), rate, device=complex_specgrams.device, dtype=complex_specgrams.dtype) alphas", "+ phase_advance phase = torch.cat([phase_0, phase[..., :-1]], dim=-1) phase_acc =", "waveform[:, last_window_start, :] cur_sum -= frame_to_remove if norm_vars: cur_sumsq -=", "= torch.rand(specgrams.shape[:2], device=device, dtype=dtype) * mask_param min_value = torch.rand(specgrams.shape[:2], device=device,", "Update our phase estimates angles = rebuilt if momentum: angles", "freq = sample_rate / (EPSILON + indices.to(torch.float)) # unpack batch", "= False, ) -> Tensor: r\"\"\" Apply sliding-window cepstral mean", "or None): Exponent for the magnitude spectrogram, (must be >", "* [2] <NAME>., <NAME>., & <NAME>. \"A fast Griffin-Lim algorithm,\"", "*= 1.0 / math.sqrt(2.0) dct *= math.sqrt(2.0 / float(n_mels)) return", "= None, ) -> Tensor: r\"\"\" Apply codecs as a", "= specgram.dtype # pack batch shape = specgram.size() specgram =", "of the forward pointers, after revising our estimate of the", "in milliseconds. 
(default: 25.0) frame_shift (float, optional): Frame shift in", "- 1.0) # Fill in the linear scale f_min =", "= angle_1 - angle_0 - phase_advance phase = phase -", ":] - cur_sum / window_frames if norm_vars: if window_frames ==", "float = 0.005, nccf_ballast: float = 7000, lowpass_filter_width: int =", "Returns: Tensor: Masked spectrograms of dimensions (batch, channel, freq, time)", "/ logstep return mels def _mel_to_hz(mels: Tensor, mel_scale: str =", "= int(math.ceil(waveform_length / frame_size)) p = lags + num_of_frames *", "* torch.cos(phase_acc) imag_stretch = mag * torch.sin(phase_acc) complex_specgrams_stretch = torch.stack([real_stretch,", "num_feats, dtype=dtype, device=device) else: variance = cur_sumsq variance = variance", "the width of the mel band (area normalization). (Default: ``None``)", ">= min_log_hz: mels = min_log_mel + math.log(freq / min_log_hz) /", "torch.min(down_slopes, up_slopes)) if norm is not None and norm ==", "This algorithm assumes the signal has been scaled to between", "lags + 1): s1 = waveform[..., :-lag].unfold(-1, frame_size, frame_size)[..., :num_of_frames,", "3 mels = (freq - f_min) / f_sp # Fill", "str = \"htk\", ) -> Tensor: r\"\"\"Create a frequency bin", "complex_tensor (Tensor): Tensor shape of `(..., complex=2)` Return: Tensor: Angle", "None, encoding: Optional[str] = None, bits_per_sample: Optional[int] = None, )", "return half of results to avoid redundancy. Default: ``True`` Returns:", "being converted to decibel scale. Input should take the form", "scale. \"\"\" return ref * torch.pow(torch.pow(10.0, 0.1 * x), power)", "({n_freqs}) may be set too low.\" ) return fb def", "waveform[:, last_window_end, :] cur_sum += frame_to_add if norm_vars: cur_sumsq +=", "the time axis. The spectral centroid is defined as the", "have dimension ``[time, channel]``. 
compression (float): Used for formats other", "2014 IEEE International Conference on Acoustics, Speech and Signal Processing", "f_min) / f_sp logstep = math.log(6.4) / 27.0 if freq", "angle( complex_tensor: Tensor ) -> Tensor: r\"\"\"Compute the angle of", "recompute_frame: int = 500, snip_edges: bool = True, ) ->", "2 -> time) Returns: Tensor: Masked spectrogram of dimensions (channel,", "when upsampling NCCF. (default: 5) max_frames_latency (int, optional): Maximum number", "A reasonable number is 80. (Default: ``None``) Returns: Tensor: Output", "Length of hop between STFT windows win_length (int): Window size", "indices = _find_max_per_frame(nccf, sample_rate, freq_high) indices = _median_smoothing(indices, win_length) #", "input and output Tensor have dimension ``[channel, time]``. Otherwise, they", "waveform: Tensor, sample_rate: int, frame_time: float, freq_low: int ) ->", "parameter passed to padding (Default: ``\"replicate\"``) Returns: Tensor: Tensor of", "x = phase_vocoder(complex_specgrams, rate, phase_advance) >>> x.shape # with 231", "in the linear scale f_min = 0.0 f_sp = 200.0", "ref: float, power: float ) -> Tensor: r\"\"\"Turn a tensor", "freq_high)) # Find near enough max that is smallest best", "be more than twice lowpass-cutoff. (default: 4000) delta_pitch( float, optional):", "decibel scale to the power/amplitude scale. Args: x (Tensor): Input", "and amin)) top_db (float or None, optional): Minimum negative cut-off", "time, complex=2) >>> complex_specgrams = torch.randn(2, freq, 300, 2) >>>", "complex_specgrams_stretch.reshape(shape[:-3] + complex_specgrams_stretch.shape[1:]) return complex_specgrams_stretch def mask_along_axis_iid( specgrams: Tensor, mask_param:", "time]``. Otherwise, they have dimension ``[time, channel]``. 
compression (float): Used", "calculate the difference between each mel point and each stft", "d_t = \\frac{\\sum_{n=1}^{\\text{N}} n (c_{t+n} - c_{t-n})}{2 \\sum_{n=1}^{\\text{N}} n^2} where", "Args: x_mu (Tensor): Input tensor quantization_channels (int): Number of channels", "audio of dimension (..., time) sample_rate (int): Sample rate of", "(float): The momentum parameter for fast Griffin-Lim. Setting this to", "angles = 2 * math.pi * torch.rand(batch, freq, frames) else:", "beginning of frame :math:`i`, :math:`E(j)` is the energy :math:`\\sum_{n=j}^{j+N-1} w^2(n)`.", "mag * torch.sin(phase_acc) complex_specgrams_stretch = torch.stack([real_stretch, imag_stretch], dim=-1) # unpack", "(-2)``). win_length (int, optional): The window length for median smoothing", "= cmn_waveform.view(input_shape[:-2] + (num_frames, num_feats)) if len(input_shape) == 2: cmn_waveform", "float = 50, max_f0: float = 400, soft_min_f0: float =", "(float, optional): Minimum f0, applied in soft way, must not", "= _hz_to_mel(f_min, mel_scale=mel_scale) m_max = _hz_to_mel(f_max, mel_scale=mel_scale) m_pts = torch.linspace(m_min,", "int(math.ceil(waveform_length / frame_size)) p = lags + num_of_frames * frame_size", "take the highest value of NCCF, apply centered median smoothing,", "(float): Used for formats other than WAV. For mor details", "rate of `waveform`. frame_length (float, optional): Frame length in milliseconds.", "of frames used for energy normalization. (default: 0) simulate_first_pass_online (bool,", "pitch based on method described in [1]. This function computes", "float, amin: float, db_multiplier: float, top_db: Optional[float] = None )", "= f_pts.unsqueeze(0) - all_freqs.unsqueeze(1) # (n_freqs, n_mels + 2) #", "tensor over the given window. 
\"\"\" # Centered windowed pad_length", "-1: input_part = waveform[:, window_start: window_end - window_start, :] cur_sum", "*= variance cmn_waveform = cmn_waveform.view(input_shape[:-2] + (num_frames, num_feats)) if len(input_shape)", "equals the ``length`` parameter if given. \"\"\" assert momentum <", "pad_length = (win_length - 1) // 2 # \"replicate\" padding", "float(n_mels) * (n + 0.5) * k) # size (n_mfcc,", "list(shape[-2:])) specgram = specgram.pow(1 / power) # randomly initialize the", "min_cmn_window) if window_end > num_frames: window_start -= (window_end - num_frames)", "(..., time) sample_rate (int): Sample rate of the audio waveform", "`(..., freq, ceil(time/rate), complex=2)` Example >>> freq, hop_length = 1025,", "near the ending edge won't be snipped, so that the", "that can be detected (Hz) (Default: ``3400``). Returns: Tensor: Tensor", "to frequencies. Args: mels (Tensor): Mel frequencies mel_scale (str, optional):", "+ (num_frames, num_feats)) if len(input_shape) == 2: cmn_waveform = cmn_waveform.squeeze(0)", "phase_advance phase = torch.cat([phase_0, phase[..., :-1]], dim=-1) phase_acc = torch.cumsum(phase,", "= torch.stack([real_stretch, imag_stretch], dim=-1) # unpack batch complex_specgrams_stretch = complex_specgrams_stretch.reshape(shape[:-3]", ">= 3 n = (win_length - 1) // 2 #", "( (s1 * s2).sum(-1) / (EPSILON + torch.norm(s1, p=2, dim=-1)).pow(2)", "specgrams = specgrams.transpose(axis, -1) specgrams.masked_fill_((mask >= mask_start) & (mask <", "Whether to normalize by magnitude after stft center (bool, optional):", "milliseconds. (default: 10.0) min_f0 (float, optional): Minimum F0 to search", "1.0 ) -> Tuple[Tensor, Tensor]: r\"\"\"Separate a complex-valued spectrogram with", "Triangular filter banks (fb matrix) of size (``n_freqs``, ``n_mels``) meaning", "def _find_max_per_frame( nccf: Tensor, sample_rate: int, freq_high: int ) ->", "= waveform[:, t, :] - cur_sum / window_frames if norm_vars:", "on norm. 
Args: n_mfcc (int): Number of mfc coefficients to", "(float, optional): Increasing this factor reduces NCCF for quiet frames", "normalization. (default: 0) simulate_first_pass_online (bool, optional): If true, the function", "= True ) -> Tensor: r\"\"\"Create a spectrogram or a", "sample rate of the waveform (Hz) frame_time (float, optional): Duration", "NCCF. (default: 5) max_frames_latency (int, optional): Maximum number of frames", "of freq of dimension (..., frame) \"\"\" # pack batch", "ref * torch.pow(torch.pow(10.0, 0.1 * x), power) def _hz_to_mel(freq: float,", "Phase Accum phase = phase + phase_advance phase = torch.cat([phase_0,", "<NAME>, <NAME>, <NAME>, and <NAME>. \"librosa: Audio and music signal", "Masked spectrograms of dimensions (batch, channel, freq, time) \"\"\" if", "the beginning of frame :math:`i`, :math:`E(j)` is the energy :math:`\\sum_{n=j}^{j+N-1}", "coefficients to retain n_mels (int): Number of mel filterbanks norm", "griffinlim( specgram: Tensor, window: Tensor, n_fft: int, hop_length: int, win_length:", "torch.atan2(complex_tensor[..., 1], complex_tensor[..., 0]) def magphase( complex_tensor: Tensor, power: float", "Changes the encoding for the supported formats. For more details", "sample_rate (float): Sample rate of `waveform`. frame_length (float, optional): Frame", "1) upsample_filter_width (int, optional): Integer that determines filter width when", "norm. (Default: `1.0`). Returns: Tensor: Power of the normed input", "complex_tensor: Tensor, power: float = 1.0 ) -> Tensor: r\"\"\"Compute", "hops (n_frame). \"\"\" if pad > 0: # TODO add", "Frequency and Time masking are supported') device = specgrams.device dtype", "num_frames) window_end = num_frames if window_start < 0: window_start =", "ceil(300 / 1.3) torch.Size([2, 1025, 231, 2]) \"\"\" # pack", "= 50, max_f0: float = 400, soft_min_f0: float = 10.0,", ") -> Tensor: r\"\"\"Decode mu-law encoded signal. For more info", "normed input tensor. 
Shape of `(..., )` \"\"\" # Replace", "``n_mels``) meaning number of frequencies to highlight/apply to x the", "Tensor, win_length: int = 5, mode: str = \"replicate\" )", "used in Griffin-Lim, \" \"and will be removed in v0.9.0", "frame_time: float, freq_low: int ) -> Tensor: r\"\"\" Compute Normalized", "(int): Number of columns to be masked will be uniformly", "Tensor, sample_rate: int, pad: int, window: Tensor, n_fft: int, hop_length:", "torchaudio __all__ = [ \"spectrogram\", \"griffinlim\", \"amplitude_to_DB\", \"DB_to_amplitude\", \"compute_deltas\", \"compute_kaldi_pitch\",", "spec_f.reshape(shape[:-1] + spec_f.shape[-2:]) if normalized: spec_f /= window.pow(2.).sum().sqrt() if power", "``uniform(0, max_v - v)``. Args: specgrams (Tensor): Real spectrograms (batch,", "row-wise data of size (``n_mels``, ``n_mfcc``). \"\"\" # http://en.wikipedia.org/wiki/Discrete_cosine_transform#DCT-II n", "def mu_law_decoding( x_mu: Tensor, quantization_channels: int ) -> Tensor: r\"\"\"Decode", "where ``v`` is sampled from ``uniform(0, mask_param)``, and ``v_0`` from", "\"and will be removed in v0.9.0 release. To suppress this", "release. To suppress this warning, \" \"please use `normalized=False`.\") #", "2 * math.pi * torch.rand(batch, freq, frames) else: angles =", "Tensor: r\"\"\"Compute the angle of complex tensor input. Args: complex_tensor", "indices = mask * a[1] + ~mask * b[1] return", "up by 30% >>> phase_advance = torch.linspace( >>> 0, math.pi", "= torch.median(roll, -1) return values def detect_pitch_frequency( waveform: Tensor, sample_rate:", "(Default: `1.0`) Returns: (Tensor, Tensor): The magnitude and phase of", "EPSILON = 10 ** (-9) freq = sample_rate / (EPSILON", "randomly initialize the phase batch, freq, frames = specgram.size() if", "/ (f_pts[2:n_mels + 2] - f_pts[:n_mels]) fb *= enorm.unsqueeze(0) if", "value.long()).squeeze() assert mask_end - mask_start < mask_param if axis ==", "apply centered median smoothing, and convert to frequency. 
Note: If", "50, max_f0: float = 400, soft_min_f0: float = 10.0, penalty_factor:", "min_cmn_window: int = 100, center: bool = False, norm_vars: bool", "into the feature processing (affects output only if ``frames_per_chunk >", "sample_rate // 2, steps=1 + n_fft // 2, device=specgram.device).reshape((-1, 1))", "torch.nn.functional.pad(specgram, (n, n), mode=mode) kernel = torch.arange(-n, n + 1,", "torch.sign(x) * (torch.exp(torch.abs(x) * torch.log1p(mu)) - 1.0) / mu return", "rebuilt if momentum: angles = angles - tprev.mul_(momentum / (1", "*= enorm.unsqueeze(0) if (fb.max(dim=0).values == 0.).any(): warnings.warn( \"At least one", "win_length (int): Window size power (float or None): Exponent for", "Tensor, win_length: int ) -> Tensor: r\"\"\" Apply median smoothing", "= (min_value.long()).squeeze() mask_end = (min_value.long() + value.long()).squeeze() assert mask_end -", "torch.cumsum(phase, -1) mag = alphas * norm_1 + (1 -", "mask_along_axis_iid( specgrams: Tensor, mask_param: int, mask_value: float, axis: int )", "* specgram).sum(dim=freq_dim) / specgram.sum(dim=freq_dim) @_mod_utils.requires_sox() def apply_codec( waveform: Tensor, sample_rate:", "``[channel, time]`` else ``[time, channel]``. \"\"\" bytes = io.BytesIO() torchaudio.backend.sox_io_backend.save(bytes,", "(default: False) Relevant if ``frames_per_chunk > 0``. recompute_frame (int, optional):", "+= torch.cumsum(input_part ** 2, 1)[:, -1, :] else: if window_start", "along the time axis. The spectral centroid is defined as", "0]) def magphase( complex_tensor: Tensor, power: float = 1.0 )", ") -> Tensor: r\"\"\" For each frame, take the highest", "win_length >= 3 n = (win_length - 1) // 2", "(channel, freq, time) mask_param (int): Number of columns to be", "latter is taken. 
\"\"\" lag_min = int(math.ceil(sample_rate / freq_high)) #", "1)) freq_dim = -2 return (freqs * specgram).sum(dim=freq_dim) / specgram.sum(dim=freq_dim)", "fb = torch.max(zero, torch.min(down_slopes, up_slopes)) if norm is not None", "norm (str or None): Norm to use (either 'ortho' or", "may be set too low.\" ) return fb def create_dct(", "computation (int, default = 600) min_cmn_window (int, optional): Minimum CMN", "= min_value[..., None, None] mask_end = (min_value + value)[..., None,", "have the same mask interval. Args: specgram (Tensor): Real spectrogram", "normalize by magnitude after stft. n_iter (int): Number of iteration", "min_f0 (float, optional): Minimum F0 to search for (Hz) (default:", "def _mel_to_hz(mels: Tensor, mel_scale: str = \"htk\") -> Tensor: \"\"\"Convert", "= waveform.shape num_frames, num_feats = input_shape[-2:] waveform = waveform.view(-1, num_frames,", "cut-off in decibels. A reasonable number is 80. (Default: ``None``)", "size (``n_freqs``, ``n_mels``) meaning number of frequencies to highlight/apply to", "Tensor: r\"\"\"Compute the norm of complex tensor input. Args: complex_tensor", "audio of dimension (..., freq, time) cmn_window (int, optional): Window", "400, soft_min_f0: float = 10.0, penalty_factor: float = 0.1, lowpass_cutoff:", "mask_start = (min_value.long()).squeeze() mask_end = (min_value.long() + value.long()).squeeze() assert mask_end", "but above 1 may not converge. length (int or None):", ":math:`m`, :math:`w` is the waveform, :math:`N` is the length of", "norm_1 = torch.norm(complex_specgrams_1, p=2, dim=-1) phase = angle_1 - angle_0", "on method described in [1]. This function computes the equivalent", "frequencies. Args: mels (Tensor): Mel frequencies mel_scale (str, optional): Scale", "converge. length (int or None): Array length of the expected", "frequency that can be detected (Hz) (Default: ``3400``). Returns: Tensor:", "our algorithm measures. 
(default: 0.005) nccf_ballast (float, optional): Increasing this", "Tensor: Pitch feature. Shape: ``(batch, frames 2)`` where the last", "cmn_waveform = cmn_waveform.squeeze(0) return cmn_waveform def spectral_centroid( waveform: Tensor, sample_rate:", "0, frames_per_chunk: int = 0, simulate_first_pass_online: bool = False, recompute_frame:", "Returns: Tensor: Output tensor in power/amplitude scale. \"\"\" return ref", "different values for an audio clip split into snippets vs.", "\"spectral_centroid\", \"apply_codec\", ] def spectrogram( waveform: Tensor, pad: int, window:", "e.g., 1 for energy, 2 for power, etc. If None,", "for quiet frames (default: 7000) lowpass_filter_width (int, optional): Integer that", "the highest value of NCCF, apply centered median smoothing, and", "v)``. Args: specgrams (Tensor): Real spectrograms (batch, channel, freq, time)", "(frame_to_add ** 2) window_frames = window_end - window_start last_window_start =", "(string, optional): controls the padding method used when :attr:`center` is", "if mel_scale == \"htk\": return 700.0 * (10.0**(mels / 2595.0)", "Args: waveform (Tensor): Tensor of audio of dimension (..., freq,", "mu = torch.tensor(mu, dtype=x.dtype) x_mu = torch.sign(x) * torch.log1p(mu *", "Number of lags to check lags = int(math.ceil(sample_rate / freq_low))", "mel_scale=mel_scale) m_max = _hz_to_mel(f_max, mel_scale=mel_scale) m_pts = torch.linspace(m_min, m_max, n_mels", "torch.max(zero, torch.min(down_slopes, up_slopes)) if norm is not None and norm", "pointers, after revising our estimate of the signal energy. Relevant", "usually a spectrogram: .. math:: d_t = \\frac{\\sum_{n=1}^{\\text{N}} n (c_{t+n}", "num_feats, dtype=dtype, device=device) cur_sumsq = torch.zeros(num_channels, num_feats, dtype=dtype, device=device) cmn_waveform", "freq, frames) where freq is ``n_fft // 2 + 1``.", "Smallest relative change in pitch that our algorithm measures. 
(default:", "float, axis: int ) -> Tensor: r\"\"\" Apply a mask", "waveform (Tensor): Tensor of audio of dimension (..., time) pad", "(default: 10.0) min_f0 (float, optional): Minimum F0 to search for", "of dimension (..., freq, time) win_length (int, optional): The window", "-1) best = _combine_max(half, best) indices = best[1] # Add", "specgram.device dtype = specgram.dtype # pack batch shape = specgram.size()", "(default: 0.1) lowpass_cutoff (float, optional): Cutoff frequency for LowPass filter", "expected output. rand_init (bool): Initializes phase randomly if True, to", "more info see the `Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_ This algorithm assumes", "angles = angles.div(complex_norm(angles).add(1e-16).unsqueeze(-1).expand_as(angles)) # Return the final phase estimates waveform", "Tensor of audio of dimension (..., time) sample_rate (int): Sample", "((x_mu) / mu) * 2 - 1.0 x = torch.sign(x)", "-2, -1)) - top_db).view(-1, 1, 1, 1)) # Repack batch", "0) simulate_first_pass_online (bool, optional): If true, the function will output", "and <NAME>. \"librosa: Audio and music signal analysis in python.\"", "onesided=True, return_complex=True, ) ) # Update our phase estimates angles", "specgram.unsqueeze(-1).expand_as(angles) # And initialize the previous iterate to 0 rebuilt", "...)``. \"\"\" if norm is not None and norm !=", "Returns: Tensor: Tensor of freq of dimension (..., frame) \"\"\"", "bool = False, norm_vars: bool = False, ) -> Tensor:", ") -> Tuple[Tensor, Tensor]: \"\"\" Take value from first if", "function computes the equivalent of `compute-kaldi-pitch-feats` from Kaldi. 
Args: waveform", "# Add 1 empirical calibration offset indices += 1 return", "bool = True, ) -> torch.Tensor: \"\"\"Extract pitch based on", "torch.randn(2, freq, 300, 2) >>> rate = 1.3 # Speed", "norm == \"ortho\" dct[0] *= 1.0 / math.sqrt(2.0) dct *=", "- A pitch extraction algorithm tuned for automatic speech recognition", "waveform = waveform.reshape([-1] + shape[-1:]) nccf = _compute_nccf(waveform, sample_rate, frame_time,", "Griffin-Lim, \" \"and will be removed in v0.9.0 release. To", "f_sp = 200.0 / 3 mels = (freq - f_min)", "recompute_frame, snip_edges, ) result = result.reshape(shape[:-1] + result.shape[-2:]) return result", "+= torch.sum(input_part, 1) if norm_vars: cur_sumsq += torch.cumsum(input_part ** 2,", "freq bins m_min = _hz_to_mel(f_min, mel_scale=mel_scale) m_max = _hz_to_mel(f_max, mel_scale=mel_scale)", "mfc coefficients to retain n_mels (int): Number of mel filterbanks", "(mels >= min_log_mel) freqs[log_t] = min_log_hz * torch.exp(logstep * (mels[log_t]", "'momentum={} > 1 can be unstable'.format(momentum) assert momentum >= 0,", "the final phase estimates waveform = torch.istft(specgram * angles, n_fft=n_fft,", "coding: utf-8 -*- import io import math import warnings from", "waveform norm (Optional[str]): If 'slaney', divide the triangular mel weights", "angles = torch.zeros(batch, freq, frames) angles = torch.stack([angles.cos(), angles.sin()], dim=-1)", "phase recovery process. momentum (float): The momentum parameter for fast", "phase = angle(complex_tensor) return mag, phase def phase_vocoder( complex_specgrams: Tensor,", "torch.zeros(num_channels, num_feats, dtype=dtype, device=device) cur_sumsq = torch.zeros(num_channels, num_feats, dtype=dtype, device=device)", "\"angle\", \"magphase\", \"phase_vocoder\", 'mask_along_axis', 'mask_along_axis_iid', 'sliding_window_cmn', \"spectral_centroid\", \"apply_codec\", ] def", "for phase recovery process. 
momentum (float): The momentum parameter for", "spectrogram using the Griffin-Lim transformation. Implementation ported from `librosa`. *", "shape[-1]) assert win_length >= 3 n = (win_length - 1)", "roll = indices.unfold(-1, win_length, 1) values, _ = torch.median(roll, -1)", "to normalize by magnitude after stft center (bool, optional): whether", "# (n_freqs, n_mels + 2) # create overlapping triangles zero", "normalized=False, onesided=True, return_complex=True, ) ) # Update our phase estimates", "dimension indices = torch.nn.functional.pad( indices, (pad_length, 0), mode=\"constant\", value=0. )", "+ 2] - f_pts[:n_mels]) fb *= enorm.unsqueeze(0) if (fb.max(dim=0).values ==", "last_window_end = -1 cur_sum = torch.zeros(num_channels, num_feats, dtype=dtype, device=device) cur_sumsq", "set too low.\" ) return fb def create_dct( n_mfcc: int,", "\"compute_deltas\", \"detect_pitch_frequency\", \"DB_to_amplitude\", \"mu_law_encoding\", \"mu_law_decoding\", \"complex_norm\", \"angle\", \"magphase\", \"phase_vocoder\", 'mask_along_axis',", "augmentation. Args: waveform (Tensor): Audio data. Must be 2 dimensional.", "Florence, 2014, pp. 2494-2498, doi: 10.1109/ICASSP.2014.6854049. \"\"\" shape = waveform.shape", "The window length for median smoothing (in number of frames)", "!= 3: raise ValueError('Only Frequency and Time masking are supported')", "700.0 * (10.0**(mels / 2595.0) - 1.0) # Fill in", "masking on (1 -> frequency, 2 -> time) Returns: Tensor:", "* s2).sum(-1) / (EPSILON + torch.norm(s1, p=2, dim=-1)).pow(2) / (EPSILON", "power (float or None): Exponent for the magnitude spectrogram, (must", "of the second, elementwise. 
\"\"\" mask = (a[0] > thresh", "= compute_deltas(specgram) >>> delta2 = compute_deltas(delta) \"\"\" device = specgram.device", "(Tensor): Dimension of `(..., freq, time, complex=2)` rate (float): Speed-up", "forward pointers, after revising our estimate of the signal energy.", "angle(complex_specgrams_0) angle_1 = angle(complex_specgrams_1) norm_0 = torch.norm(complex_specgrams_0, p=2, dim=-1) norm_1", ":-1]], dim=-1) phase_acc = torch.cumsum(phase, -1) mag = alphas *", "F0 to search for (Hz) (default: 50.0) max_f0 (float, optional):", "quantization_channels - 1 and returns a signal scaled between -1", "\"\"\" input_shape = waveform.shape num_frames, num_feats = input_shape[-2:] waveform =", "not None and norm == \"slaney\": # Slaney-style mel is", "import math import warnings from typing import Optional, Tuple import", "math:: d_t = \\frac{\\sum_{n=1}^{\\text{N}} n (c_{t+n} - c_{t-n})}{2 \\sum_{n=1}^{\\text{N}} n^2}", "- v)``. Args: specgrams (Tensor): Real spectrograms (batch, channel, freq,", "spectrogram( waveform: Tensor, pad: int, window: Tensor, n_fft: int, hop_length:", "mask * a[0] + ~mask * b[0] indices = mask", "amin)) top_db (float or None, optional): Minimum negative cut-off in", "of (freq, 1) Returns: Tensor: Complex Specgrams Stretch with dimension", "freq bins # Equivalent filterbank construction by Librosa all_freqs =", "waveform. format (str): File format. channels_first (bool): When True, both", "frequency values, weighted by their magnitude. Args: waveform (Tensor): Tensor", "[3] <NAME> and <NAME>, \"Signal estimation from modified short-time Fourier", "/ f_sp logstep = math.log(6.4) / 27.0 if freq >=", "Shape of `(..., )` \"\"\" # Replace by torch.norm once", "window_end = max(t + 1, min_cmn_window) if window_end > num_frames:", "size divided by the frame-shift. This makes different types of", "= 1.3 # Speed up by 30% >>> phase_advance =", "sum of integer squared denom = n * (n +", "of features give the same number of frames. 
(default: True)", "and NCCF. Reference: - A pitch extraction algorithm tuned for", "between STFT windows. ( Default: ``win_length // 2``) win_length (int):", "of the 14th python in science conference, pp. 18-25. 2015.", "/ (EPSILON + torch.norm(s1, p=2, dim=-1)).pow(2) / (EPSILON + torch.norm(s2,", "float, phase_advance: Tensor ) -> Tensor: r\"\"\"Given a STFT tensor,", "cmn_window // 2 window_end = window_start + cmn_window else: window_start", "= 0 window_end = 0 if center: window_start = t", "the complex spectrum is returned instead. normalized (bool): Whether to", "1.0: return spec_f.abs() return spec_f.abs().pow(power) return torch.view_as_real(spec_f) def griffinlim( specgram:", "in range(n_iter): # Store the previous iterate tprev = rebuilt", "nccf.shape[-1] // 2 half = torch.max(nccf[..., lag_min:half_size], -1) best =", "* frame_size - waveform_length waveform = torch.nn.functional.pad(waveform, (0, p)) #", "return values def detect_pitch_frequency( waveform: Tensor, sample_rate: int, frame_time: float", "window_end > num_frames: window_start -= (window_end - num_frames) window_end =", "last_window_start = window_start last_window_end = window_end cmn_waveform[:, t, :] =", "`Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_ This algorithm assumes the signal has been", "1) // 2 # \"replicate\" padding in any dimension indices", "triangular mel weights by the width of the mel band", "-> Tensor: r\"\"\"Given a STFT tensor, speed up in time", "the signal has been scaled to between -1 and 1", ":attr:`waveform` on both sides so that the :math:`t`-th frame is", "specgrams.device dtype = specgrams.dtype value = torch.rand(specgrams.shape[:2], device=device, dtype=dtype) *", "time) cmn_window (int, optional): Window in frames for running average", "(Default: ``None``) mel_scale (str, optional): Scale to use: ``htk`` or", "If true, use a window centered on the current frame", "power is not None: if power == 1.0: return spec_f.abs()", 
"frame_to_remove if norm_vars: cur_sumsq -= (frame_to_remove ** 2) if window_end", "would see in the first pass of decoding -- not", "** 2, 1)[:, -1, :] else: if window_start > last_window_start:", "of a frame, :math:`b_i` is the beginning of frame :math:`i`,", "``uniform(0, max_v - v)``. All examples will have the same", "True, to zero otherwise. Returns: torch.Tensor: waveform of (..., time),", "of the audio waveform norm (Optional[str]): If 'slaney', divide the", "= 0.0 f_sp = 200.0 / 3 mels = (freq", "spectrogram of dimension (..., freq, frames) where freq is ``n_fft", "1)) # Repack batch x_db = x_db.reshape(shape) return x_db def", "frame/window n_fft (int): Size of FFT, creates ``n_fft // 2", "Speed up by 30% >>> phase_advance = torch.linspace( >>> 0,", "each frame/window n_fft (int): Size of FFT, creates ``n_fft //", "can be detected (Hz) (Default: ``3400``). Returns: Tensor: Tensor of", "between STFT windows win_length (int): Window size Returns: Tensor: Dimension", ") -> Tensor: r\"\"\"Create a DCT transformation matrix with shape", "Optional, Tuple import torch from torch import Tensor from torchaudio._internal", "be ``A * create_fb_matrix(A.size(-1), ...)``. \"\"\" if norm is not", "* mels # And now the nonlinear scale min_log_hz =", "dim=-1)).pow(2) / (EPSILON + torch.norm(s2, p=2, dim=-1)).pow(2) ) output_lag.append(output_frames.unsqueeze(-1)) nccf", ":-2]) / f_diff[:-1] # (n_freqs, n_mels) up_slopes = slopes[:, 2:]", "result = torch.ops.torchaudio.kaldi_ComputeKaldiPitch( waveform, sample_rate, frame_length, frame_shift, min_f0, max_f0, soft_min_f0,", "(affects output only if ``frames_per_chunk > 0`` and ``simulate_first_pass_online=True``) (default:", "= waveform.size()[-1] num_of_frames = int(math.ceil(waveform_length / frame_size)) p = lags", "(num_frames, num_feats)) if len(input_shape) == 2: cmn_waveform = cmn_waveform.squeeze(0) return", "Dimension (..., freq, time), freq is ``n_fft // 2 +", "1. 
Args: x (Tensor): Input tensor quantization_channels (int): Number of", "2:] / f_diff[1:] # (n_freqs, n_mels) fb = torch.max(zero, torch.min(down_slopes,", "+ (freq / 700.0)) # Fill in the linear part", "= torch.stft( input=waveform, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=window, center=center, pad_mode=pad_mode, normalized=False,", "spec_f = torch.stft( input=waveform, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=window, center=center, pad_mode=pad_mode,", "compute DB to power. If 0.5, will compute DB to", "waveform.reshape([-1] + shape[-1:]) nccf = _compute_nccf(waveform, sample_rate, frame_time, freq_low) indices", "thresh * b[0]) values = mask * a[0] + ~mask", "``frames_per_chunk > 0``. (default: 500) snip_edges (bool, optional): If this", "estimate of the signal energy. Relevant if ``frames_per_chunk > 0``.", "norm_0 real_stretch = mag * torch.cos(phase_acc) imag_stretch = mag *", "(str, optional): Scale to use: ``htk`` or ``slaney``. (Default: ``htk``)", "= waveform.view(-1, num_frames, num_feats) num_channels = waveform.shape[0] dtype = waveform.dtype", "= 10.0, penalty_factor: float = 0.1, lowpass_cutoff: float = 1000,", "= n * (n + 1) * (2 * n", "* slopes[:, :-2]) / f_diff[:-1] # (n_freqs, n_mels) up_slopes =", "str = \"htk\") -> float: r\"\"\"Convert Hz to Mels. Args:", "a frame (Default: ``10 ** (-2)``). win_length (int, optional): The", "parameter for fast Griffin-Lim. Setting this to 0 recovers the", "``slaney``. (Default: ``htk``) Returns: freqs (Tensor): Mels converted in Hz", "freq, time, complex=2) >>> complex_specgrams = torch.randn(2, freq, 300, 2)", "channel enorm = 2.0 / (f_pts[2:n_mels + 2] - f_pts[:n_mels])", ") -> Tuple[Tensor, Tensor]: r\"\"\"Separate a complex-valued spectrogram with shape", "0 if last_window_start == -1: input_part = waveform[:, window_start: window_end", "the ending edge won't be snipped, so that the number", "Griffin-Lim. 
Setting this to 0 recovers the original Griffin-Lim method.", "r\"\"\"Turn a tensor from the decibel scale to the power/amplitude", "decibels. A reasonable number is 80. (Default: ``None``) Returns: Tensor:", "ref (float): Reference which the output will be scaled by.", "that assuming there is a matrix A of size (...,", "length: Optional[int], rand_init: bool ) -> Tensor: r\"\"\"Compute waveform from", "``None``) Returns: Tensor: Output tensor in decibel scale \"\"\" x_db", "(float): Power of the norm. (Default: `1.0`). Returns: Tensor: Power", "snip_edges: bool = True, ) -> torch.Tensor: \"\"\"Extract pitch based", "(default: 1) upsample_filter_width (int, optional): Integer that determines filter width", "rebuilt = torch.view_as_real( torch.stft( input=inverse, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=window, center=True,", "[0, mask_param] mask_value (float): Value to assign to the masked", "= false) norm_vars (bool, optional): If true, normalize variance to", "signal. The spectrogram can be either magnitude-only or complex. Args:", "\"\"\" mu = quantization_channels - 1.0 if not x_mu.is_floating_point(): x_mu", "bigger than a multiplicative factor of the second, elementwise. \"\"\"", "applied/multiplied to each frame/window n_fft (int): Size of FFT, creates", ".. math:: \\phi_i(m) = \\frac{\\sum_{n=b_i}^{b_i + N-1} w(n) w(m+n)}{\\sqrt{E(b_i) E(m+b_i)}},", "momentum parameter for fast Griffin-Lim. Setting this to 0 recovers", "channel, freq, time)`. multiplier (float): Use 10. for power and", "def _compute_nccf( waveform: Tensor, sample_rate: int, frame_time: float, freq_low: int", "``(win_length-1)//2``. Args: specgram (Tensor): Tensor of audio of dimension (...,", "compute DB to amplitude. Returns: Tensor: Output tensor in power/amplitude", "power/amplitude scale. 
\"\"\" return ref * torch.pow(torch.pow(10.0, 0.1 * x),", "return 2595.0 * math.log10(1.0 + (freq / 700.0)) # Fill", "Minimum F0 to search for (Hz) (default: 50.0) max_f0 (float,", "IEEE Workshop on Applications of Signal Processing to Audio and", "\"librosa: Audio and music signal analysis in python.\" In Proceedings", "None: # Expand batch shape = x_db.size() packed_channels = shape[-3]", "-> Tensor: r\"\"\" Apply codecs as a form of augmentation.", "= alphas * norm_1 + (1 - alphas) * norm_0", "an input with values between 0 and quantization_channels - 1", "** (-2), win_length: int = 30, freq_low: int = 85,", "the previous iterate to 0 rebuilt = torch.tensor(0.) for _", "Speed-up factor phase_advance (Tensor): Expected phase advance in each bin.", "if x_db.dim() > 2 else 1 x_db = x_db.reshape(-1, packed_channels,", "complex_specgrams.size(-2), rate, device=complex_specgrams.device, dtype=complex_specgrams.dtype) alphas = time_steps % 1.0 phase_0", "specgram[:, mask_start:mask_end] = mask_value elif axis == 2: specgram[:, :,", "-> Tensor: r\"\"\"Compute delta coefficients of a tensor, usually a", "n * (n + 1) * (2 * n +", "the mel band (area normalization). (Default: ``None``) mel_scale (str, optional):", "recompute_frame (int, optional): Only relevant for compatibility with online pitch", "by the width of the mel band (area normalization). 
(Default:", "(..., time) pad (int): Two sided padding of signal window", "Returns: Tensor: Tensor of deltas of dimension (..., freq, time)", "Time masking are supported') # unpack batch specgram = specgram.reshape(shape[:-2]", "return_complex=True, ) # unpack batch spec_f = spec_f.reshape(shape[:-1] + spec_f.shape[-2:])", "complex_specgrams_stretch.shape[1:]) return complex_specgrams_stretch def mask_along_axis_iid( specgrams: Tensor, mask_param: int, mask_value:", "pack batch shape = specgram.size() specgram = specgram.reshape(1, -1, shape[-1])", "\\frac{\\sum_{n=b_i}^{b_i + N-1} w(n) w(m+n)}{\\sqrt{E(b_i) E(m+b_i)}}, where :math:`\\phi_i(m)` is the", "magnitude and phase of the complex tensor \"\"\" mag =", "Returns: Tensor: Output tensor in decibel scale \"\"\" x_db =", "nccf: Tensor, sample_rate: int, freq_high: int ) -> Tensor: r\"\"\"", "2)) variance = torch.pow(variance, -0.5) cmn_waveform[:, t, :] *= variance", "factor for FO change. (default: 0.1) lowpass_cutoff (float, optional): Cutoff", "window_end = window_start + cmn_window else: window_start = t -", "= \"htk\", ) -> Tensor: r\"\"\"Create a frequency bin conversion", "torch.arange(0, complex_specgrams.size(-2), rate, device=complex_specgrams.device, dtype=complex_specgrams.dtype) alphas = time_steps % 1.0", "input. Args: complex_tensor (Tensor): Tensor shape of `(..., complex=2)` Return:", "* n + 1) / 3 specgram = torch.nn.functional.pad(specgram, (n,", "For mor details see :py:func:`torchaudio.backend.sox_io_backend.save`. 
encoding (str, optional): Changes the", "complex tensor \"\"\" mag = complex_norm(complex_tensor, power) phase = angle(complex_tensor)", "str = \"replicate\" ) -> Tensor: r\"\"\"Compute delta coefficients of", "int, pad: int, window: Tensor, n_fft: int, hop_length: int, win_length:", "makes different types of features give the same number of", "device=specgram.device) specgram = specgram.unsqueeze(-1).expand_as(angles) # And initialize the previous iterate", ":pad_length] = torch.cat(pad_length * [indices[..., pad_length].unsqueeze(-1)], dim=-1) roll = indices.unfold(-1,", "pitch extraction algorithm tuned for automatic speech recognition <NAME>, <NAME>,", "Number of channels Returns: Tensor: Input after mu-law encoding \"\"\"", "else: if window_start > last_window_start: frame_to_remove = waveform[:, last_window_start, :]", "norm_vars: bool = False, ) -> Tensor: r\"\"\" Apply sliding-window", "0.005) nccf_ballast (float, optional): Increasing this factor reduces NCCF for", "be detected (Hz) (Default: ``3400``). Returns: Tensor: Tensor of freq", "momentum >= 0, 'momentum={} < 0'.format(momentum) if normalized: warnings.warn( \"The", "\"slaney\": # Slaney-style mel is scaled to be approx constant", "win_length (int, optional): The window length for median smoothing (in", "\"slaney\".') if mel_scale == \"htk\": return 2595.0 * math.log10(1.0 +", "padding method used when :attr:`center` is ``True``. Default: ``\"reflect\"`` onesided", "= last_window_end = -1 cur_sum = torch.zeros(num_channels, num_feats, dtype=dtype, device=device)", "~mask * b[1] return values, indices def _find_max_per_frame( nccf: Tensor,", "from ``uniform(0, max_v - v)``. All examples will have the", "last_window_start == -1: input_part = waveform[:, window_start: window_end - window_start,", "hop between STFT windows win_length (int): Window size power (float", "int, norm: Optional[str] ) -> Tensor: r\"\"\"Create a DCT transformation", "true, normalize variance to one. 
(bool, default = false) Returns:", "transformation. Implementation ported from `librosa`. * [1] McFee, Brian, <NAME>,", "1): s1 = waveform[..., :-lag].unfold(-1, frame_size, frame_size)[..., :num_of_frames, :] s2", "mor details see :py:func:`torchaudio.backend.sox_io_backend.save`. encoding (str, optional): Changes the encoding", ">>> 0, math.pi * hop_length, freq)[..., None] >>> x =", "- cmn_window window_end = t + 1 if window_start <", "mode=mode) kernel = torch.arange(-n, n + 1, 1, device=device, dtype=dtype).repeat(specgram.shape[1],", "Centered windowed pad_length = (win_length - 1) // 2 #", "spectrogram or a batch of spectrograms from a raw audio", "possible, modulo end effects). If false, window is to the", "= compute_deltas(delta) \"\"\" device = specgram.device dtype = specgram.dtype #", "(float): Minimum frequency (Hz) f_max (float): Maximum frequency (Hz) n_mels", "where :math:`\\phi_i(m)` is the NCCF at frame :math:`i` with lag", "\"A fast Griffin-Lim algorithm,\" IEEE Workshop on Applications of Signal", "quiet frames (default: 7000) lowpass_filter_width (int, optional): Integer that determines", "window length for median smoothing (in number of frames) (Default:", "above 1 may not converge. 
length (int or None): Array", "* a[1] + ~mask * b[1] return values, indices def", "win_length: int, power: float, normalized: bool, n_iter: int, momentum: float,", "algorithm tuned for automatic speech recognition <NAME>, <NAME>, <NAME>, <NAME>,", "win_length: int = 30, freq_low: int = 85, freq_high: int", "EPSILON = 10 ** (-9) # Number of lags to", "Workshop on Applications of Signal Processing to Audio and Acoustics", "mu-law encoding \"\"\" mu = quantization_channels - 1.0 if not", ">= 0, 'momentum={} < 0'.format(momentum) if normalized: warnings.warn( \"The argument", "win_length: int, power: Optional[float], normalized: bool, center: bool = True,", "Padding complex_specgrams = torch.nn.functional.pad(complex_specgrams, [0, 0, 0, 2]) # (new_bins,", "* frame_time)) waveform_length = waveform.size()[-1] num_of_frames = int(math.ceil(waveform_length / frame_size))", "processing (affects output only if ``frames_per_chunk > 0`` and ``simulate_first_pass_online=True``)", "mel_scale == \"htk\": return 700.0 * (10.0**(mels / 2595.0) -", "suppress this warning, \" \"please use `normalized=False`.\") # pack batch", "= 85, freq_high: int = 3400, ) -> Tensor: r\"\"\"Detect", "include a channel dimension and have the form `(batch, channel,", "*= math.sqrt(2.0 / float(n_mels)) return dct.t() def mu_law_encoding( x: Tensor,", "from the power/amplitude scale to the decibel scale. The output", "the default. (default: False) Relevant if ``frames_per_chunk > 0``. 
recompute_frame", "= waveform.device last_window_start = last_window_end = -1 cur_sum = torch.zeros(num_channels,", "= specgram.size() if rand_init: angles = 2 * math.pi *", "= 2 * math.pi * torch.rand(batch, freq, frames) else: angles", "batch x_db = x_db.reshape(shape) return x_db def DB_to_amplitude( x: Tensor,", "Default: ``\"reflect\"`` onesided (bool, optional): controls whether to return half", "+ list(freq.shape[-1:])) return freq def sliding_window_cmn( waveform: Tensor, cmn_window: int", "__all__ = [ \"spectrogram\", \"griffinlim\", \"amplitude_to_DB\", \"DB_to_amplitude\", \"compute_deltas\", \"compute_kaldi_pitch\", \"create_fb_matrix\",", "an online decoder would see in the first pass of", "= 0 if not center: if window_end > t: window_end", "width of the mel band (area normalization). (Default: ``None``) mel_scale", "Values near 1 can lead to faster convergence, but above", "= (freq - f_min) / f_sp # Fill in the", "encoded with values from 0 to quantization_channels - 1. Args:", "= angle(complex_specgrams[..., :1, :]) # Time Padding complex_specgrams = torch.nn.functional.pad(complex_specgrams,", "= waveform[:, last_window_start, :] cur_sum -= frame_to_remove if norm_vars: cur_sumsq", "(Hz) (default: 400.0) soft_min_f0 (float, optional): Minimum f0, applied in", "freq is ``n_fft // 2 + 1`` and ``n_fft`` is", "quantization_channels: int ) -> Tensor: r\"\"\"Decode mu-law encoded signal. For", "bits_per_sample: Optional[int] = None, ) -> Tensor: r\"\"\" Apply codecs", "-> Tensor: r\"\"\"Turn a tensor from the decibel scale to", "Mels. 
Args: freqs (float): Frequencies in Hz mel_scale (str, optional):", "2 half = torch.max(nccf[..., lag_min:half_size], -1) best = _combine_max(half, best)", "// 2, n_freqs) # calculate mel freq bins m_min =", "def spectral_centroid( waveform: Tensor, sample_rate: int, pad: int, window: Tensor,", "\"reflect\", onesided: bool = True ) -> Tensor: r\"\"\"Create a", "of channels Returns: Tensor: Input after mu-law decoding \"\"\" mu", "is the waveform, :math:`N` is the length of a frame,", "Input should take the form `(..., freq, time)`. Batched inputs", "are consistent with librosa.core.spectrum._spectrogram spec_f = torch.stft( input=waveform, n_fft=n_fft, hop_length=hop_length,", "by Librosa all_freqs = torch.linspace(0, sample_rate // 2, n_freqs) #", "waveform.size()[-1] num_of_frames = int(math.ceil(waveform_length / frame_size)) p = lags +", "Griffin-Lim method. Values near 1 can lead to faster convergence,", "fixed # https://github.com/pytorch/pytorch/issues/34279 return complex_tensor.pow(2.).sum(-1).pow(0.5 * power) def angle( complex_tensor:", "1.3 # Speed up by 30% >>> phase_advance = torch.linspace(", "x_db = x_db.reshape(shape) return x_db def DB_to_amplitude( x: Tensor, ref:", "= spectrogram(waveform, pad=pad, window=window, n_fft=n_fft, hop_length=hop_length, win_length=win_length, power=1., normalized=False) freqs", "and each stft freq point in hertz f_diff = f_pts[1:]", "mel freq bins m_min = _hz_to_mel(f_min, mel_scale=mel_scale) m_max = _hz_to_mel(f_max,", "*= 2.0 else: assert norm == \"ortho\" dct[0] *= 1.0", "-1)) - top_db).view(-1, 1, 1, 1)) # Repack batch x_db", "* (specgram.size(axis) - value) mask_start = (min_value.long()).squeeze() mask_end = (min_value.long()", "first half of lags, then the latter is taken. \"\"\"", "of each tensor in a batch depends on the maximum", "torch.Size([2, 1025, 231, 2]) \"\"\" # pack batch shape =", "Tensor have dimension ``[channel, time]``. 
Otherwise, they have dimension ``[time,", "compression: Optional[float] = None, encoding: Optional[str] = None, bits_per_sample: Optional[int]", "way, must not exceed min-f0 (default: 10.0) penalty_factor (float, optional):", "normalize by magnitude after stft center (bool, optional): whether to", "tensor from the decibel scale to the power/amplitude scale. Args:", "in a batch depends on the maximum value of that", "specgram: Tensor, win_length: int = 5, mode: str = \"replicate\"", "recovers the original Griffin-Lim method. Values near 1 can lead", "float(n_mels)) return dct.t() def mu_law_encoding( x: Tensor, quantization_channels: int )", "divide the triangular mel weights by the width of the", "(Hz) (default: 50.0) max_f0 (float, optional): Maximum F0 to search", "frame_size - waveform_length waveform = torch.nn.functional.pad(waveform, (0, p)) # Compute", "The magnitude and phase of the complex tensor \"\"\" mag", "The momentum parameter for fast Griffin-Lim. Setting this to 0", "by magnitude after stft center (bool, optional): whether to pad", "encoded signal. For more info see the `Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_", "frame, take the highest value of NCCF, apply centered median", "rate of the audio waveform. format (str): File format. channels_first", "bins # Equivalent filterbank construction by Librosa all_freqs = torch.linspace(0,", "of dimension (..., frame) \"\"\" # pack batch shape =", "filter width of lowpass filter, more gives sharper filter. (default:", "1, 1, 1)) # Repack batch x_db = x_db.reshape(shape) return", "= torch.randn(1, 40, 1000) >>> delta = compute_deltas(specgram) >>> delta2", "companding. For more info see the `Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_ This", "Whether to normalize by magnitude after stft. n_iter (int): Number", "fast Griffin-Lim. 
Setting this to 0 recovers the original Griffin-Lim", "- alphas) * norm_0 real_stretch = mag * torch.cos(phase_acc) imag_stretch", "specgrams def mask_along_axis( specgram: Tensor, mask_param: int, mask_value: float, axis:", "optional): If true, normalize variance to one. (bool, default =", "Processing to Audio and Acoustics (pp. 1-4), Oct. 2013. *", "freq, time), freq is ``n_fft // 2 + 1`` and", "angles = torch.stack([angles.cos(), angles.sin()], dim=-1) \\ .to(dtype=specgram.dtype, device=specgram.device) specgram =", "torch.stack([angles.cos(), angles.sin()], dim=-1) \\ .to(dtype=specgram.dtype, device=specgram.device) specgram = specgram.unsqueeze(-1).expand_as(angles) #", "/ math.sqrt(2.0) dct *= math.sqrt(2.0 / float(n_mels)) return dct.t() def", "= torch.max(nccf[..., lag_min:half_size], -1) best = _combine_max(half, best) indices =", "value=0. ) indices[..., :pad_length] = torch.cat(pad_length * [indices[..., pad_length].unsqueeze(-1)], dim=-1)", "the frequency values, weighted by their magnitude. Args: waveform (Tensor):", "the complex tensor \"\"\" mag = complex_norm(complex_tensor, power) phase =", "torch.pow(torch.pow(10.0, 0.1 * x), power) def _hz_to_mel(freq: float, mel_scale: str", "mels = min_log_mel + math.log(freq / min_log_hz) / logstep return", "a multiplicative factor of the second, elementwise. \"\"\" mask =", "waveform (Tensor): Tensor of audio of dimension (..., freq, time)", "p = lags + num_of_frames * frame_size - waveform_length waveform", "Rebuild the spectrogram rebuilt = torch.view_as_real( torch.stft( input=inverse, n_fft=n_fft, hop_length=hop_length,", "= torch.arange(0, complex_specgrams.size(-2), rate, device=complex_specgrams.device, dtype=complex_specgrams.dtype) alphas = time_steps %", "True, ) -> torch.Tensor: \"\"\"Extract pitch based on method described", "``n_fft`` is the number of Fourier bins, and time is", "the spectrogram coeffcients at time :math:`t`, :math:`N` is ``(win_length-1)//2``. 
Args:", "(int, optional): Minimum CMN window used at start of decoding", "time) mask_param (int): Number of columns to be masked will", "torch.cat(output_lag, -1) return nccf def _combine_max( a: Tuple[Tensor, Tensor], b:", "`waveform`. frame_length (float, optional): Frame length in milliseconds. (default: 25.0)", "tensor before being converted to power/amplitude scale. ref (float): Reference", "Maximum number of frames of latency that we allow pitch", "else: window_start = t - cmn_window window_end = t +", "see in the first pass of decoding -- not the", "[2] <NAME>., <NAME>., & <NAME>. \"A fast Griffin-Lim algorithm,\" IEEE", "in v0.9.0 release. To suppress this warning, \" \"please use", "\"\"\" return ref * torch.pow(torch.pow(10.0, 0.1 * x), power) def", "spec_f.shape[-2:]) if normalized: spec_f /= window.pow(2.).sum().sqrt() if power is not", "** 2) / (window_frames ** 2)) variance = torch.pow(variance, -0.5)", "be right-multiplied to row-wise data of size (``n_mels``, ``n_mfcc``). \"\"\"", "(Default: ``5``) mode (str, optional): Mode parameter passed to padding", "it has ``[channel, time]`` else ``[time, channel]``. \"\"\" bytes =", "measures. (default: 0.005) nccf_ballast (float, optional): Increasing this factor reduces", "return x_db def DB_to_amplitude( x: Tensor, ref: float, power: float", "the current frame (to the extent possible, modulo end effects).", "typing import Optional, Tuple import torch from torch import Tensor", "returned instead. normalized (bool): Whether to normalize by magnitude after", "sample_rate: int, norm: Optional[str] = None, mel_scale: str = \"htk\",", "values from 0 to quantization_channels - 1. 
Args: x (Tensor):", "= specgram.device dtype = specgram.dtype # pack batch shape =", "is scaled to be approx constant energy per channel enorm", "cur_sumsq = torch.zeros(num_channels, num_feats, dtype=dtype, device=device) cmn_waveform = torch.zeros( num_channels,", "audio of dimension (..., freq, time) sample_rate (int): The sample", "freq, time, complex=2)` rate (float): Speed-up factor phase_advance (Tensor): Expected", "Length of hop between STFT windows. ( Default: ``win_length //", "- window_start, :] cur_sum += torch.sum(input_part, 1) if norm_vars: cur_sumsq", "the signal energy. Relevant if ``frames_per_chunk > 0``. (default: 500)", "return specgrams def mask_along_axis( specgram: Tensor, mask_param: int, mask_value: float,", "matrix) of size (``n_freqs``, ``n_mels``) meaning number of frequencies to", ">>> delta = compute_deltas(specgram) >>> delta2 = compute_deltas(delta) \"\"\" device", "return dct.t() def mu_law_encoding( x: Tensor, quantization_channels: int ) ->", "for energy, 2 for power, etc. If None, then the", "return cmn_waveform def spectral_centroid( waveform: Tensor, sample_rate: int, pad: int,", "``x`` db_multiplier (float): Log10(max(reference value and amin)) top_db (float or", "+ list(shape[-2:])) value = torch.rand(1) * mask_param min_value = torch.rand(1)", "create overlapping triangles zero = torch.zeros(1) down_slopes = (-1.0 *", "Complex Specgrams Stretch with dimension of `(..., freq, ceil(time/rate), complex=2)`", "codecs as a form of augmentation. Args: waveform (Tensor): Audio", "Default: ``True`` Returns: Tensor: Dimension (..., freq, time), freq is", "torch.tensor(mu, dtype=x.dtype) x_mu = torch.sign(x) * torch.log1p(mu * torch.abs(x)) /", "frequency. It is implemented using normalized cross-correlation function and median", "power and 20. 
for amplitude amin (float): Number to clamp", "1025, 231, 2]) \"\"\" # pack batch shape = complex_specgrams.size()", "= True, compression: Optional[float] = None, encoding: Optional[str] = None,", "Integer that determines filter width when upsampling NCCF. (default: 5)", "after revising our estimate of the signal energy. Relevant if", "if mel_scale not in ['slaney', 'htk']: raise ValueError('mel_scale should be", "down_slopes = (-1.0 * slopes[:, :-2]) / f_diff[:-1] # (n_freqs,", "int, mask_value: float, axis: int ) -> Tensor: r\"\"\" Apply", "mag * torch.cos(phase_acc) imag_stretch = mag * torch.sin(phase_acc) complex_specgrams_stretch =", "the first pass of decoding -- not the final version", "= angle(complex_specgrams_0) angle_1 = angle(complex_specgrams_1) norm_0 = torch.norm(complex_specgrams_0, p=2, dim=-1)", "= None, encoding: Optional[str] = None, bits_per_sample: Optional[int] = None,", "(float or None, optional): Minimum negative cut-off in decibels. A", "(int): Window size power (float or None): Exponent for the", "torch.Tensor, sample_rate: float, frame_length: float = 25.0, frame_shift: float =", ") -> Tensor: r\"\"\"Turn a tensor from the decibel scale", "the angle of complex tensor input. Args: complex_tensor (Tensor): Tensor", "Number of channels Returns: Tensor: Input after mu-law decoding \"\"\"", "(either 'ortho' or None) Returns: Tensor: The transformation matrix, to", "dct[0] *= 1.0 / math.sqrt(2.0) dct *= math.sqrt(2.0 / float(n_mels))", "mu return x def complex_norm( complex_tensor: Tensor, power: float =", "shape `(..., 2)` into its magnitude and phase. 
Args: complex_tensor", "(int, default = 600) min_cmn_window (int, optional): Minimum CMN window", "f_max: float, n_mels: int, sample_rate: int, norm: Optional[str] = None,", "time) \"\"\" specgram = spectrogram(waveform, pad=pad, window=window, n_fft=n_fft, hop_length=hop_length, win_length=win_length,", "\"please use `normalized=False`.\") # pack batch shape = specgram.size() specgram", "s2 = waveform[..., lag:].unfold(-1, frame_size, frame_size)[..., :num_of_frames, :] output_frames =", "centroid is defined as the weighted average of the frequency", "= mag * torch.cos(phase_acc) imag_stretch = mag * torch.sin(phase_acc) complex_specgrams_stretch", "Args: freqs (float): Frequencies in Hz mel_scale (str, optional): Scale", "norm is not None and norm != \"slaney\": raise ValueError(\"norm", "-1, shape[-1]) assert win_length >= 3 n = (win_length -", "def _combine_max( a: Tuple[Tensor, Tensor], b: Tuple[Tensor, Tensor], thresh: float", "power) def angle( complex_tensor: Tensor ) -> Tensor: r\"\"\"Compute the", "amin: float, db_multiplier: float, top_db: Optional[float] = None ) ->", "The sample rate of the waveform (Hz) frame_time (float, optional):", "Apply sliding-window cepstral mean (and optionally variance) normalization per utterance.", "dtype=dtype, device=device) cur_sumsq = torch.zeros(num_channels, num_feats, dtype=dtype, device=device) cmn_waveform =", "= phase + phase_advance phase = torch.cat([phase_0, phase[..., :-1]], dim=-1)", "computing delta (Default: ``5``) mode (str, optional): Mode parameter passed", "energy normalization. (default: 0) simulate_first_pass_online (bool, optional): If true, the", "Angle of a complex tensor. Shape of `(..., )` \"\"\"", "our phase estimates angles = rebuilt if momentum: angles =", "-> Tensor: r\"\"\"Compute the norm of complex tensor input. Args:", "dimension (..., frame) \"\"\" input_shape = waveform.shape num_frames, num_feats =", "divided by the frame-shift. 
This makes different types of features", "close to the first half of lags, then the latter", "def griffinlim( specgram: Tensor, window: Tensor, n_fft: int, hop_length: int,", "1 for energy, 2 for power, etc. normalized (bool): Whether", "by a factor of ``rate``. Args: complex_specgrams (Tensor): Dimension of", "centered on the current frame (to the extent possible, modulo", "int, hop_length: int, win_length: int, power: float, normalized: bool, n_iter:", "to apply masking on (1 -> frequency, 2 -> time)", "input_shape = waveform.shape num_frames, num_feats = input_shape[-2:] waveform = waveform.view(-1,", "def compute_kaldi_pitch( waveform: torch.Tensor, sample_rate: float, frame_length: float = 25.0,", "(freq / 700.0)) # Fill in the linear part f_min", "list(shape[-3:])) time_steps = torch.arange(0, complex_specgrams.size(-2), rate, device=complex_specgrams.device, dtype=complex_specgrams.dtype) alphas =", "result would be ``A * create_fb_matrix(A.size(-1), ...)``. \"\"\" if norm", "the equivalent of `compute-kaldi-pitch-feats` from Kaldi. 
Args: waveform (Tensor): The", "Equivalent filterbank construction by Librosa all_freqs = torch.linspace(0, sample_rate //", "\"DB_to_amplitude\", \"compute_deltas\", \"compute_kaldi_pitch\", \"create_fb_matrix\", \"create_dct\", \"compute_deltas\", \"detect_pitch_frequency\", \"DB_to_amplitude\", \"mu_law_encoding\", \"mu_law_decoding\",", "cmn_window (int, optional): Window in frames for running average CMN", "of `(..., freq, time, complex=2)` rate (float): Speed-up factor phase_advance", "import torch from torch import Tensor from torchaudio._internal import module_utils", "window_end cmn_waveform[:, t, :] = waveform[:, t, :] - cur_sum", "and norm != \"slaney\": raise ValueError(\"norm must be one of", "mask_param] mask_value (float): Value to assign to the masked columns", "N-1} w(n) w(m+n)}{\\sqrt{E(b_i) E(m+b_i)}}, where :math:`\\phi_i(m)` is the NCCF at", "``True`` Returns: Tensor: Dimension (..., freq, time), freq is ``n_fft", "2] - f_pts[:n_mels]) fb *= enorm.unsqueeze(0) if (fb.max(dim=0).values == 0.).any():", "_median_smoothing(indices, win_length) # Convert indices to frequency EPSILON = 10", "/ 27.0 log_t = (mels >= min_log_mel) freqs[log_t] = min_log_hz", "freqs = f_min + f_sp * mels # And now", "freq point in hertz f_diff = f_pts[1:] - f_pts[:-1] #", "for _ in range(n_iter): # Store the previous iterate tprev", "frame_size, frame_size)[..., :num_of_frames, :] output_frames = ( (s1 * s2).sum(-1)", "algorithm measures. (default: 0.005) nccf_ballast (float, optional): Increasing this factor", "windows win_length (int): Window size power (float or None): Exponent", "onesided=onesided, return_complex=True, ) # unpack batch spec_f = spec_f.reshape(shape[:-1] +", "= rebuilt if momentum: angles = angles - tprev.mul_(momentum /", "will have the same mask interval. 
Args: specgram (Tensor): Real", "(min_value.long()).squeeze() mask_end = (min_value.long() + value.long()).squeeze() assert mask_end - mask_start", "r\"\"\"Given a STFT tensor, speed up in time without modifying", "= waveform.reshape([-1] + shape[-1:]) nccf = _compute_nccf(waveform, sample_rate, frame_time, freq_low)", "= torch.nn.functional.pad(waveform, (pad, pad), \"constant\") # pack batch shape =", "= torch.cos(math.pi / float(n_mels) * (n + 0.5) * k)", "int = 5, mode: str = \"replicate\" ) -> Tensor:", "of NCCF, apply centered median smoothing, and convert to frequency.", "weighted average of the frequency values, weighted by their magnitude.", "method used when :attr:`center` is ``True``. Default: ``\"reflect\"`` onesided (bool,", "512 >>> # (channel, freq, time, complex=2) >>> complex_specgrams =", "50.0) max_f0 (float, optional): Maximum F0 to search for (Hz)", "won't be snipped, so that the number of frames is", "= input_shape[-2:] waveform = waveform.view(-1, num_frames, num_feats) num_channels = waveform.shape[0]", "def DB_to_amplitude( x: Tensor, ref: float, power: float ) ->", "Tensor, sample_rate: int, frame_time: float, freq_low: int ) -> Tensor:", "from Kaldi. Args: waveform (Tensor): The input waveform of shape", "= 500, snip_edges: bool = True, ) -> torch.Tensor: \"\"\"Extract", "bins, and time is the number of window hops (n_frame).", "time) win_length (int, optional): The window length used for computing", "specgram[:, :, mask_start:mask_end] = mask_value else: raise ValueError('Only Frequency and", "complex tensor. 
Shape of `(..., )` \"\"\" return torch.atan2(complex_tensor[..., 1],", "kernel = torch.arange(-n, n + 1, 1, device=device, dtype=dtype).repeat(specgram.shape[1], 1,", "int, freq_high: int ) -> Tensor: r\"\"\" For each frame,", "(..., frame) \"\"\" input_shape = waveform.shape num_frames, num_feats = input_shape[-2:]", "torch.rand(1) * mask_param min_value = torch.rand(1) * (specgram.size(axis) - value)", "if axis == 1: specgram[:, mask_start:mask_end] = mask_value elif axis", "non-critical parameter; the frame at which we recompute some of", "(n_freqs, n_mels + 2) # create overlapping triangles zero =", "Tensor: r\"\"\"Detect pitch frequency. It is implemented using normalized cross-correlation", "values. \" f\"The value for `n_mels` ({n_mels}) may be set", "* (torch.exp(torch.abs(x) * torch.log1p(mu)) - 1.0) / mu return x", "[] for lag in range(1, lags + 1): s1 =", "optional): Cutoff frequency for LowPass filter (Hz) (default: 1000) resample_frequency", "complex_specgrams = complex_specgrams.reshape([-1] + list(shape[-3:])) time_steps = torch.arange(0, complex_specgrams.size(-2), rate,", "what an online decoder would see in the first pass", "``[v_0, v_0 + v)``, where ``v`` is sampled from ``uniform(0,", "\"compute_deltas\", \"compute_kaldi_pitch\", \"create_fb_matrix\", \"create_dct\", \"compute_deltas\", \"detect_pitch_frequency\", \"DB_to_amplitude\", \"mu_law_encoding\", \"mu_law_decoding\", \"complex_norm\",", ":py:func:`torchaudio.backend.sox_io_backend.save`. Returns: torch.Tensor: Resulting Tensor. 
If ``channels_first=True``, it has ``[channel,", "(Hz) frame_time (float, optional): Duration of a frame (Default: ``10", "waveform.reshape(shape[:-2] + waveform.shape[-1:]) return waveform def amplitude_to_DB( x: Tensor, multiplier:", "2, n_freqs) # calculate mel freq bins m_min = _hz_to_mel(f_min,", "io.BytesIO() torchaudio.backend.sox_io_backend.save(bytes, waveform, sample_rate, channels_first, compression, format, encoding, bits_per_sample )", "~mask * b[0] indices = mask * a[1] + ~mask", "may not converge. length (int or None): Array length of", ") indices[..., :pad_length] = torch.cat(pad_length * [indices[..., pad_length].unsqueeze(-1)], dim=-1) roll", "** (-9) freq = sample_rate / (EPSILON + indices.to(torch.float)) #", "Norm to use (either 'ortho' or None) Returns: Tensor: The", "torch.nn.functional.pad(waveform, (pad, pad), \"constant\") # pack batch shape = waveform.size()", "dct *= math.sqrt(2.0 / float(n_mels)) return dct.t() def mu_law_encoding( x:", "not used in Griffin-Lim, \" \"and will be removed in", "torch.view_as_real( torch.stft( input=inverse, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=window, center=True, pad_mode='reflect', normalized=False,", "warnings.warn( \"The argument normalized is not used in Griffin-Lim, \"", "n_mels (int): Number of mel filterbanks sample_rate (int): Sample rate", "mel_scale (str, optional): Scale to use: ``htk`` or ``slaney``. (Default:", "default. (default: False) Relevant if ``frames_per_chunk > 0``. recompute_frame (int,", ") -> Tensor: r\"\"\"Compute waveform from a linear scale magnitude", "/ mu) * 2 - 1.0 x = torch.sign(x) *", "waveform.view(-1, num_frames, num_feats) num_channels = waveform.shape[0] dtype = waveform.dtype device", "Processing (ICASSP), Florence, 2014, pp. 2494-2498, doi: 10.1109/ICASSP.2014.6854049. \"\"\" shape", "mels = (freq - f_min) / f_sp # Fill in", "Minimum negative cut-off in decibels. 
A reasonable number is 80.", "values, _ = torch.median(roll, -1) return values def detect_pitch_frequency( waveform:", "return complex_tensor.pow(2.).sum(-1).pow(0.5 * power) def angle( complex_tensor: Tensor ) ->", "+ 1): s1 = waveform[..., :-lag].unfold(-1, frame_size, frame_size)[..., :num_of_frames, :]", "to. Must be more than twice lowpass-cutoff. (default: 4000) delta_pitch(", ">>> specgram = torch.randn(1, 40, 1000) >>> delta = compute_deltas(specgram)", "the maximum value of that tensor, and so may return", "* b[0] indices = mask * a[1] + ~mask *", "dtype = specgrams.dtype value = torch.rand(specgrams.shape[:2], device=device, dtype=dtype) * mask_param", "= [] for lag in range(1, lags + 1): s1", "Tensor shape of `(..., complex=2)` Return: Tensor: Angle of a", "signal has been scaled to between -1 and 1 and", "torch.arange(0, specgrams.size(axis), device=device, dtype=dtype) # Per batch example masking specgrams", "dimension (..., time) pad (int): Two sided padding of signal", "weights by the width of the mel band (area normalization).", "logstep = math.log(6.4) / 27.0 log_t = (mels >= min_log_mel)", "float, f_max: float, n_mels: int, sample_rate: int, norm: Optional[str] =", "of mfc coefficients to retain n_mels (int): Number of mel", "``slaney``. (Default: ``htk``) Returns: Tensor: Triangular filter banks (fb matrix)", "(1 -> frequency, 2 -> time) Returns: Tensor: Masked spectrogram", "lags = int(math.ceil(sample_rate / freq_low)) frame_size = int(math.ceil(sample_rate * frame_time))", "a signal scaled between -1 and 1. Args: x_mu (Tensor):", "return ref * torch.pow(torch.pow(10.0, 0.1 * x), power) def _hz_to_mel(freq:", "* [1] McFee, Brian, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and", "freq_low: int = 85, freq_high: int = 3400, ) ->", "- 1.0 if not x.is_floating_point(): x = x.to(torch.float) mu =", "1. 
Args: x_mu (Tensor): Input tensor quantization_channels (int): Number of", "= torch.nn.functional.pad( indices, (pad_length, 0), mode=\"constant\", value=0. ) indices[..., :pad_length]", "\"\"\" # http://en.wikipedia.org/wiki/Discrete_cosine_transform#DCT-II n = torch.arange(float(n_mels)) k = torch.arange(float(n_mfcc)).unsqueeze(1) dct", "= 600) min_cmn_window (int, optional): Minimum CMN window used at", "bool = False, recompute_frame: int = 500, snip_edges: bool =", "all zero values. \" f\"The value for `n_mels` ({n_mels}) may", "torch.Tensor: waveform of (..., time), where time equals the ``length``", "apply_codec( waveform: Tensor, sample_rate: int, format: str, channels_first: bool =", "torch.view_as_real(spec_f) def griffinlim( specgram: Tensor, window: Tensor, n_fft: int, hop_length:", "indices ``[v_0, v_0 + v)``, where ``v`` is sampled from", "phase estimates angles = rebuilt if momentum: angles = angles", "the linear scale f_min = 0.0 f_sp = 200.0 /", "a spectrogram: .. math:: d_t = \\frac{\\sum_{n=1}^{\\text{N}} n (c_{t+n} -", "0 window_end = 0 if center: window_start = t -", "specgram.shape[-2:]) return specgram def compute_deltas( specgram: Tensor, win_length: int =", "window centered on the current frame (to the extent possible,", "(window_frames ** 2)) variance = torch.pow(variance, -0.5) cmn_waveform[:, t, :]", "pack batch shape = waveform.size() waveform = waveform.reshape(-1, shape[-1]) #", "variance -= ((cur_sum ** 2) / (window_frames ** 2)) variance", "torch.norm(s2, p=2, dim=-1)).pow(2) ) output_lag.append(output_frames.unsqueeze(-1)) nccf = torch.cat(output_lag, -1) return", "int, norm: Optional[str] = None, mel_scale: str = \"htk\", )", "Audio and music signal analysis in python.\" In Proceedings of", "= torch.istft(specgram * angles, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=window, length=length) #", "int = 100, center: bool = False, norm_vars: bool =", "be snipped, so that the number of frames is the", "None, 
optional): Minimum negative cut-off in decibels. A reasonable number", "Expected phase advance in each bin. Dimension of (freq, 1)", "a tensor, usually a spectrogram: .. math:: d_t = \\frac{\\sum_{n=1}^{\\text{N}}", "return specgram def compute_deltas( specgram: Tensor, win_length: int = 5,", "-> frequency, 3 -> time) Returns: Tensor: Masked spectrograms of", "channels Returns: Tensor: Input after mu-law encoding \"\"\" mu =", "input tensor. Shape of `(..., )` \"\"\" # Replace by", "in Hz \"\"\" if mel_scale not in ['slaney', 'htk']: raise", "torch.rand(specgrams.shape[:2], device=device, dtype=dtype) * (specgrams.size(axis) - value) # Create broadcastable", "# Rebuild the spectrogram rebuilt = torch.view_as_real( torch.stft( input=inverse, n_fft=n_fft,", "set too high. \" f\"Or, the value for `n_freqs` ({n_freqs})", "transformation matrix with shape (``n_mels``, ``n_mfcc``), normalized depending on norm.", "/ torch.log1p(mu) x_mu = ((x_mu + 1) / 2 *", "= 100) center (bool, optional): If true, use a window", "1.0 ) -> Tensor: r\"\"\"Compute the norm of complex tensor", ":math:`t`-th frame is centered at time :math:`t \\times \\text{hop\\_length}`. Default:", "- waveform_length waveform = torch.nn.functional.pad(waveform, (0, p)) # Compute lags", "is returned instead. normalized (bool): Whether to normalize by magnitude", "of the waveform (Hz) frame_time (float, optional): Duration of a", "x (Tensor): Input tensor quantization_channels (int): Number of channels Returns:", "int ) -> Tensor: r\"\"\"Decode mu-law encoded signal. For more", "be scaled by. power (float): If power equals 1, will", "of FFT, creates ``n_fft // 2 + 1`` bins hop_length", "mel filterbanks sample_rate (int): Sample rate of the audio waveform", "Power of the norm. (Default: `1.0`). Returns: Tensor: Power of", "2) >>> rate = 1.3 # Speed up by 30%", "mode=\"constant\", value=0. 
) indices[..., :pad_length] = torch.cat(pad_length * [indices[..., pad_length].unsqueeze(-1)],", "a matrix A of size (..., ``n_freqs``), the applied result", "of hop between STFT windows win_length (int): Window size power", "// 2 # \"replicate\" padding in any dimension indices =", "0, 2]) # (new_bins, freq, 2) complex_specgrams_0 = complex_specgrams.index_select(-2, time_steps.long())", "Tensor: Tensor of freq of dimension (..., frame) \"\"\" #", "+ indices.to(torch.float)) # unpack batch freq = freq.reshape(shape[:-1] + list(freq.shape[-1:]))", "r\"\"\"Create a DCT transformation matrix with shape (``n_mels``, ``n_mfcc``), normalized", "to each frame/window n_fft (int): Size of FFT hop_length (int):", "specgram.sum(dim=freq_dim) @_mod_utils.requires_sox() def apply_codec( waveform: Tensor, sample_rate: int, format: str,", "Store the previous iterate tprev = rebuilt # Invert with", "(float, optional): Duration of a frame (Default: ``10 ** (-2)``).", "def mask_along_axis( specgram: Tensor, mask_param: int, mask_value: float, axis: int", "complex_tensor (Tensor): Tensor shape of `(..., complex=2)` power (float): Power", "default = false) Returns: Tensor: Tensor of freq of dimension", "min_f0: float = 50, max_f0: float = 400, soft_min_f0: float", "(a[0] > thresh * b[0]) values = mask * a[0]", "time_steps.long()) complex_specgrams_1 = complex_specgrams.index_select(-2, (time_steps + 1).long()) angle_0 = angle(complex_specgrams_0)", "* mask_param min_value = torch.rand(specgrams.shape[:2], device=device, dtype=dtype) * (specgrams.size(axis) -", "= torchaudio.sox_effects.sox_effects.apply_effects_file( bytes, effects=[[\"rate\", f\"{sample_rate}\"]], channels_first=channels_first, format=format) return augmented @_mod_utils.requires_kaldi()", "This expects an input with values between 0 and quantization_channels", "a STFT tensor, speed up in time without modifying pitch", "Tensor, sample_rate: int, frame_time: float = 10 ** (-2), win_length:", "pad_mode: str = 
\"reflect\", onesided: bool = True ) ->", "Tensor, quantization_channels: int ) -> Tensor: r\"\"\"Decode mu-law encoded signal.", "be detected (Hz) (Default: ``85``). freq_high (int, optional): Highest frequency", "cmn_waveform def spectral_centroid( waveform: Tensor, sample_rate: int, pad: int, window:", "- f_min) / f_sp logstep = math.log(6.4) / 27.0 if", "computes the equivalent of `compute-kaldi-pitch-feats` from Kaldi. Args: waveform (Tensor):", "-1 and 1 and returns a signal encoded with values", "(1 - alphas) * norm_0 real_stretch = mag * torch.cos(phase_acc)", "also ```channels_first```. sample_rate (int): Sample rate of the audio waveform.", "= window_end cmn_waveform[:, t, :] = waveform[:, t, :] -", "Window size. (Default: ``n_fft``) power (float): Exponent for the magnitude", "the phase batch, freq, frames = specgram.size() if rand_init: angles", "(str, optional): Changes the encoding for the supported formats. For", "\"\"\" shape = waveform.shape waveform = waveform.reshape(-1, shape[-1]) result =", "(int): Length of hop between STFT windows. ( Default: ``win_length", "\"htk\" or \"slaney\".') if mel_scale == \"htk\": return 2595.0 *", "= max(t + 1, min_cmn_window) if window_end > num_frames: window_start", "spectrograms (batch, channel, freq, time) mask_param (int): Number of columns", "= 600, min_cmn_window: int = 100, center: bool = False,", "scale magnitude spectrogram using the Griffin-Lim transformation. Implementation ported from", "Output tensor in decibel scale \"\"\" x_db = multiplier *", "ceil(time/rate), complex=2)` Example >>> freq, hop_length = 1025, 512 >>>", "initialize the previous iterate to 0 rebuilt = torch.tensor(0.) for", "use: ``htk`` or ``slaney``. (Default: ``htk``) Returns: mels (float): Frequency", "* k) # size (n_mfcc, n_mels) if norm is None:", "a: Tuple[Tensor, Tensor], b: Tuple[Tensor, Tensor], thresh: float = 0.99", "normalized depending on norm. 
Args: n_mfcc (int): Number of mfc", "(fb.max(dim=0).values == 0.).any(): warnings.warn( \"At least one mel filterbank has", "time)`. multiplier (float): Use 10. for power and 20. for", "window_end > t: window_end = max(t + 1, min_cmn_window) if", "part min_log_hz = 1000.0 min_log_mel = (min_log_hz - f_min) /", "in science conference, pp. 18-25. 2015. * [2] <NAME>., <NAME>.,", "bins hop_length (int): Length of hop between STFT windows. (", "signal to. Must be more than twice lowpass-cutoff. (default: 4000)", "+ math.log(freq / min_log_hz) / logstep return mels def _mel_to_hz(mels:", "columns axis (int): Axis to apply masking on (1 ->", "None] >>> x = phase_vocoder(complex_specgrams, rate, phase_advance) >>> x.shape #", "torch.istft(specgram * angles, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=window, length=length) # unpack", ") -> Tensor: r\"\"\" Compute the spectral centroid for each", "for energy normalization. (default: 0) simulate_first_pass_online (bool, optional): If true,", "Invert with our current estimate of the phases inverse =", "# Slaney-style mel is scaled to be approx constant energy", "<https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_ This algorithm assumes the signal has been scaled to", "output = torch.nn.functional.conv1d(specgram, kernel, groups=specgram.shape[1]) / denom # unpack batch", "masking are supported') # unpack batch specgram = specgram.reshape(shape[:-2] +", "frame_length: float = 25.0, frame_shift: float = 10.0, min_f0: float", "torch.stft( input=inverse, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=window, center=True, pad_mode='reflect', normalized=False, onesided=True,", "highest value of NCCF, apply centered median smoothing, and convert", "freq of dimension (..., frame) \"\"\" input_shape = waveform.shape num_frames,", "power/amplitude scale to the decibel scale. 
The output of each", "is None: dct *= 2.0 else: assert norm == \"ortho\"", "the weighted average of the frequency values, weighted by their", "(int): The sample rate of the waveform (Hz) frame_time (float,", "n_fft (int): Size of FFT hop_length (int): Length of hop", "min_log_mel)) return freqs def create_fb_matrix( n_freqs: int, f_min: float, f_max:", "freq >= min_log_hz: mels = min_log_mel + math.log(freq / min_log_hz)", "\"griffinlim\", \"amplitude_to_DB\", \"DB_to_amplitude\", \"compute_deltas\", \"compute_kaldi_pitch\", \"create_fb_matrix\", \"create_dct\", \"compute_deltas\", \"detect_pitch_frequency\", \"DB_to_amplitude\",", "norm of complex tensor input. Args: complex_tensor (Tensor): Tensor shape", "see :py:func:`torchaudio.backend.sox_io_backend.save`. Returns: torch.Tensor: Resulting Tensor. If ``channels_first=True``, it has", "phase advance in each bin. Dimension of (freq, 1) Returns:", "dimension (..., freq, time) win_length (int, optional): The window length", "n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=window, length=length).float() # Rebuild the spectrogram rebuilt", "dimension ``[time, channel]``. compression (float): Used for formats other than", "600) min_cmn_window (int, optional): Minimum CMN window used at start", "= 0 if last_window_start == -1: input_part = waveform[:, window_start:", "should take the form `(..., freq, time)`. Batched inputs should", "win_length, 1) values, _ = torch.median(roll, -1) return values def", "= best[1] # Add back minimal lag indices += lag_min", "is 80. (Default: ``None``) Returns: Tensor: Output tensor in decibel", "power (float): If power equals 1, will compute DB to", "= torch.cat(pad_length * [indices[..., pad_length].unsqueeze(-1)], dim=-1) roll = indices.unfold(-1, win_length,", "if center: window_start = t - cmn_window // 2 window_end", "number of window hops (n_frame). 
\"\"\" if pad > 0:", "# Update our phase estimates angles = rebuilt if momentum:", "(Default: ``htk``) Returns: freqs (Tensor): Mels converted in Hz \"\"\"", "if norm is not None and norm == \"slaney\": #", "and 1 and returns a signal encoded with values from", "(float): Use 10. for power and 20. for amplitude amin", "[ \"spectrogram\", \"griffinlim\", \"amplitude_to_DB\", \"DB_to_amplitude\", \"compute_deltas\", \"compute_kaldi_pitch\", \"create_fb_matrix\", \"create_dct\", \"compute_deltas\",", ".. math:: d_t = \\frac{\\sum_{n=1}^{\\text{N}} n (c_{t+n} - c_{t-n})}{2 \\sum_{n=1}^{\\text{N}}", "= torch.nn.functional.conv1d(specgram, kernel, groups=specgram.shape[1]) / denom # unpack batch output", "Minimum frequency (Hz) f_max (float): Maximum frequency (Hz) n_mels (int):", "(Tensor): Tensor of audio of dimension (..., freq, time) sample_rate", "lag:].unfold(-1, frame_size, frame_size)[..., :num_of_frames, :] output_frames = ( (s1 *", "is ``n_fft // 2 + 1``. window (Tensor): Window tensor", "phase_acc = torch.cumsum(phase, -1) mag = alphas * norm_1 +", "if norm_vars: cur_sumsq -= (frame_to_remove ** 2) if window_end >", "85, freq_high: int = 3400, ) -> Tensor: r\"\"\"Detect pitch", "current frame (to the extent possible, modulo end effects). If", "delta (Default: ``5``) mode (str, optional): Mode parameter passed to", "sample_rate: int, frame_time: float, freq_low: int ) -> Tensor: r\"\"\"", "half of lags, then the latter is taken. \"\"\" lag_min", "waveform (Tensor): The input waveform of shape `(..., time)`. sample_rate", "min_log_hz: mels = min_log_mel + math.log(freq / min_log_hz) / logstep", "/ power) # randomly initialize the phase batch, freq, frames", "cur_sumsq += torch.cumsum(input_part ** 2, 1)[:, -1, :] else: if", "``uniform(0, mask_param)``, and ``v_0`` from ``uniform(0, max_v - v)``. 
Args:", "number of frames of latency that we allow pitch tracking", "tensor in decibel scale \"\"\" x_db = multiplier * torch.log10(torch.clamp(x,", "/ 3 specgram = torch.nn.functional.pad(specgram, (n, n), mode=mode) kernel =", "``htk`` or ``slaney``. (Default: ``htk``) Returns: Tensor: Triangular filter banks", "(Tensor): Real spectrograms (batch, channel, freq, time) mask_param (int): Number", "int ) -> Tensor: r\"\"\" Compute Normalized Cross-Correlation Function (NCCF).", "is very close to the first half of lags, then", ":] s2 = waveform[..., lag:].unfold(-1, frame_size, frame_size)[..., :num_of_frames, :] output_frames", "else ``[time, channel]``. \"\"\" bytes = io.BytesIO() torchaudio.backend.sox_io_backend.save(bytes, waveform, sample_rate,", "batch of spectrograms from a raw audio signal. The spectrogram", "values, indices def _find_max_per_frame( nccf: Tensor, sample_rate: int, freq_high: int", "tensor in a batch depends on the maximum value of", "shape of `(..., complex=2)` Return: Tensor: Angle of a complex", "The spectrogram can be either magnitude-only or complex. Args: waveform", "-1, :] else: if window_start > last_window_start: frame_to_remove = waveform[:,", "f_diff = f_pts[1:] - f_pts[:-1] # (n_mels + 1) slopes", "(int, optional): Integer that determines filter width when upsampling NCCF.", "None, mel_scale: str = \"htk\", ) -> Tensor: r\"\"\"Create a", "None, None] mask_end = (min_value + value)[..., None, None] mask", "== false, ignored if center==true (int, default = 100) center", ">>> phase_advance = torch.linspace( >>> 0, math.pi * hop_length, freq)[...,", "Normalized Cross-Correlation Function (NCCF). .. 
math:: \\phi_i(m) = \\frac{\\sum_{n=b_i}^{b_i +", "input=inverse, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=window, center=True, pad_mode='reflect', normalized=False, onesided=True, return_complex=True,", "norm_vars: cur_sumsq += torch.cumsum(input_part ** 2, 1)[:, -1, :] else:", "None or 'slaney'\") # freq bins # Equivalent filterbank construction", "algorithm assumes the signal has been scaled to between -1", "10.0) min_f0 (float, optional): Minimum F0 to search for (Hz)", "(int): Number of mfc coefficients to retain n_mels (int): Number", "2 - 1.0 x = torch.sign(x) * (torch.exp(torch.abs(x) * torch.log1p(mu))", "the signal to. Must be more than twice lowpass-cutoff. (default:", "with librosa.core.spectrum._spectrogram spec_f = torch.stft( input=waveform, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=window,", "of complex tensor input. Args: complex_tensor (Tensor): Tensor shape of", "b[0] indices = mask * a[1] + ~mask * b[1]", "(default: 1000) resample_frequency (float, optional): Frequency that we down-sample the", "/ frame_size)) p = lags + num_of_frames * frame_size -", "in decibel scale \"\"\" x_db = multiplier * torch.log10(torch.clamp(x, min=amin))", "info see the `Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_ This expects an input", "(Default: ``3400``). Returns: Tensor: Tensor of freq of dimension (...,", "mu = torch.tensor(mu, dtype=x_mu.dtype) x = ((x_mu) / mu) *", "Tensor: r\"\"\"Decode mu-law encoded signal. For more info see the", "-= (window_end - num_frames) window_end = num_frames if window_start <", "mels def _mel_to_hz(mels: Tensor, mel_scale: str = \"htk\") -> Tensor:", "(int, optional): Changes the bit depth for the supported formats.", "to return half of results to avoid redundancy. Default: ``True``", "sliding-window cepstral mean (and optionally variance) normalization per utterance. Args:", "then the complex spectrum is returned instead. 
normalized (bool): Whether", "int, window: Tensor, n_fft: int, hop_length: int, win_length: int, power:", "= (min_value.long() + value.long()).squeeze() assert mask_end - mask_start < mask_param", "frames of latency that we allow pitch tracking to introduce", "supports it waveform = torch.nn.functional.pad(waveform, (pad, pad), \"constant\") # pack", "value for `n_freqs` ({n_freqs}) may be set too low.\" )", "encoding (str, optional): Changes the encoding for the supported formats.", "between -1 and 1. Args: x_mu (Tensor): Input tensor quantization_channels", "controls the padding method used when :attr:`center` is ``True``. Default:", "of size (``n_freqs``, ``n_mels``) meaning number of frequencies to highlight/apply", "math.pi)) # Compute Phase Accum phase = phase + phase_advance", "average CMN computation (int, default = 600) min_cmn_window (int, optional):", "f_pts[1:] - f_pts[:-1] # (n_mels + 1) slopes = f_pts.unsqueeze(0)", "p=2, dim=-1)).pow(2) ) output_lag.append(output_frames.unsqueeze(-1)) nccf = torch.cat(output_lag, -1) return nccf", "of lowpass filter, more gives sharper filter. (default: 1) upsample_filter_width", "that we allow pitch tracking to introduce into the feature", "size Returns: Tensor: Dimension (..., time) \"\"\" specgram = spectrogram(waveform,", "(Tensor): Window tensor that is applied/multiplied to each frame/window n_fft", "(int): Size of FFT hop_length (int): Length of hop between", "num_channels, num_feats, dtype=dtype, device=device) else: variance = cur_sumsq variance =", "* torch.sin(phase_acc) complex_specgrams_stretch = torch.stack([real_stretch, imag_stretch], dim=-1) # unpack batch", "Input tensor before being converted to power/amplitude scale. ref (float):", "for fast Griffin-Lim. Setting this to 0 recovers the original", "for power, etc. 
If None, then the complex spectrum is", "= specgram.reshape([-1] + list(shape[-2:])) specgram = specgram.pow(1 / power) #", "up_slopes)) if norm is not None and norm == \"slaney\":", "other than WAV. For mor details see :py:func:`torchaudio.backend.sox_io_backend.save`. encoding (str,", "alphas) * norm_0 real_stretch = mag * torch.cos(phase_acc) imag_stretch =", "the encoding for the supported formats. For more details see", "The number of frames used for energy normalization. (default: 0)", "= sample_rate / (EPSILON + indices.to(torch.float)) # unpack batch freq", "See also ```channels_first```. sample_rate (int): Sample rate of the audio", "waveform = waveform.view(-1, num_frames, num_feats) num_channels = waveform.shape[0] dtype =", "length (int or None): Array length of the expected output.", "= num_frames if window_start < 0: window_start = 0 if", "waveform from a linear scale magnitude spectrogram using the Griffin-Lim", "upsampling NCCF. (default: 5) max_frames_latency (int, optional): Maximum number of", "2.0 / (f_pts[2:n_mels + 2] - f_pts[:n_mels]) fb *= enorm.unsqueeze(0)", "dim=-1) roll = indices.unfold(-1, win_length, 1) values, _ = torch.median(roll,", "create_fb_matrix(A.size(-1), ...)``. \"\"\" if norm is not None and norm", "(Default: ``n_fft``) power (float): Exponent for the magnitude spectrogram, (must", "value)[..., None, None] mask = torch.arange(0, specgrams.size(axis), device=device, dtype=dtype) #", ":math:`w` is the waveform, :math:`N` is the length of a", "specgram.reshape([-1] + list(shape[-2:])) specgram = specgram.pow(1 / power) # randomly", "Batched inputs should include a channel dimension and have the", "Proceedings of the 14th python in science conference, pp. 18-25.", "(..., freq, frames) where freq is ``n_fft // 2 +", "will be scaled by. power (float): If power equals 1,", "* create_fb_matrix(A.size(-1), ...)``. \"\"\" if norm is not None and", "the number of window hops (n_frame). 
\"\"\" if pad >", "Return the final phase estimates waveform = torch.istft(specgram * angles,", "give the same number of frames. (default: True) Returns: Tensor:", "complex_specgrams_stretch = torch.stack([real_stretch, imag_stretch], dim=-1) # unpack batch complex_specgrams_stretch =", "default = 600) min_cmn_window (int, optional): Minimum CMN window used", "rate of the audio waveform norm (Optional[str]): If 'slaney', divide", "\"spectrogram\", \"griffinlim\", \"amplitude_to_DB\", \"DB_to_amplitude\", \"compute_deltas\", \"compute_kaldi_pitch\", \"create_fb_matrix\", \"create_dct\", \"compute_deltas\", \"detect_pitch_frequency\",", "detect_pitch_frequency( waveform: Tensor, sample_rate: int, frame_time: float = 10 **", "the masked columns axis (int): Axis to apply masking on", "multiplier (float): Use 10. for power and 20. for amplitude", "Tuple[Tensor, Tensor], thresh: float = 0.99 ) -> Tuple[Tensor, Tensor]:", "* (2 * n + 1) / 3 specgram =", "2) f_pts = _mel_to_hz(m_pts, mel_scale=mel_scale) # calculate the difference between", "``[channel, time]``. Otherwise, they have dimension ``[time, channel]``. compression (float):", "else 1 x_db = x_db.reshape(-1, packed_channels, shape[-2], shape[-1]) x_db =", "= shape[-3] if x_db.dim() > 2 else 1 x_db =", "n_mels) fb = torch.max(zero, torch.min(down_slopes, up_slopes)) if norm is not", "the number of filterbanks. Each column is a filterbank so", "float = 10 ** (-2), win_length: int = 30, freq_low:", "used when :attr:`center` is ``True``. Default: ``\"reflect\"`` onesided (bool, optional):", "Tensor of freq of dimension (..., frame) \"\"\" # pack", "1 and returns a signal scaled between -1 and 1.", "into its magnitude and phase. Args: complex_tensor (Tensor): Tensor shape", "Size of FFT hop_length (int): Length of hop between STFT", "power (float): Exponent for the magnitude spectrogram, (must be >", "lags is very close to the first half of lags,", "complex=2)` power (float): Power of the norm. (Default: `1.0`). 
Returns:", "Shape of `(..., )` \"\"\" return torch.atan2(complex_tensor[..., 1], complex_tensor[..., 0])", "sample_rate (int): Sample rate of the audio waveform norm (Optional[str]):", "(int, optional): The number of frames used for energy normalization.", "= ((x_mu) / mu) * 2 - 1.0 x =", "a frequency bin conversion matrix. Args: n_freqs (int): Number of", "= waveform[..., :-lag].unfold(-1, frame_size, frame_size)[..., :num_of_frames, :] s2 = waveform[...,", "mu_law_encoding( x: Tensor, quantization_channels: int ) -> Tensor: r\"\"\"Encode signal", "\"htk\": return 2595.0 * math.log10(1.0 + (freq / 700.0)) #", "= phase_vocoder(complex_specgrams, rate, phase_advance) >>> x.shape # with 231 ==", "= torch.nn.functional.pad(complex_specgrams, [0, 0, 0, 2]) # (new_bins, freq, 2)", "Applications of Signal Processing to Audio and Acoustics (pp. 1-4),", "ported from `librosa`. * [1] McFee, Brian, <NAME>, <NAME>, <NAME>,", "given. \"\"\" assert momentum < 1, 'momentum={} > 1 can", "0.005, nccf_ballast: float = 7000, lowpass_filter_width: int = 1, upsample_filter_width:", "Audio data. Must be 2 dimensional. See also ```channels_first```. sample_rate", "filter, more gives sharper filter. (default: 1) upsample_filter_width (int, optional):", "avoid redundancy. Default: ``True`` Returns: Tensor: Dimension (..., freq, time),", "of the mel band (area normalization). (Default: ``None``) mel_scale (str,", "snipped, so that the number of frames is the file", "\"\"\" if pad > 0: # TODO add \"with torch.no_grad():\"", "0, 'momentum={} < 0'.format(momentum) if normalized: warnings.warn( \"The argument normalized", "of filterbanks. Each column is a filterbank so that assuming", "to normalize by magnitude after stft. n_iter (int): Number of", "- 1. 
Args: x (Tensor): Input tensor quantization_channels (int): Number", "* torch.exp(logstep * (mels[log_t] - min_log_mel)) return freqs def create_fb_matrix(", "freq, 2) complex_specgrams_0 = complex_specgrams.index_select(-2, time_steps.long()) complex_specgrams_1 = complex_specgrams.index_select(-2, (time_steps", "multiplier * db_multiplier if top_db is not None: # Expand", "to frequency. Note: If the max among all the lags", "frames 2)`` where the last dimension corresponds to pitch and", "x the number of filterbanks. Each column is a filterbank", "n_fft=n_fft, hop_length=hop_length, win_length=win_length, power=1., normalized=False) freqs = torch.linspace(0, sample_rate //", "filterbank so that assuming there is a matrix A of", "using the Griffin-Lim transformation. Implementation ported from `librosa`. * [1]", "float, power: float ) -> Tensor: r\"\"\"Turn a tensor from", "a DCT transformation matrix with shape (``n_mels``, ``n_mfcc``), normalized depending", "of decoding -- not the final version of the features,", "to use: ``htk`` or ``slaney``. (Default: ``htk``) Returns: mels (float):", "float, frame_length: float = 25.0, frame_shift: float = 10.0, min_f0:", "def spectrogram( waveform: Tensor, pad: int, window: Tensor, n_fft: int,", "0: window_start = 0 if last_window_start == -1: input_part =", "spec_f /= window.pow(2.).sum().sqrt() if power is not None: if power", "(freqs * specgram).sum(dim=freq_dim) / specgram.sum(dim=freq_dim) @_mod_utils.requires_sox() def apply_codec( waveform: Tensor,", "to search for (Hz) (default: 400.0) soft_min_f0 (float, optional): Minimum", "features that correspond to what an online decoder would see", "Slaney-style mel is scaled to be approx constant energy per", "tensor, and so may return different values for an audio", "optional): Scale to use: ``htk`` or ``slaney``. 
(Default: ``htk``) Returns:", "x.is_floating_point(): x = x.to(torch.float) mu = torch.tensor(mu, dtype=x.dtype) x_mu =", "NCCF at frame :math:`i` with lag :math:`m`, :math:`w` is the", "+ list(shape[-2:])) specgram = specgram.pow(1 / power) # randomly initialize", "window_end -= window_start window_start = 0 if not center: if", "window hops (n_frame). \"\"\" if pad > 0: # TODO", "\"mu_law_decoding\", \"complex_norm\", \"angle\", \"magphase\", \"phase_vocoder\", 'mask_along_axis', 'mask_along_axis_iid', 'sliding_window_cmn', \"spectral_centroid\", \"apply_codec\",", "\"htk\") -> Tensor: \"\"\"Convert mel bin numbers to frequencies. Args:", "elif axis == 2: specgram[:, :, mask_start:mask_end] = mask_value else:", "quantization_channels - 1.0 if not x.is_floating_point(): x = x.to(torch.float) mu", "(Hz) (Default: ``85``). freq_high (int, optional): Highest frequency that can", "for the supported formats. For more details see :py:func:`torchaudio.backend.sox_io_backend.save`. Returns:", "= None ) -> Tensor: r\"\"\"Turn a spectrogram from the", "+ complex_specgrams_stretch.shape[1:]) return complex_specgrams_stretch def mask_along_axis_iid( specgrams: Tensor, mask_param: int,", "output will be scaled by. power (float): If power equals", "zero otherwise. Returns: torch.Tensor: waveform of (..., time), where time", "f0, applied in soft way, must not exceed min-f0 (default:", "best = torch.max(nccf[..., lag_min:], -1) half_size = nccf.shape[-1] // 2", "n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=window, length=length) # unpack batch waveform =", "audio waveform. format (str): File format. 
channels_first (bool): When True,", "mel_scale=mel_scale) m_pts = torch.linspace(m_min, m_max, n_mels + 2) f_pts =", "+ 1).long()) angle_0 = angle(complex_specgrams_0) angle_1 = angle(complex_specgrams_1) norm_0 =", "else: angles = torch.zeros(batch, freq, frames) angles = torch.stack([angles.cos(), angles.sin()],", "= specgrams.transpose(axis, -1) return specgrams def mask_along_axis( specgram: Tensor, mask_param:", "phase_vocoder(complex_specgrams, rate, phase_advance) >>> x.shape # with 231 == ceil(300", "2) window_frames = window_end - window_start last_window_start = window_start last_window_end", "Speech and Signal Processing (ICASSP), Florence, 2014, pp. 2494-2498, doi:", "in the first pass of decoding -- not the final", "dim=-1) # unpack batch complex_specgrams_stretch = complex_specgrams_stretch.reshape(shape[:-3] + complex_specgrams_stretch.shape[1:]) return", "empirical calibration offset indices += 1 return indices def _median_smoothing(", "waveform: Tensor, cmn_window: int = 600, min_cmn_window: int = 100,", "in python.\" In Proceedings of the 14th python in science", "dim=-1) phase_acc = torch.cumsum(phase, -1) mag = alphas * norm_1", "detected (Hz) (Default: ``85``). freq_high (int, optional): Highest frequency that", "Frequency in Mels \"\"\" if mel_scale not in ['slaney', 'htk']:", "Default: ``True`` pad_mode (string, optional): controls the padding method used", "dtype=dtype) # Per batch example masking specgrams = specgrams.transpose(axis, -1)", "True, pad_mode: str = \"reflect\", onesided: bool = True )", "<NAME>, <NAME>, and <NAME>. 
\"librosa: Audio and music signal analysis", "1) values, _ = torch.median(roll, -1) return values def detect_pitch_frequency(", "\"\"\" lag_min = int(math.ceil(sample_rate / freq_high)) # Find near enough", "Number of columns to be masked will be uniformly sampled", "(default: 0.005) nccf_ballast (float, optional): Increasing this factor reduces NCCF", "float, top_db: Optional[float] = None ) -> Tensor: r\"\"\"Turn a", "previous iterate tprev = rebuilt # Invert with our current", "(bool): When True, both the input and output Tensor have", "a filterbank so that assuming there is a matrix A", "Returns: Tensor: Power of the normed input tensor. Shape of", "-> Tensor: r\"\"\" Apply a mask along ``axis``. Mask will", "speech recognition <NAME>, <NAME>, <NAME>, <NAME>, <NAME> and <NAME> 2014", "complex_tensor: Tensor ) -> Tensor: r\"\"\"Compute the angle of complex", "mels # And now the nonlinear scale min_log_hz = 1000.0", "tensor, speed up in time without modifying pitch by a", "(int): Sample rate of the audio waveform. format (str): File", "k) # size (n_mfcc, n_mels) if norm is None: dct", "sliding_window_cmn( waveform: Tensor, cmn_window: int = 600, min_cmn_window: int =", "= complex_specgrams_stretch.reshape(shape[:-3] + complex_specgrams_stretch.shape[1:]) return complex_specgrams_stretch def mask_along_axis_iid( specgrams: Tensor,", "= waveform.size() waveform = waveform.reshape(-1, shape[-1]) # default values are", "Compute lags output_lag = [] for lag in range(1, lags", "int(math.ceil(sample_rate / freq_high)) # Find near enough max that is", "def sliding_window_cmn( waveform: Tensor, cmn_window: int = 600, min_cmn_window: int", "(..., time), where time equals the ``length`` parameter if given.", "if not x.is_floating_point(): x = x.to(torch.float) mu = torch.tensor(mu, dtype=x.dtype)", "axis (int): Axis to apply masking on (1 -> frequency,", "energy :math:`\\sum_{n=j}^{j+N-1} w^2(n)`. 
\"\"\" EPSILON = 10 ** (-9) #", "x_db.size() packed_channels = shape[-3] if x_db.dim() > 2 else 1", "- cur_sum / window_frames if norm_vars: if window_frames == 1:", "0'.format(momentum) if normalized: warnings.warn( \"The argument normalized is not used", "median smoothing, and convert to frequency. Note: If the max", "consistent with librosa.core.spectrum._spectrogram spec_f = torch.stft( input=waveform, n_fft=n_fft, hop_length=hop_length, win_length=win_length,", "Tensor: Triangular filter banks (fb matrix) of size (``n_freqs``, ``n_mels``)", "encoding: Optional[str] = None, bits_per_sample: Optional[int] = None, ) ->", "returns a signal scaled between -1 and 1. Args: x_mu", "be > 0) e.g., 1 for energy, 2 for power,", "mel bin numbers to frequencies. Args: mels (Tensor): Mel frequencies", "``n_freqs``), the applied result would be ``A * create_fb_matrix(A.size(-1), ...)``.", "Tensor: Angle of a complex tensor. Shape of `(..., )`", "/ window_frames variance -= ((cur_sum ** 2) / (window_frames **", "optional): Only relevant for compatibility with online pitch extraction. A", "min_log_mel = (min_log_hz - f_min) / f_sp logstep = math.log(6.4)", "Tensor: r\"\"\"Given a STFT tensor, speed up in time without", "= x_db.reshape(-1, packed_channels, shape[-2], shape[-1]) x_db = torch.max(x_db, (x_db.amax(dim=(-3, -2,", "upsample_filter_width, max_frames_latency, frames_per_chunk, simulate_first_pass_online, recompute_frame, snip_edges, ) result = result.reshape(shape[:-1]", "scaled by. 
power (float): If power equals 1, will compute", "supported') # unpack batch specgram = specgram.reshape(shape[:-2] + specgram.shape[-2:]) return", "(str or None): Norm to use (either 'ortho' or None)", "length of a frame, :math:`b_i` is the beginning of frame", "int, win_length: int, power: Optional[float], normalized: bool, center: bool =", "values are consistent with librosa.core.spectrum._spectrogram spec_f = torch.stft( input=waveform, n_fft=n_fft,", "Lowest frequency that can be detected (Hz) (Default: ``85``). freq_high", "pad :attr:`waveform` on both sides so that the :math:`t`-th frame", "pad=pad, window=window, n_fft=n_fft, hop_length=hop_length, win_length=win_length, power=1., normalized=False) freqs = torch.linspace(0,", "after stft. n_iter (int): Number of iteration for phase recovery", "complex_specgrams (Tensor): Dimension of `(..., freq, time, complex=2)` rate (float):", "is not None: # Expand batch shape = x_db.size() packed_channels", "slopes[:, 2:] / f_diff[1:] # (n_freqs, n_mels) fb = torch.max(zero,", "<NAME>, <NAME>, <NAME>, <NAME> and <NAME> 2014 IEEE International Conference", "(int): Number of iteration for phase recovery process. momentum (float):", "n_mels: int, sample_rate: int, norm: Optional[str] = None, mel_scale: str", "mask_value elif axis == 2: specgram[:, :, mask_start:mask_end] = mask_value", "Take value from first if bigger than a multiplicative factor", "indices def _find_max_per_frame( nccf: Tensor, sample_rate: int, freq_high: int )", "dimensions (channel, freq, time) \"\"\" # pack batch shape =", ") -> Tensor: r\"\"\"Create a spectrogram or a batch of", "`1.0`) Returns: (Tensor, Tensor): The magnitude and phase of the", "f_min = 0.0 f_sp = 200.0 / 3 freqs =", "to retain n_mels (int): Number of mel filterbanks norm (str", "the last dimension corresponds to pitch and NCCF. 
Reference: -", "600, min_cmn_window: int = 100, center: bool = False, norm_vars:", "dimension (..., time) sample_rate (int): Sample rate of the audio", "results to avoid redundancy. Default: ``True`` Returns: Tensor: Dimension (...,", "r\"\"\"Turn a spectrogram from the power/amplitude scale to the decibel", "cur_sum += torch.sum(input_part, 1) if norm_vars: cur_sumsq += torch.cumsum(input_part **", "float, db_multiplier: float, top_db: Optional[float] = None ) -> Tensor:", "= torch.zeros(batch, freq, frames) angles = torch.stack([angles.cos(), angles.sin()], dim=-1) \\", "to use (either 'ortho' or None) Returns: Tensor: The transformation", "torch.median(roll, -1) return values def detect_pitch_frequency( waveform: Tensor, sample_rate: int,", "700.0)) # Fill in the linear part f_min = 0.0", "normalization per utterance. Args: waveform (Tensor): Tensor of audio of", "(torch.exp(torch.abs(x) * torch.log1p(mu)) - 1.0) / mu return x def", "# (new_bins, freq, 2) complex_specgrams_0 = complex_specgrams.index_select(-2, time_steps.long()) complex_specgrams_1 =", "converted in Hz \"\"\" if mel_scale not in ['slaney', 'htk']:", "lags + num_of_frames * frame_size - waveform_length waveform = torch.nn.functional.pad(waveform,", "(Default: ``10 ** (-2)``). win_length (int, optional): The window length", "device=device, dtype=dtype) # Per batch example masking specgrams = specgrams.transpose(axis,", "int, power: float, normalized: bool, n_iter: int, momentum: float, length:", "torch.linspace(0, sample_rate // 2, n_freqs) # calculate mel freq bins", "from the decibel scale to the power/amplitude scale. Args: x", "unpack batch specgram = specgram.reshape(shape[:-2] + specgram.shape[-2:]) return specgram def", "str = \"reflect\", onesided: bool = True ) -> Tensor:", "min_log_mel) freqs[log_t] = min_log_hz * torch.exp(logstep * (mels[log_t] - min_log_mel))", "from ``uniform(0, max_v - v)``. 
Args: specgrams (Tensor): Real spectrograms", "1000) >>> delta = compute_deltas(specgram) >>> delta2 = compute_deltas(delta) \"\"\"", "window_start, :] cur_sum += torch.sum(input_part, 1) if norm_vars: cur_sumsq +=", "torch.no_grad():\" back when JIT supports it waveform = torch.nn.functional.pad(waveform, (pad,", "more details see :py:func:`torchaudio.backend.sox_io_backend.save`. Returns: torch.Tensor: Resulting Tensor. If ``channels_first=True``,", "frequency bin conversion matrix. Args: n_freqs (int): Number of frequencies", "torch.stft( input=waveform, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=window, center=center, pad_mode=pad_mode, normalized=False, onesided=onesided,", "freq_high (int, optional): Highest frequency that can be detected (Hz)", "= window_end - window_start last_window_start = window_start last_window_end = window_end", "Returns: Tensor: Triangular filter banks (fb matrix) of size (``n_freqs``,", "etc. If None, then the complex spectrum is returned instead.", "for FO change. (default: 0.1) lowpass_cutoff (float, optional): Cutoff frequency", "true, use a window centered on the current frame (to", "window_start = 0 if not center: if window_end > t:", "2 for power, etc. If None, then the complex spectrum", ":math:`\\phi_i(m)` is the NCCF at frame :math:`i` with lag :math:`m`,", "filterbank construction by Librosa all_freqs = torch.linspace(0, sample_rate // 2,", "size (..., ``n_freqs``), the applied result would be ``A *", "cepstral mean (and optionally variance) normalization per utterance. Args: waveform", "device=device, dtype=dtype) * (specgrams.size(axis) - value) # Create broadcastable mask", "v0.9.0 release. To suppress this warning, \" \"please use `normalized=False`.\")", "// 2, device=specgram.device).reshape((-1, 1)) freq_dim = -2 return (freqs *", "optional): Frequency that we down-sample the signal to. 
Must be", "1.0) # Fill in the linear scale f_min = 0.0", "to be masked will be uniformly sampled from [0, mask_param]", "packed_channels = shape[-3] if x_db.dim() > 2 else 1 x_db", "> 1 can be unstable'.format(momentum) assert momentum >= 0, 'momentum={}", "multiplier: float, amin: float, db_multiplier: float, top_db: Optional[float] = None", "* b[0]) values = mask * a[0] + ~mask *", "energy. Relevant if ``frames_per_chunk > 0``. (default: 500) snip_edges (bool,", "+ f_sp * mels # And now the nonlinear scale", "int = 500, snip_edges: bool = True, ) -> torch.Tensor:", "power (float): Power of the norm. (Default: `1.0`) Returns: (Tensor,", "Args: specgram (Tensor): Tensor of audio of dimension (..., freq,", "-= ((cur_sum ** 2) / (window_frames ** 2)) variance =", "`librosa`. * [1] McFee, Brian, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,", "(in number of frames) (Default: ``30``). freq_low (int, optional): Lowest", "will output features that correspond to what an online decoder", "// 2 + 1`` bins hop_length (int): Length of hop", "window_start < 0: window_start = 0 if last_window_start == -1:", "normalized (bool): Whether to normalize by magnitude after stft. n_iter", "simulate_first_pass_online: bool = False, recompute_frame: int = 500, snip_edges: bool", "frequency (Hz) f_max (float): Maximum frequency (Hz) n_mels (int): Number", "(..., ``n_freqs``), the applied result would be ``A * create_fb_matrix(A.size(-1),", "10 ** (-2), win_length: int = 30, freq_low: int =", "applied result would be ``A * create_fb_matrix(A.size(-1), ...)``. \"\"\" if", "time :math:`t \\times \\text{hop\\_length}`. 
Default: ``True`` pad_mode (string, optional): controls", "x: Tensor, quantization_channels: int ) -> Tensor: r\"\"\"Encode signal based", "= complex_specgrams.index_select(-2, (time_steps + 1).long()) angle_0 = angle(complex_specgrams_0) angle_1 =", "f\"Or, the value for `n_freqs` ({n_freqs}) may be set too", "twice sum of integer squared denom = n * (n", "= lags + num_of_frames * frame_size - waveform_length waveform =", "detected (Hz) (Default: ``3400``). Returns: Tensor: Tensor of freq of", "<NAME>., & <NAME>. \"A fast Griffin-Lim algorithm,\" IEEE Workshop on", "creates ``n_fft // 2 + 1`` bins hop_length (int): Length", "power: float ) -> Tensor: r\"\"\"Turn a tensor from the", "torch import Tensor from torchaudio._internal import module_utils as _mod_utils import", "(bool, optional): whether to pad :attr:`waveform` on both sides so", "# Repack batch x_db = x_db.reshape(shape) return x_db def DB_to_amplitude(", "be set too high. \" f\"Or, the value for `n_freqs`", "Tensor of audio of dimension (..., time) pad (int): Two", "= (min_value + value)[..., None, None] mask = torch.arange(0, specgrams.size(axis),", "to padding (Default: ``\"replicate\"``) Returns: Tensor: Tensor of deltas of", "mask_start:mask_end] = mask_value elif axis == 2: specgram[:, :, mask_start:mask_end]", "rebuilt # Invert with our current estimate of the phases", "to avoid redundancy. Default: ``True`` Returns: Tensor: Dimension (..., freq,", "sampled from [0, mask_param] mask_value (float): Value to assign to", "hop_length=hop_length, win_length=win_length, window=window, length=length).float() # Rebuild the spectrogram rebuilt =", "features, which is the default. (default: False) Relevant if ``frames_per_chunk", "of Fourier bins, and time is the number of window", "that determines filter width when upsampling NCCF. (default: 5) max_frames_latency", "the form `(batch, channel, freq, time)`. 
multiplier (float): Use 10.", "int = 1, upsample_filter_width: int = 5, max_frames_latency: int =", "format (str): File format. channels_first (bool): When True, both the", "batch shape = list(waveform.size()) waveform = waveform.reshape([-1] + shape[-1:]) nccf", "mask mask_start = min_value[..., None, None] mask_end = (min_value +", "= 1.0 ) -> Tuple[Tensor, Tensor]: r\"\"\"Separate a complex-valued spectrogram", "Frequency and Time masking are supported') # unpack batch specgram", "list(shape[-2:])) value = torch.rand(1) * mask_param min_value = torch.rand(1) *", "Window tensor that is applied/multiplied to each frame/window n_fft (int):", "specgram.pow(1 / power) # randomly initialize the phase batch, freq,", "delta_pitch: float = 0.005, nccf_ballast: float = 7000, lowpass_filter_width: int", "Changes the bit depth for the supported formats. For more", "angle(complex_specgrams[..., :1, :]) # Time Padding complex_specgrams = torch.nn.functional.pad(complex_specgrams, [0,", "mask_value) specgrams = specgrams.transpose(axis, -1) return specgrams def mask_along_axis( specgram:", "* (specgrams.size(axis) - value) # Create broadcastable mask mask_start =", "STFT tensor, speed up in time without modifying pitch by", "dimension ``[channel, time]``. Otherwise, they have dimension ``[time, channel]``. 
compression", "Tensor: r\"\"\"Create a DCT transformation matrix with shape (``n_mels``, ``n_mfcc``),", "freq, time) cmn_window (int, optional): Window in frames for running", "norm == \"slaney\": # Slaney-style mel is scaled to be", "thresh: float = 0.99 ) -> Tuple[Tensor, Tensor]: \"\"\" Take", "specgram = specgram.pow(1 / power) # randomly initialize the phase", "hop_length: int, win_length: int, power: Optional[float], normalized: bool, center: bool", "(mels[log_t] - min_log_mel)) return freqs def create_fb_matrix( n_freqs: int, f_min:", "= 10 ** (-9) freq = sample_rate / (EPSILON +", "= [ \"spectrogram\", \"griffinlim\", \"amplitude_to_DB\", \"DB_to_amplitude\", \"compute_deltas\", \"compute_kaldi_pitch\", \"create_fb_matrix\", \"create_dct\",", "that tensor, and so may return different values for an", "may return different values for an audio clip split into", "Tensor: r\"\"\"Encode signal based on mu-law companding. For more info", "very close to the first half of lags, then the", "Initializes phase randomly if True, to zero otherwise. Returns: torch.Tensor:", "= min_log_hz * torch.exp(logstep * (mels[log_t] - min_log_mel)) return freqs", "lag_min:], -1) half_size = nccf.shape[-1] // 2 half = torch.max(nccf[...,", "batch shape = specgram.size() specgram = specgram.reshape(1, -1, shape[-1]) assert", "+= lag_min # Add 1 empirical calibration offset indices +=", "Tensor ) -> Tensor: r\"\"\"Compute the angle of complex tensor", "torch.exp(logstep * (mels[log_t] - min_log_mel)) return freqs def create_fb_matrix( n_freqs:", "sample_rate (int): The sample rate of the waveform (Hz) frame_time", ":math:`i`, :math:`E(j)` is the energy :math:`\\sum_{n=j}^{j+N-1} w^2(n)`. 
\"\"\" EPSILON =", "The spectral centroid is defined as the weighted average of", "Time Padding complex_specgrams = torch.nn.functional.pad(complex_specgrams, [0, 0, 0, 2]) #", "< mask_end), mask_value) specgrams = specgrams.transpose(axis, -1) return specgrams def", "sample_rate: int, frame_time: float = 10 ** (-2), win_length: int", "hop_length=hop_length, win_length=win_length, power=1., normalized=False) freqs = torch.linspace(0, sample_rate // 2,", "conference, pp. 18-25. 2015. * [2] <NAME>., <NAME>., & <NAME>.", "frame_size = int(math.ceil(sample_rate * frame_time)) waveform_length = waveform.size()[-1] num_of_frames =", "frequency that can be detected (Hz) (Default: ``85``). freq_high (int,", "-> Tensor: r\"\"\"Compute the angle of complex tensor input. Args:", "lowpass_filter_width: int = 1, upsample_filter_width: int = 5, max_frames_latency: int", "bytes.seek(0) augmented, _ = torchaudio.sox_effects.sox_effects.apply_effects_file( bytes, effects=[[\"rate\", f\"{sample_rate}\"]], channels_first=channels_first, format=format)", "window_end = num_frames if window_start < 0: window_start = 0", "= torch.zeros( num_channels, num_feats, dtype=dtype, device=device) else: variance = cur_sumsq", "and Acoustics (pp. 1-4), Oct. 2013. * [3] <NAME> and", "at time :math:`t`, :math:`c_t` is the spectrogram coeffcients at time", "Griffin-Lim transformation. Implementation ported from `librosa`. * [1] McFee, Brian,", "mask interval. Args: specgram (Tensor): Real spectrogram (channel, freq, time)", "pad: int, window: Tensor, n_fft: int, hop_length: int, win_length: int,", "optionally variance) normalization per utterance. Args: waveform (Tensor): Tensor of", "waveform.reshape(-1, shape[-1]) result = torch.ops.torchaudio.kaldi_ComputeKaldiPitch( waveform, sample_rate, frame_length, frame_shift, min_f0,", "nccf_ballast (float, optional): Increasing this factor reduces NCCF for quiet", "false, window is to the left. 
(bool, default = false)", "1.0 x = torch.sign(x) * (torch.exp(torch.abs(x) * torch.log1p(mu)) - 1.0)", "window_start + cmn_window else: window_start = t - cmn_window window_end", "LowPass filter (Hz) (default: 1000) resample_frequency (float, optional): Frequency that", "= complex_norm(complex_tensor, power) phase = angle(complex_tensor) return mag, phase def", ":attr:`center` is ``True``. Default: ``\"reflect\"`` onesided (bool, optional): controls whether", "Tensor of audio of dimension (..., freq, time) sample_rate (int):", "will be removed in v0.9.0 release. To suppress this warning,", "= math.log(6.4) / 27.0 if freq >= min_log_hz: mels =", "* torch.log10(torch.clamp(x, min=amin)) x_db -= multiplier * db_multiplier if top_db", "highlight/apply f_min (float): Minimum frequency (Hz) f_max (float): Maximum frequency", "(Hz) f_max (float): Maximum frequency (Hz) n_mels (int): Number of", "to each frame/window n_fft (int): Size of FFT, creates ``n_fft", "complex_tensor: Tensor, power: float = 1.0 ) -> Tuple[Tensor, Tensor]:", "= quantization_channels - 1.0 if not x.is_floating_point(): x = x.to(torch.float)", "of a tensor, usually a spectrogram: .. math:: d_t =", "for compatibility with online pitch extraction. A non-critical parameter; the", "_hz_to_mel(f_max, mel_scale=mel_scale) m_pts = torch.linspace(m_min, m_max, n_mels + 2) f_pts", "from torch import Tensor from torchaudio._internal import module_utils as _mod_utils", "100, center: bool = False, norm_vars: bool = False, )", "overlapping triangles zero = torch.zeros(1) down_slopes = (-1.0 * slopes[:,", "n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=window, center=center, pad_mode=pad_mode, normalized=False, onesided=onesided, return_complex=True, )", "n = (win_length - 1) // 2 # twice sum", "+ spec_f.shape[-2:]) if normalized: spec_f /= window.pow(2.).sum().sqrt() if power is", "iteration for phase recovery process. 
momentum (float): The momentum parameter", "# pack batch shape = specgram.size() specgram = specgram.reshape([-1] +", "output only if ``frames_per_chunk > 0`` and ``simulate_first_pass_online=True``) (default: 0)", "search for (Hz) (default: 400.0) soft_min_f0 (float, optional): Minimum f0,", "center: if window_end > t: window_end = max(t + 1,", "200.0 / 3 mels = (freq - f_min) / f_sp", "default = false) norm_vars (bool, optional): If true, normalize variance", "shape[-1]) x_db = torch.max(x_db, (x_db.amax(dim=(-3, -2, -1)) - top_db).view(-1, 1,", "= torch.linspace(m_min, m_max, n_mels + 2) f_pts = _mel_to_hz(m_pts, mel_scale=mel_scale)", "Args: complex_specgrams (Tensor): Dimension of `(..., freq, time, complex=2)` rate", "center=center, pad_mode=pad_mode, normalized=False, onesided=onesided, return_complex=True, ) # unpack batch spec_f", ":py:func:`torchaudio.backend.sox_io_backend.save`. encoding (str, optional): Changes the encoding for the supported", "frame-shift. This makes different types of features give the same", "have dimension ``[channel, time]``. Otherwise, they have dimension ``[time, channel]``.", "equivalent of `compute-kaldi-pitch-feats` from Kaldi. Args: waveform (Tensor): The input", "specgram: Tensor, mask_param: int, mask_value: float, axis: int ) ->", "the second, elementwise. \"\"\" mask = (a[0] > thresh *", "the supported formats. For more details see :py:func:`torchaudio.backend.sox_io_backend.save`. bits_per_sample (int,", "frames is the file size divided by the frame-shift. This", "recompute some of the forward pointers, after revising our estimate", "Tensor: r\"\"\" Apply a mask along ``axis``. Mask will be", "= torch.norm(complex_specgrams_0, p=2, dim=-1) norm_1 = torch.norm(complex_specgrams_1, p=2, dim=-1) phase", "of frames. (default: True) Returns: Tensor: Pitch feature. 
Shape: ``(batch,", "size power (float or None): Exponent for the magnitude spectrogram,", "pass of decoding -- not the final version of the", "-> Tensor: r\"\"\" Apply sliding-window cepstral mean (and optionally variance)", "\"\"\" x_db = multiplier * torch.log10(torch.clamp(x, min=amin)) x_db -= multiplier", "r\"\"\" Compute the spectral centroid for each channel along the", "mel is scaled to be approx constant energy per channel", "freq)[..., None] >>> x = phase_vocoder(complex_specgrams, rate, phase_advance) >>> x.shape", "Tensor: Tensor of deltas of dimension (..., freq, time) Example", "= f_pts[1:] - f_pts[:-1] # (n_mels + 1) slopes =", "scale. ref (float): Reference which the output will be scaled", "waveform[..., lag:].unfold(-1, frame_size, frame_size)[..., :num_of_frames, :] output_frames = ( (s1", "magnitude spectrogram using the Griffin-Lim transformation. Implementation ported from `librosa`.", "Tensor): The magnitude and phase of the complex tensor \"\"\"", "\"htk\" or \"slaney\".') if mel_scale == \"htk\": return 700.0 *", "2 else 1 x_db = x_db.reshape(-1, packed_channels, shape[-2], shape[-1]) x_db", "\"\"\" # pack batch shape = specgram.size() specgram = specgram.reshape([-1]", "waveform: Tensor, sample_rate: int, format: str, channels_first: bool = True,", "of None or 'slaney'\") # freq bins # Equivalent filterbank", "return fb def create_dct( n_mfcc: int, n_mels: int, norm: Optional[str]", "shape = x_db.size() packed_channels = shape[-3] if x_db.dim() > 2", "that determines filter width of lowpass filter, more gives sharper", "CMN computation (int, default = 600) min_cmn_window (int, optional): Minimum", "(Tensor): Mel frequencies mel_scale (str, optional): Scale to use: ``htk``", "* norm_1 + (1 - alphas) * norm_0 real_stretch =", "algorithm,\" IEEE Workshop on Applications of Signal Processing to Audio", ") -> Tensor: r\"\"\" Apply sliding-window cepstral mean (and optionally", "whether to return half of results to avoid redundancy. 
Default:", "else: raise ValueError('Only Frequency and Time masking are supported') #", "cur_sum += frame_to_add if norm_vars: cur_sumsq += (frame_to_add ** 2)", "output = output.reshape(shape) return output def _compute_nccf( waveform: Tensor, sample_rate:", "frame_to_add = waveform[:, last_window_end, :] cur_sum += frame_to_add if norm_vars:", "to power/amplitude scale. ref (float): Reference which the output will", "dct = torch.cos(math.pi / float(n_mels) * (n + 0.5) *", "= multiplier * torch.log10(torch.clamp(x, min=amin)) x_db -= multiplier * db_multiplier", "list(freq.shape[-1:])) return freq def sliding_window_cmn( waveform: Tensor, cmn_window: int =", "Cutoff frequency for LowPass filter (Hz) (default: 1000) resample_frequency (float,", "augmented @_mod_utils.requires_kaldi() def compute_kaldi_pitch( waveform: torch.Tensor, sample_rate: float, frame_length: float", "x_mu def mu_law_decoding( x_mu: Tensor, quantization_channels: int ) -> Tensor:", "the same number of frames. (default: True) Returns: Tensor: Pitch", "= (mels >= min_log_mel) freqs[log_t] = min_log_hz * torch.exp(logstep *", "Tensor: Input after mu-law decoding \"\"\" mu = quantization_channels -", "\"\"\" specgram = spectrogram(waveform, pad=pad, window=window, n_fft=n_fft, hop_length=hop_length, win_length=win_length, power=1.,", "a batch depends on the maximum value of that tensor,", "redundancy. Default: ``True`` Returns: Tensor: Dimension (..., freq, time), freq", "groups=specgram.shape[1]) / denom # unpack batch output = output.reshape(shape) return", "frames. (default: True) Returns: Tensor: Pitch feature. Shape: ``(batch, frames", "torch.log10(torch.clamp(x, min=amin)) x_db -= multiplier * db_multiplier if top_db is", ">>> complex_specgrams = torch.randn(2, freq, 300, 2) >>> rate =", "issue is fixed # https://github.com/pytorch/pytorch/issues/34279 return complex_tensor.pow(2.).sum(-1).pow(0.5 * power) def", "-> frequency, 2 -> time) Returns: Tensor: Masked spectrogram of", "time)`. 
sample_rate (float): Sample rate of `waveform`. frame_length (float, optional):", "(-9) # Number of lags to check lags = int(math.ceil(sample_rate", "torch.rand(1) * (specgram.size(axis) - value) mask_start = (min_value.long()).squeeze() mask_end =", "``A * create_fb_matrix(A.size(-1), ...)``. \"\"\" if norm is not None", "10.1109/ICASSP.2014.6854049. \"\"\" shape = waveform.shape waveform = waveform.reshape(-1, shape[-1]) result", "zero values. \" f\"The value for `n_mels` ({n_mels}) may be", "\"create_dct\", \"compute_deltas\", \"detect_pitch_frequency\", \"DB_to_amplitude\", \"mu_law_encoding\", \"mu_law_decoding\", \"complex_norm\", \"angle\", \"magphase\", \"phase_vocoder\",", "convergence, but above 1 may not converge. length (int or", "math.log(6.4) / 27.0 if freq >= min_log_hz: mels = min_log_mel", "shape = specgram.size() specgram = specgram.reshape(1, -1, shape[-1]) assert win_length", "int, sample_rate: int, norm: Optional[str] = None, mel_scale: str =", "= 7000, lowpass_filter_width: int = 1, upsample_filter_width: int = 5,", "JIT supports it waveform = torch.nn.functional.pad(waveform, (pad, pad), \"constant\") #", "n_mels: int, norm: Optional[str] ) -> Tensor: r\"\"\"Create a DCT", "variance to one. (bool, default = false) Returns: Tensor: Tensor", "same number of frames. (default: True) Returns: Tensor: Pitch feature.", "def angle( complex_tensor: Tensor ) -> Tensor: r\"\"\"Compute the angle", "This makes different types of features give the same number", "window_start = 0 window_end = 0 if center: window_start =", "Args: mels (Tensor): Mel frequencies mel_scale (str, optional): Scale to", "normalized is not used in Griffin-Lim, \" \"and will be", "\"replicate\" ) -> Tensor: r\"\"\"Compute delta coefficients of a tensor,", "norm_vars (bool, optional): If true, normalize variance to one. 
(bool,", "spectrogram, (must be > 0) e.g., 1 for energy, 2", "+ 1`` bins hop_length (int): Length of hop between STFT", "window_start > last_window_start: frame_to_remove = waveform[:, last_window_start, :] cur_sum -=", "== 2: cmn_waveform = cmn_waveform.squeeze(0) return cmn_waveform def spectral_centroid( waveform:", "(int): Sample rate of the audio waveform pad (int): Two", "2 dimensional. See also ```channels_first```. sample_rate (int): Sample rate of", "the output will be scaled by. power (float): If power", "+ 2) f_pts = _mel_to_hz(m_pts, mel_scale=mel_scale) # calculate the difference", "complex_specgrams.reshape([-1] + list(shape[-3:])) time_steps = torch.arange(0, complex_specgrams.size(-2), rate, device=complex_specgrams.device, dtype=complex_specgrams.dtype)", "best) indices = best[1] # Add back minimal lag indices", "filterbanks. Each column is a filterbank so that assuming there", "optional): If true, the function will output features that correspond", "# Fill in the linear part f_min = 0.0 f_sp", "\"\"\"Convert mel bin numbers to frequencies. Args: mels (Tensor): Mel", "2 * mu + 0.5).to(torch.int64) return x_mu def mu_law_decoding( x_mu:", "Highest frequency that can be detected (Hz) (Default: ``3400``). Returns:", "Tensor: r\"\"\" Apply median smoothing to the 1D tensor over", "momentum < 1, 'momentum={} > 1 can be unstable'.format(momentum) assert", "minimal lag indices += lag_min # Add 1 empirical calibration", "1).long()) angle_0 = angle(complex_specgrams_0) angle_1 = angle(complex_specgrams_1) norm_0 = torch.norm(complex_specgrams_0,", "/ 27.0 if freq >= min_log_hz: mels = min_log_mel +", "mode: str = \"replicate\" ) -> Tensor: r\"\"\"Compute delta coefficients", "`(..., complex=2)` power (float): Power of the norm. (Default: `1.0`)", "mask_param min_value = torch.rand(1) * (specgram.size(axis) - value) mask_start =", "Sample rate of the audio waveform pad (int): Two sided", "energy, 2 for power, etc. 
If None, then the complex", "supported') device = specgrams.device dtype = specgrams.dtype value = torch.rand(specgrams.shape[:2],", "time :math:`t`, :math:`c_t` is the spectrogram coeffcients at time :math:`t`,", "lag_min = int(math.ceil(sample_rate / freq_high)) # Find near enough max", "** 2) window_frames = window_end - window_start last_window_start = window_start", "n_mels) if norm is None: dct *= 2.0 else: assert", "x), power) def _hz_to_mel(freq: float, mel_scale: str = \"htk\") ->", "t: window_end = max(t + 1, min_cmn_window) if window_end >", "the final version of the features, which is the default.", "sample_rate: int, freq_high: int ) -> Tensor: r\"\"\" For each", "ending edge won't be snipped, so that the number of", "the input and output Tensor have dimension ``[channel, time]``. Otherwise,", "center == false, ignored if center==true (int, default = 100)", "scale. The output of each tensor in a batch depends", "been scaled to between -1 and 1 and returns a", "in each bin. Dimension of (freq, 1) Returns: Tensor: Complex", "= torch.pow(variance, -0.5) cmn_waveform[:, t, :] *= variance cmn_waveform =", "10.0, min_f0: float = 50, max_f0: float = 400, soft_min_f0:", "f_diff[1:] # (n_freqs, n_mels) fb = torch.max(zero, torch.min(down_slopes, up_slopes)) if", "(Default: ``htk``) Returns: Tensor: Triangular filter banks (fb matrix) of", "freq_low) indices = _find_max_per_frame(nccf, sample_rate, freq_high) indices = _median_smoothing(indices, win_length)", "IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP),", "only if ``frames_per_chunk > 0`` and ``simulate_first_pass_online=True``) (default: 0) frames_per_chunk", "specgram = torch.randn(1, 40, 1000) >>> delta = compute_deltas(specgram) >>>", "optional): Changes the encoding for the supported formats. 
For more", "Number of frequencies to highlight/apply f_min (float): Minimum frequency (Hz)", "power) phase = angle(complex_tensor) return mag, phase def phase_vocoder( complex_specgrams:", "(int): Length of hop between STFT windows win_length (int): Window", "return freqs def create_fb_matrix( n_freqs: int, f_min: float, f_max: float,", "3 -> time) Returns: Tensor: Masked spectrograms of dimensions (batch,", "win_length (int, optional): The window length used for computing delta", "2, 1)[:, -1, :] else: if window_start > last_window_start: frame_to_remove", "used for energy normalization. (default: 0) simulate_first_pass_online (bool, optional): If", "Tensor, window: Tensor, n_fft: int, hop_length: int, win_length: int, power:", "first pass of decoding -- not the final version of", "- angle_0 - phase_advance phase = phase - 2 *", "encoding for the supported formats. For more details see :py:func:`torchaudio.backend.sox_io_backend.save`.", "at frame :math:`i` with lag :math:`m`, :math:`w` is the waveform,", "t, :] = torch.zeros( num_channels, num_feats, dtype=dtype, device=device) else: variance", "deltas at time :math:`t`, :math:`c_t` is the spectrogram coeffcients at", "compute_deltas( specgram: Tensor, win_length: int = 5, mode: str =", "of ``rate``. Args: complex_specgrams (Tensor): Dimension of `(..., freq, time,", "axis == 1: specgram[:, mask_start:mask_end] = mask_value elif axis ==", "0.0 f_sp = 200.0 / 3 freqs = f_min +", "= (-1.0 * slopes[:, :-2]) / f_diff[:-1] # (n_freqs, n_mels)", "specgram.size() if rand_init: angles = 2 * math.pi * torch.rand(batch,", "format. channels_first (bool): When True, both the input and output", "shape = complex_specgrams.size() complex_specgrams = complex_specgrams.reshape([-1] + list(shape[-3:])) time_steps =", "audio signal. 
The spectrogram can be either magnitude-only or complex.", "* torch.log1p(mu)) - 1.0) / mu return x def complex_norm(", "num_of_frames = int(math.ceil(waveform_length / frame_size)) p = lags + num_of_frames", "20. for amplitude amin (float): Number to clamp ``x`` db_multiplier", ") # unpack batch spec_f = spec_f.reshape(shape[:-1] + spec_f.shape[-2:]) if", ") -> Tensor: r\"\"\"Turn a spectrogram from the power/amplitude scale", "length used for computing delta (Default: ``5``) mode (str, optional):", "of dimension (..., freq, time) Example >>> specgram = torch.randn(1,", "waveform, sample_rate, frame_length, frame_shift, min_f0, max_f0, soft_min_f0, penalty_factor, lowpass_cutoff, resample_frequency,", "assert win_length >= 3 n = (win_length - 1) //", "deltas of dimension (..., freq, time) Example >>> specgram =", "with values from 0 to quantization_channels - 1. Args: x", "Tensor of deltas of dimension (..., freq, time) Example >>>", "spec_f.abs() return spec_f.abs().pow(power) return torch.view_as_real(spec_f) def griffinlim( specgram: Tensor, window:", "cmn_waveform.view(input_shape[:-2] + (num_frames, num_feats)) if len(input_shape) == 2: cmn_waveform =", "frames_per_chunk (int, optional): The number of frames used for energy", "Tensor, mel_scale: str = \"htk\") -> Tensor: \"\"\"Convert mel bin", "applicable if center == false, ignored if center==true (int, default", "waveform.shape[0] dtype = waveform.dtype device = waveform.device last_window_start = last_window_end", "c_{t-n})}{2 \\sum_{n=1}^{\\text{N}} n^2} where :math:`d_t` is the deltas at time", "= specgram.reshape([-1] + list(shape[-2:])) value = torch.rand(1) * mask_param min_value", "to Audio and Acoustics (pp. 1-4), Oct. 2013. * [3]", "complex=2)` power (float): Power of the norm. 
(Default: `1.0`) Returns:", "Tensor: r\"\"\" Apply sliding-window cepstral mean (and optionally variance) normalization", "Fill in the linear part f_min = 0.0 f_sp =", "(2 * n + 1) / 3 specgram = torch.nn.functional.pad(specgram,", "doi: 10.1109/ICASSP.2014.6854049. \"\"\" shape = waveform.shape waveform = waveform.reshape(-1, shape[-1])", "(time_steps + 1).long()) angle_0 = angle(complex_specgrams_0) angle_1 = angle(complex_specgrams_1) norm_0", "(Tensor): Tensor shape of `(..., complex=2)` Return: Tensor: Angle of", "lag indices += lag_min # Add 1 empirical calibration offset", "output features that correspond to what an online decoder would", "and Signal Processing (ICASSP), Florence, 2014, pp. 2494-2498, doi: 10.1109/ICASSP.2014.6854049.", "spectrogram (channel, freq, time) mask_param (int): Number of columns to", "Optional[float] = None ) -> Tensor: r\"\"\"Turn a spectrogram from", "= x_mu.to(torch.float) mu = torch.tensor(mu, dtype=x_mu.dtype) x = ((x_mu) /", "p=2, dim=-1)).pow(2) / (EPSILON + torch.norm(s2, p=2, dim=-1)).pow(2) ) output_lag.append(output_frames.unsqueeze(-1))", "= output.reshape(shape) return output def _compute_nccf( waveform: Tensor, sample_rate: int,", "rate (float): Speed-up factor phase_advance (Tensor): Expected phase advance in", "(min_value + value)[..., None, None] mask = torch.arange(0, specgrams.size(axis), device=device,", "= mag * torch.sin(phase_acc) complex_specgrams_stretch = torch.stack([real_stretch, imag_stretch], dim=-1) #", "norm_vars: cur_sumsq += (frame_to_add ** 2) window_frames = window_end -", "applied from indices ``[v_0, v_0 + v)``, where ``v`` is", "a complex-valued spectrogram with shape `(..., 2)` into its magnitude", "denom = n * (n + 1) * (2 *", "defined as the weighted average of the frequency values, weighted", "can lead to faster convergence, but above 1 may not", "parameter; the frame at which we recompute some of the", "f_pts[:-1] # (n_mels + 1) slopes = f_pts.unsqueeze(0) - all_freqs.unsqueeze(1)", "1.0 
if not x_mu.is_floating_point(): x_mu = x_mu.to(torch.float) mu = torch.tensor(mu,", "freq, hop_length = 1025, 512 >>> # (channel, freq, time,", "last_window_start: frame_to_remove = waveform[:, last_window_start, :] cur_sum -= frame_to_remove if", "the incomplete frames near the ending edge won't be snipped,", "to the first half of lags, then the latter is", "frame_time, freq_low) indices = _find_max_per_frame(nccf, sample_rate, freq_high) indices = _median_smoothing(indices,", "Tuple[Tensor, Tensor]: r\"\"\"Separate a complex-valued spectrogram with shape `(..., 2)`", "!= 2 and axis != 3: raise ValueError('Only Frequency and", "power, etc. normalized (bool): Whether to normalize by magnitude after", "norm_vars: if window_frames == 1: cmn_waveform[:, t, :] = torch.zeros(", "f_min + f_sp * mels # And now the nonlinear", "If true, the function will output features that correspond to", "Returns: Tensor: Input after mu-law decoding \"\"\" mu = quantization_channels", "(Hz) (default: 1000) resample_frequency (float, optional): Frequency that we down-sample", "Tensor ) -> Tensor: r\"\"\"Given a STFT tensor, speed up", "Signal Processing to Audio and Acoustics (pp. 1-4), Oct. 2013.", "Returns: (Tensor, Tensor): The magnitude and phase of the complex", "optional): Minimum f0, applied in soft way, must not exceed", "that we down-sample the signal to. Must be more than", "(freq - f_min) / f_sp # Fill in the log-scale", "waveform (Hz) frame_time (float, optional): Duration of a frame (Default:", "waveform_length waveform = torch.nn.functional.pad(waveform, (0, p)) # Compute lags output_lag", "will compute DB to amplitude. 
Returns: Tensor: Output tensor in", "Input tensor quantization_channels (int): Number of channels Returns: Tensor: Input", "# Add back minimal lag indices += lag_min # Add", "specgrams (Tensor): Real spectrograms (batch, channel, freq, time) mask_param (int):", "best = _combine_max(half, best) indices = best[1] # Add back", "int, win_length: int, power: float, normalized: bool, n_iter: int, momentum:", "def magphase( complex_tensor: Tensor, power: float = 1.0 ) ->", "specgrams.dtype value = torch.rand(specgrams.shape[:2], device=device, dtype=dtype) * mask_param min_value =", "/ f_sp # Fill in the log-scale part min_log_hz =", "of FFT hop_length (int): Length of hop between STFT windows", "Tensor: r\"\"\"Turn a spectrogram from the power/amplitude scale to the", "1, 1, device=device, dtype=dtype).repeat(specgram.shape[1], 1, 1) output = torch.nn.functional.conv1d(specgram, kernel,", "hop_length (int): Length of hop between STFT windows win_length (int):", "transform,\" IEEE Trans. ASSP, vol.32, no.2, pp.236–243, Apr. 1984. Args:", "(int): Sample rate of the audio waveform norm (Optional[str]): If", "_median_smoothing( indices: Tensor, win_length: int ) -> Tensor: r\"\"\" Apply", "\\phi_i(m) = \\frac{\\sum_{n=b_i}^{b_i + N-1} w(n) w(m+n)}{\\sqrt{E(b_i) E(m+b_i)}}, where :math:`\\phi_i(m)`", "bool, center: bool = True, pad_mode: str = \"reflect\", onesided:", "form `(..., freq, time)`. Batched inputs should include a channel", "``None``) mel_scale (str, optional): Scale to use: ``htk`` or ``slaney``.", "whether to pad :attr:`waveform` on both sides so that the", "0.0 f_sp = 200.0 / 3 mels = (freq -", "cmn_window: int = 600, min_cmn_window: int = 100, center: bool", "for automatic speech recognition <NAME>, <NAME>, <NAME>, <NAME>, <NAME> and", "change in pitch that our algorithm measures. 
(default: 0.005) nccf_ballast", "for computing delta (Default: ``5``) mode (str, optional): Mode parameter", "can be unstable'.format(momentum) assert momentum >= 0, 'momentum={} < 0'.format(momentum)", "# http://en.wikipedia.org/wiki/Discrete_cosine_transform#DCT-II n = torch.arange(float(n_mels)) k = torch.arange(float(n_mfcc)).unsqueeze(1) dct =", "# freq bins # Equivalent filterbank construction by Librosa all_freqs", "freq, time) Example >>> specgram = torch.randn(1, 40, 1000) >>>", "= indices.unfold(-1, win_length, 1) values, _ = torch.median(roll, -1) return", "x = ((x_mu) / mu) * 2 - 1.0 x", "and axis != 3: raise ValueError('Only Frequency and Time masking", "] def spectrogram( waveform: Tensor, pad: int, window: Tensor, n_fft:", "\"\"\" if axis != 2 and axis != 3: raise", "for t in range(num_frames): window_start = 0 window_end = 0", "size (n_mfcc, n_mels) if norm is None: dct *= 2.0", "frames) angles = torch.stack([angles.cos(), angles.sin()], dim=-1) \\ .to(dtype=specgram.dtype, device=specgram.device) specgram", "File format. channels_first (bool): When True, both the input and", "dimension (..., freq, time) Example >>> specgram = torch.randn(1, 40,", "smallest best = torch.max(nccf[..., lag_min:], -1) half_size = nccf.shape[-1] //", "slopes[:, :-2]) / f_diff[:-1] # (n_freqs, n_mels) up_slopes = slopes[:,", "each bin. Dimension of (freq, 1) Returns: Tensor: Complex Specgrams", "tensor quantization_channels (int): Number of channels Returns: Tensor: Input after", "the expected output. 
rand_init (bool): Initializes phase randomly if True,", "= waveform.dtype device = waveform.device last_window_start = last_window_end = -1", "* db_multiplier if top_db is not None: # Expand batch", "Tensor, multiplier: float, amin: float, db_multiplier: float, top_db: Optional[float] =", "scaled to be approx constant energy per channel enorm =", "a spectrogram from the power/amplitude scale to the decibel scale.", "squared denom = n * (n + 1) * (2", "3 specgram = torch.nn.functional.pad(specgram, (n, n), mode=mode) kernel = torch.arange(-n,", "pad (int): Two sided padding of signal window (Tensor): Window", "rate = 1.3 # Speed up by 30% >>> phase_advance", "linear scale magnitude spectrogram using the Griffin-Lim transformation. Implementation ported", "in soft way, must not exceed min-f0 (default: 10.0) penalty_factor", "the log-scale part min_log_hz = 1000.0 min_log_mel = (min_log_hz -", "width of lowpass filter, more gives sharper filter. (default: 1)", "All examples will have the same mask interval. Args: specgram", "banks (fb matrix) of size (``n_freqs``, ``n_mels``) meaning number of", "frame (to the extent possible, modulo end effects). If false,", "output_frames = ( (s1 * s2).sum(-1) / (EPSILON + torch.norm(s1,", "padding of signal window (Tensor): Window tensor that is applied/multiplied", "float = 1000, resample_frequency: float = 4000, delta_pitch: float =", "for each channel along the time axis. The spectral centroid", "after mu-law decoding \"\"\" mu = quantization_channels - 1.0 if", "return freq def sliding_window_cmn( waveform: Tensor, cmn_window: int = 600,", "full clip. Args: x (Tensor): Input spectrogram(s) before being converted", "Minimum CMN window used at start of decoding (adds latency", "waveform[:, window_start: window_end - window_start, :] cur_sum += torch.sum(input_part, 1)", "4000, delta_pitch: float = 0.005, nccf_ballast: float = 7000, lowpass_filter_width:", "each channel along the time axis. 
The spectral centroid is", "per utterance. Args: waveform (Tensor): Tensor of audio of dimension", "(pad, pad), \"constant\") # pack batch shape = waveform.size() waveform", "m_pts = torch.linspace(m_min, m_max, n_mels + 2) f_pts = _mel_to_hz(m_pts,", "= mask_value elif axis == 2: specgram[:, :, mask_start:mask_end] =", "\"\"\" EPSILON = 10 ** (-9) # Number of lags", "the `Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_ This expects an input with values", ":math:`i` with lag :math:`m`, :math:`w` is the waveform, :math:`N` is", "== \"htk\": return 2595.0 * math.log10(1.0 + (freq / 700.0))", "the nonlinear scale min_log_hz = 1000.0 min_log_mel = (min_log_hz -", "# Speed up by 30% >>> phase_advance = torch.linspace( >>>", "(-9) freq = sample_rate / (EPSILON + indices.to(torch.float)) # unpack", "form `(batch, channel, freq, time)`. multiplier (float): Use 10. for", "nonlinear scale min_log_hz = 1000.0 min_log_mel = (min_log_hz - f_min)", "Tensor: r\"\"\"Create a frequency bin conversion matrix. Args: n_freqs (int):", "on (1 -> frequency, 2 -> time) Returns: Tensor: Masked", "to be approx constant energy per channel enorm = 2.0", "Optional[str] ) -> Tensor: r\"\"\"Create a DCT transformation matrix with", "calculate mel freq bins m_min = _hz_to_mel(f_min, mel_scale=mel_scale) m_max =", "unstable'.format(momentum) assert momentum >= 0, 'momentum={} < 0'.format(momentum) if normalized:", "broadcastable mask mask_start = min_value[..., None, None] mask_end = (min_value", "last dimension corresponds to pitch and NCCF. Reference: - A", "indices to frequency EPSILON = 10 ** (-9) freq =", "> 0``. 
(default: 500) snip_edges (bool, optional): If this is", "# Invert with our current estimate of the phases inverse", "back minimal lag indices += lag_min # Add 1 empirical", "500, snip_edges: bool = True, ) -> torch.Tensor: \"\"\"Extract pitch", "-1) mag = alphas * norm_1 + (1 - alphas)", "spectral_centroid( waveform: Tensor, sample_rate: int, pad: int, window: Tensor, n_fft:", "real_stretch = mag * torch.cos(phase_acc) imag_stretch = mag * torch.sin(phase_acc)", ">>> delta2 = compute_deltas(delta) \"\"\" device = specgram.device dtype =", "angles, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=window, length=length).float() # Rebuild the spectrogram", "power=1., normalized=False) freqs = torch.linspace(0, sample_rate // 2, steps=1 +", "interval. Args: specgram (Tensor): Real spectrogram (channel, freq, time) mask_param", "b[1] return values, indices def _find_max_per_frame( nccf: Tensor, sample_rate: int,", "format=format) return augmented @_mod_utils.requires_kaldi() def compute_kaldi_pitch( waveform: torch.Tensor, sample_rate: float,", "to false, the incomplete frames near the ending edge won't", "waveform: Tensor, sample_rate: int, frame_time: float = 10 ** (-2),", "at time :math:`t \\times \\text{hop\\_length}`. Default: ``True`` pad_mode (string, optional):", "relevant for compatibility with online pitch extraction. A non-critical parameter;", "Trans. ASSP, vol.32, no.2, pp.236–243, Apr. 1984. Args: specgram (Tensor):", "/ 1.3) torch.Size([2, 1025, 231, 2]) \"\"\" # pack batch", "+ value)[..., None, None] mask = torch.arange(0, specgrams.size(axis), device=device, dtype=dtype)", ") -> Tensor: r\"\"\"Compute the norm of complex tensor input.", "r\"\"\" Apply sliding-window cepstral mean (and optionally variance) normalization per", "14th python in science conference, pp. 18-25. 2015. * [2]", "f_sp = 200.0 / 3 freqs = f_min + f_sp", "& <NAME>. 
\"A fast Griffin-Lim algorithm,\" IEEE Workshop on Applications", "num_frames, num_feats, dtype=dtype, device=device) for t in range(num_frames): window_start =", "spectrogram with shape `(..., 2)` into its magnitude and phase.", "= specgram.size() specgram = specgram.reshape(1, -1, shape[-1]) assert win_length >=", "t + 1 if window_start < 0: window_end -= window_start", "(10.0**(mels / 2595.0) - 1.0) # Fill in the linear", "dct *= 2.0 else: assert norm == \"ortho\" dct[0] *=", "if center == false, ignored if center==true (int, default =", "p)) # Compute lags output_lag = [] for lag in", "= torch.max(zero, torch.min(down_slopes, up_slopes)) if norm is not None and", "is a matrix A of size (..., ``n_freqs``), the applied", "A magnitude-only STFT spectrogram of dimension (..., freq, frames) where", "numbers to frequencies. Args: mels (Tensor): Mel frequencies mel_scale (str,", "number of frames) (Default: ``30``). freq_low (int, optional): Lowest frequency", "2 + 1``. window (Tensor): Window tensor that is applied/multiplied", "// 2 + 1`` and ``n_fft`` is the number of", "\" \"please use `normalized=False`.\") # pack batch shape = specgram.size()", "not x_mu.is_floating_point(): x_mu = x_mu.to(torch.float) mu = torch.tensor(mu, dtype=x_mu.dtype) x", "if last_window_start == -1: input_part = waveform[:, window_start: window_end -", "+ n_fft // 2, device=specgram.device).reshape((-1, 1)) freq_dim = -2 return", "optional): Changes the bit depth for the supported formats. For", "``htk`` or ``slaney``. 
(Default: ``htk``) Returns: freqs (Tensor): Mels converted", "if window_start < 0: window_end -= window_start window_start = 0", "\"\"\" # pack batch shape = list(waveform.size()) waveform = waveform.reshape([-1]", "torchaudio.backend.sox_io_backend.save(bytes, waveform, sample_rate, channels_first, compression, format, encoding, bits_per_sample ) bytes.seek(0)", "for (Hz) (default: 400.0) soft_min_f0 (float, optional): Minimum f0, applied", "STFT windows win_length (int): Window size power (float or None):", "angles - tprev.mul_(momentum / (1 + momentum)) angles = angles.div(complex_norm(angles).add(1e-16).unsqueeze(-1).expand_as(angles))", "# with 231 == ceil(300 / 1.3) torch.Size([2, 1025, 231,", "(Tensor): Input spectrogram(s) before being converted to decibel scale. Input", "freq_high) indices = _median_smoothing(indices, win_length) # Convert indices to frequency", "freq, ceil(time/rate), complex=2)` Example >>> freq, hop_length = 1025, 512", "math.pi * hop_length, freq)[..., None] >>> x = phase_vocoder(complex_specgrams, rate,", "of window hops (n_frame). \"\"\" if pad > 0: #", "x.shape # with 231 == ceil(300 / 1.3) torch.Size([2, 1025,", "compute_deltas(delta) \"\"\" device = specgram.device dtype = specgram.dtype # pack", "or ``slaney``. (Default: ``htk``) Returns: mels (float): Frequency in Mels", "may be set too high. \" f\"Or, the value for", "torch.abs(x)) / torch.log1p(mu) x_mu = ((x_mu + 1) / 2", "in pitch that our algorithm measures. (default: 0.005) nccf_ballast (float,", "* b[1] return values, indices def _find_max_per_frame( nccf: Tensor, sample_rate:", "and phase. Args: complex_tensor (Tensor): Tensor shape of `(..., complex=2)`", "so that assuming there is a matrix A of size", "than WAV. For mor details see :py:func:`torchaudio.backend.sox_io_backend.save`. encoding (str, optional):", "2015. * [2] <NAME>., <NAME>., & <NAME>. 
\"A fast Griffin-Lim", "int ) -> Tensor: r\"\"\" Apply a mask along ``axis``.", "of `compute-kaldi-pitch-feats` from Kaldi. Args: waveform (Tensor): The input waveform", "phase[..., :-1]], dim=-1) phase_acc = torch.cumsum(phase, -1) mag = alphas", "r\"\"\"Compute the norm of complex tensor input. Args: complex_tensor (Tensor):", ">= min_log_mel) freqs[log_t] = min_log_hz * torch.exp(logstep * (mels[log_t] -", "is the deltas at time :math:`t`, :math:`c_t` is the spectrogram", "after mu-law encoding \"\"\" mu = quantization_channels - 1.0 if", "for running average CMN computation (int, default = 600) min_cmn_window", "this factor reduces NCCF for quiet frames (default: 7000) lowpass_filter_width", "Griffin-Lim algorithm,\" IEEE Workshop on Applications of Signal Processing to", "(ICASSP), Florence, 2014, pp. 2494-2498, doi: 10.1109/ICASSP.2014.6854049. \"\"\" shape =", "freq is ``n_fft // 2 + 1``. window (Tensor): Window", "FFT, creates ``n_fft // 2 + 1`` bins hop_length (int):", "for lag in range(1, lags + 1): s1 = waveform[...,", "specgram = specgram.reshape(1, -1, shape[-1]) assert win_length >= 3 n", "dimension corresponds to pitch and NCCF. Reference: - A pitch", "-1) half_size = nccf.shape[-1] // 2 half = torch.max(nccf[..., lag_min:half_size],", "r\"\"\"Encode signal based on mu-law companding. For more info see", "frame is centered at time :math:`t \\times \\text{hop\\_length}`. Default: ``True``", "in time without modifying pitch by a factor of ``rate``.", "Tensor: Masked spectrogram of dimensions (channel, freq, time) \"\"\" #", "windows win_length (int): Window size Returns: Tensor: Dimension (..., time)", "Must be 2 dimensional. See also ```channels_first```. 
sample_rate (int): Sample", "freq, time) win_length (int, optional): The window length used for", "cur_sumsq += (frame_to_add ** 2) window_frames = window_end - window_start", "= x.to(torch.float) mu = torch.tensor(mu, dtype=x.dtype) x_mu = torch.sign(x) *", "def _median_smoothing( indices: Tensor, win_length: int ) -> Tensor: r\"\"\"", "time]`` else ``[time, channel]``. \"\"\" bytes = io.BytesIO() torchaudio.backend.sox_io_backend.save(bytes, waveform,", "sample_rate: int, format: str, channels_first: bool = True, compression: Optional[float]", "magnitude-only or complex. Args: waveform (Tensor): Tensor of audio of", "specgram.reshape(shape[:-2] + specgram.shape[-2:]) return specgram def compute_deltas( specgram: Tensor, win_length:", "= (a[0] > thresh * b[0]) values = mask *", "True ) -> Tensor: r\"\"\"Create a spectrogram or a batch", "== 0.).any(): warnings.warn( \"At least one mel filterbank has all", "waveform = torch.istft(specgram * angles, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=window, length=length)", "= 0.99 ) -> Tuple[Tensor, Tensor]: \"\"\" Take value from", "return torch.view_as_real(spec_f) def griffinlim( specgram: Tensor, window: Tensor, n_fft: int,", "complex_specgrams.index_select(-2, time_steps.long()) complex_specgrams_1 = complex_specgrams.index_select(-2, (time_steps + 1).long()) angle_0 =", "window_start -= (window_end - num_frames) window_end = num_frames if window_start", "e.g., 1 for energy, 2 for power, etc. normalized (bool):", "waveform, sample_rate, channels_first, compression, format, encoding, bits_per_sample ) bytes.seek(0) augmented,", "int = 5, max_frames_latency: int = 0, frames_per_chunk: int =", "1 may not converge. length (int or None): Array length", "Real spectrograms (batch, channel, freq, time) mask_param (int): Number of", "r\"\"\"Detect pitch frequency. 
It is implemented using normalized cross-correlation function", "0: window_end -= window_start window_start = 0 if not center:", "25.0) frame_shift (float, optional): Frame shift in milliseconds. (default: 10.0)", "DB to power. If 0.5, will compute DB to amplitude.", "near enough max that is smallest best = torch.max(nccf[..., lag_min:],", "* torch.round(phase / (2 * math.pi)) # Compute Phase Accum", "// 2, steps=1 + n_fft // 2, device=specgram.device).reshape((-1, 1)) freq_dim", "Returns: torch.Tensor: Resulting Tensor. If ``channels_first=True``, it has ``[channel, time]``", "output.reshape(shape) return output def _compute_nccf( waveform: Tensor, sample_rate: int, frame_time:", "/ (window_frames ** 2)) variance = torch.pow(variance, -0.5) cmn_waveform[:, t,", "- window_start last_window_start = window_start last_window_end = window_end cmn_waveform[:, t,", "frame_size, frame_size)[..., :num_of_frames, :] s2 = waveform[..., lag:].unfold(-1, frame_size, frame_size)[...,", "torch.zeros( num_channels, num_frames, num_feats, dtype=dtype, device=device) for t in range(num_frames):", "(..., frame) \"\"\" # pack batch shape = list(waveform.size()) waveform", "batch waveform = waveform.reshape(shape[:-2] + waveform.shape[-1:]) return waveform def amplitude_to_DB(", "(freq, 1) Returns: Tensor: Complex Specgrams Stretch with dimension of", "== 1: specgram[:, mask_start:mask_end] = mask_value elif axis == 2:", "return spec_f.abs().pow(power) return torch.view_as_real(spec_f) def griffinlim( specgram: Tensor, window: Tensor,", "torch.ops.torchaudio.kaldi_ComputeKaldiPitch( waveform, sample_rate, frame_length, frame_shift, min_f0, max_f0, soft_min_f0, penalty_factor, lowpass_cutoff,", "Tensor, n_fft: int, hop_length: int, win_length: int, ) -> Tensor:", "of channels Returns: Tensor: Input after mu-law encoding \"\"\" mu", "previous iterate to 0 rebuilt = torch.tensor(0.) 
for _ in", "variance = torch.pow(variance, -0.5) cmn_waveform[:, t, :] *= variance cmn_waveform", "online decoder would see in the first pass of decoding", "= \"reflect\", onesided: bool = True ) -> Tensor: r\"\"\"Create", "pad), \"constant\") # pack batch shape = waveform.size() waveform =", "if top_db is not None: # Expand batch shape =", "int = 85, freq_high: int = 3400, ) -> Tensor:", "(int): Axis to apply masking on (1 -> frequency, 2", "reduces NCCF for quiet frames (default: 7000) lowpass_filter_width (int, optional):", "F0 to search for (Hz) (default: 400.0) soft_min_f0 (float, optional):", "Tensor, sample_rate: int, freq_high: int ) -> Tensor: r\"\"\" For", "(int or None): Array length of the expected output. rand_init", "(Tensor): Tensor of audio of dimension (..., freq, time) cmn_window", "(Tensor): A magnitude-only STFT spectrogram of dimension (..., freq, frames)", "+ 0.5) * k) # size (n_mfcc, n_mels) if norm", "int ) -> Tensor: r\"\"\" For each frame, take the", "of shape `(..., time)`. sample_rate (float): Sample rate of `waveform`.", "window_start = t - cmn_window // 2 window_end = window_start", "The transformation matrix, to be right-multiplied to row-wise data of", "once issue is fixed # https://github.com/pytorch/pytorch/issues/34279 return complex_tensor.pow(2.).sum(-1).pow(0.5 * power)", "soft_min_f0 (float, optional): Minimum f0, applied in soft way, must", "``v_0`` from ``uniform(0, max_v - v)``. 
Args: specgrams (Tensor): Real", "multiplier * torch.log10(torch.clamp(x, min=amin)) x_db -= multiplier * db_multiplier if", "norm (Optional[str]): If 'slaney', divide the triangular mel weights by", "f_diff[:-1] # (n_freqs, n_mels) up_slopes = slopes[:, 2:] / f_diff[1:]", "return_complex=True, ) ) # Update our phase estimates angles =", "x_db.reshape(-1, packed_channels, shape[-2], shape[-1]) x_db = torch.max(x_db, (x_db.amax(dim=(-3, -2, -1))", "nccf def _combine_max( a: Tuple[Tensor, Tensor], b: Tuple[Tensor, Tensor], thresh:", "optional): If this is set to false, the incomplete frames", "optional): Minimum negative cut-off in decibels. A reasonable number is", "specgram.dtype # pack batch shape = specgram.size() specgram = specgram.reshape(1,", "(Tensor): Input tensor before being converted to power/amplitude scale. ref", "\"with torch.no_grad():\" back when JIT supports it waveform = torch.nn.functional.pad(waveform,", "(f_pts[2:n_mels + 2] - f_pts[:n_mels]) fb *= enorm.unsqueeze(0) if (fb.max(dim=0).values", "\"apply_codec\", ] def spectrogram( waveform: Tensor, pad: int, window: Tensor,", "<NAME>. \"librosa: Audio and music signal analysis in python.\" In", "(bool, default = false) norm_vars (bool, optional): If true, normalize", "torch.cos(phase_acc) imag_stretch = mag * torch.sin(phase_acc) complex_specgrams_stretch = torch.stack([real_stretch, imag_stretch],", "length for median smoothing (in number of frames) (Default: ``30``).", "spectrogram: .. 
math:: d_t = \\frac{\\sum_{n=1}^{\\text{N}} n (c_{t+n} - c_{t-n})}{2", "fb def create_dct( n_mfcc: int, n_mels: int, norm: Optional[str] )", "(int): Number of channels Returns: Tensor: Input after mu-law encoding", "time_steps % 1.0 phase_0 = angle(complex_specgrams[..., :1, :]) # Time", "torch.log1p(mu)) - 1.0) / mu return x def complex_norm( complex_tensor:", "cmn_waveform = cmn_waveform.view(input_shape[:-2] + (num_frames, num_feats)) if len(input_shape) == 2:", "= angle(complex_tensor) return mag, phase def phase_vocoder( complex_specgrams: Tensor, rate:", "matrix with shape (``n_mels``, ``n_mfcc``), normalized depending on norm. Args:", "= t + 1 if window_start < 0: window_end -=", "= time_steps % 1.0 phase_0 = angle(complex_specgrams[..., :1, :]) #", "+ 1) / 2 * mu + 0.5).to(torch.int64) return x_mu", "tracking to introduce into the feature processing (affects output only", "augmented, _ = torchaudio.sox_effects.sox_effects.apply_effects_file( bytes, effects=[[\"rate\", f\"{sample_rate}\"]], channels_first=channels_first, format=format) return", "indices.unfold(-1, win_length, 1) values, _ = torch.median(roll, -1) return values", "time_steps = torch.arange(0, complex_specgrams.size(-2), rate, device=complex_specgrams.device, dtype=complex_specgrams.dtype) alphas = time_steps", ") output_lag.append(output_frames.unsqueeze(-1)) nccf = torch.cat(output_lag, -1) return nccf def _combine_max(", "+= (frame_to_add ** 2) window_frames = window_end - window_start last_window_start", "number of frames is the file size divided by the", "indices = _median_smoothing(indices, win_length) # Convert indices to frequency EPSILON", "def compute_deltas( specgram: Tensor, win_length: int = 5, mode: str", "500) snip_edges (bool, optional): If this is set to false,", "and median smoothing. 
Args: waveform (Tensor): Tensor of audio of", "bool = True ) -> Tensor: r\"\"\"Create a spectrogram or", "of the audio waveform pad (int): Two sided padding of", "time is the number of window hops (n_frame). \"\"\" if", "tensor. Shape of `(..., )` \"\"\" return torch.atan2(complex_tensor[..., 1], complex_tensor[...,", "(..., time) \"\"\" specgram = spectrogram(waveform, pad=pad, window=window, n_fft=n_fft, hop_length=hop_length,", "Tensor]: \"\"\" Take value from first if bigger than a", "> t: window_end = max(t + 1, min_cmn_window) if window_end", "signal scaled between -1 and 1. Args: x_mu (Tensor): Input", "lag_min # Add 1 empirical calibration offset indices += 1", "Conference on Acoustics, Speech and Signal Processing (ICASSP), Florence, 2014,", "((cur_sum ** 2) / (window_frames ** 2)) variance = torch.pow(variance,", "``n_fft // 2 + 1``. window (Tensor): Window tensor that", "stft center (bool, optional): whether to pad :attr:`waveform` on both", "- f_pts[:-1] # (n_mels + 1) slopes = f_pts.unsqueeze(0) -", "def phase_vocoder( complex_specgrams: Tensor, rate: float, phase_advance: Tensor ) ->", "return waveform def amplitude_to_DB( x: Tensor, multiplier: float, amin: float,", "the first half of lags, then the latter is taken.", "TODO add \"with torch.no_grad():\" back when JIT supports it waveform", "min-f0 (default: 10.0) penalty_factor (float, optional): Cost factor for FO", "hop_length: int, win_length: int, ) -> Tensor: r\"\"\" Compute the", "+ torch.norm(s2, p=2, dim=-1)).pow(2) ) output_lag.append(output_frames.unsqueeze(-1)) nccf = torch.cat(output_lag, -1)", "Duration of a frame (Default: ``10 ** (-2)``). 
win_length (int,", "(int, optional): Maximum number of frames of latency that we", "(int): Number of mel filterbanks norm (str or None): Norm", "lag_min:half_size], -1) best = _combine_max(half, best) indices = best[1] #", "int ) -> Tensor: r\"\"\" Apply median smoothing to the", "= window_start + cmn_window else: window_start = t - cmn_window", "features give the same number of frames. (default: True) Returns:", "torch.norm(complex_specgrams_1, p=2, dim=-1) phase = angle_1 - angle_0 - phase_advance", "the decibel scale to the power/amplitude scale. Args: x (Tensor):", "center: bool = True, pad_mode: str = \"reflect\", onesided: bool", "that our algorithm measures. (default: 0.005) nccf_ballast (float, optional): Increasing", "(pad_length, 0), mode=\"constant\", value=0. ) indices[..., :pad_length] = torch.cat(pad_length *", "/ f_diff[:-1] # (n_freqs, n_mels) up_slopes = slopes[:, 2:] /", "of the norm. (Default: `1.0`) Returns: (Tensor, Tensor): The magnitude", "mask_end = (min_value.long() + value.long()).squeeze() assert mask_end - mask_start <", "= 10.0, min_f0: float = 50, max_f0: float = 400,", "Apply codecs as a form of augmentation. Args: waveform (Tensor):", "x = torch.sign(x) * (torch.exp(torch.abs(x) * torch.log1p(mu)) - 1.0) /", "'mask_along_axis', 'mask_along_axis_iid', 'sliding_window_cmn', \"spectral_centroid\", \"apply_codec\", ] def spectrogram( waveform: Tensor,", "lags output_lag = [] for lag in range(1, lags +", "of audio of dimension (..., freq, time) cmn_window (int, optional):", "normalized: spec_f /= window.pow(2.).sum().sqrt() if power is not None: if", "optional): The window length for median smoothing (in number of", "Tuple[Tensor, Tensor]: \"\"\" Take value from first if bigger than", "torch.norm(complex_specgrams_0, p=2, dim=-1) norm_1 = torch.norm(complex_specgrams_1, p=2, dim=-1) phase =", "output. 
rand_init (bool): Initializes phase randomly if True, to zero", "frame, :math:`b_i` is the beginning of frame :math:`i`, :math:`E(j)` is", "norm != \"slaney\": raise ValueError(\"norm must be one of None", "angles = rebuilt if momentum: angles = angles - tprev.mul_(momentum", "complex. Args: waveform (Tensor): Tensor of audio of dimension (...,", "2: cmn_waveform = cmn_waveform.squeeze(0) return cmn_waveform def spectral_centroid( waveform: Tensor,", "to introduce into the feature processing (affects output only if", "= specgram.size() specgram = specgram.reshape([-1] + list(shape[-2:])) value = torch.rand(1)", "> 0``. recompute_frame (int, optional): Only relevant for compatibility with", "pitch that our algorithm measures. (default: 0.005) nccf_ballast (float, optional):", "for median smoothing (in number of frames) (Default: ``30``). freq_low", "details see :py:func:`torchaudio.backend.sox_io_backend.save`. encoding (str, optional): Changes the encoding for", "+ v)``, where ``v`` is sampled from ``uniform(0, mask_param)``, and", "= \\frac{\\sum_{n=1}^{\\text{N}} n (c_{t+n} - c_{t-n})}{2 \\sum_{n=1}^{\\text{N}} n^2} where :math:`d_t`", "`n_mels` ({n_mels}) may be set too high. \" f\"Or, the", "(Tensor): Audio data. Must be 2 dimensional. See also ```channels_first```.", "(default: 5) max_frames_latency (int, optional): Maximum number of frames of", "A non-critical parameter; the frame at which we recompute some", "decoder would see in the first pass of decoding --", "penalty_factor, lowpass_cutoff, resample_frequency, delta_pitch, nccf_ballast, lowpass_filter_width, upsample_filter_width, max_frames_latency, frames_per_chunk, simulate_first_pass_online,", "(Default: ``htk``) Returns: mels (float): Frequency in Mels \"\"\" if", "``\"replicate\"``) Returns: Tensor: Tensor of deltas of dimension (..., freq,", "onesided: bool = True ) -> Tensor: r\"\"\"Create a spectrogram", "DB to amplitude. 
Returns: Tensor: Output tensor in power/amplitude scale.", "-*- coding: utf-8 -*- import io import math import warnings", "is the number of Fourier bins, and time is the", "Power of the normed input tensor. Shape of `(..., )`", "now the nonlinear scale min_log_hz = 1000.0 min_log_mel = (min_log_hz", "`(..., 2)` into its magnitude and phase. Args: complex_tensor (Tensor):", "bool = False, ) -> Tensor: r\"\"\" Apply sliding-window cepstral", "optional): Highest frequency that can be detected (Hz) (Default: ``3400``).", "Two sided padding of signal window (Tensor): Window tensor that", "torch.zeros(batch, freq, frames) angles = torch.stack([angles.cos(), angles.sin()], dim=-1) \\ .to(dtype=specgram.dtype,", "\"At least one mel filterbank has all zero values. \"", "and returns a signal encoded with values from 0 to", "= torch.randn(2, freq, 300, 2) >>> rate = 1.3 #", "# Time Padding complex_specgrams = torch.nn.functional.pad(complex_specgrams, [0, 0, 0, 2])", "channel]``. compression (float): Used for formats other than WAV. For", "= (win_length - 1) // 2 # \"replicate\" padding in", "0) frames_per_chunk (int, optional): The number of frames used for", "+ (1 - alphas) * norm_0 real_stretch = mag *", "Tensor, pad: int, window: Tensor, n_fft: int, hop_length: int, win_length:", "smoothing, and convert to frequency. Note: If the max among", "and output Tensor have dimension ``[channel, time]``. Otherwise, they have", "min_cmn_window (int, optional): Minimum CMN window used at start of", "depends on the maximum value of that tensor, and so", "removed in v0.9.0 release. To suppress this warning, \" \"please", "b[0]) values = mask * a[0] + ~mask * b[0]", "If true, normalize variance to one. (bool, default = false)", "the difference between each mel point and each stft freq", "the linear part f_min = 0.0 f_sp = 200.0 /", "the normed input tensor. 
Shape of `(..., )` \"\"\" #", "torch.pow(variance, -0.5) cmn_waveform[:, t, :] *= variance cmn_waveform = cmn_waveform.view(input_shape[:-2]", "of dimension (..., freq, time) sample_rate (int): The sample rate", "k = torch.arange(float(n_mfcc)).unsqueeze(1) dct = torch.cos(math.pi / float(n_mels) * (n", "window: Tensor, n_fft: int, hop_length: int, win_length: int, ) ->", "complex=2)` Return: Tensor: Angle of a complex tensor. Shape of", "Stretch with dimension of `(..., freq, ceil(time/rate), complex=2)` Example >>>", "1 can lead to faster convergence, but above 1 may", "_hz_to_mel(f_min, mel_scale=mel_scale) m_max = _hz_to_mel(f_max, mel_scale=mel_scale) m_pts = torch.linspace(m_min, m_max,", "mask = (a[0] > thresh * b[0]) values = mask", "be either magnitude-only or complex. Args: waveform (Tensor): Tensor of", "of augmentation. Args: waveform (Tensor): Audio data. Must be 2", "// 2 window_end = window_start + cmn_window else: window_start =", "int, ) -> Tensor: r\"\"\" Compute the spectral centroid for", "used for computing delta (Default: ``5``) mode (str, optional): Mode", "(float): Log10(max(reference value and amin)) top_db (float or None, optional):", "= torch.sign(x) * (torch.exp(torch.abs(x) * torch.log1p(mu)) - 1.0) / mu", "db_multiplier (float): Log10(max(reference value and amin)) top_db (float or None,", "clamp ``x`` db_multiplier (float): Log10(max(reference value and amin)) top_db (float", "integer squared denom = n * (n + 1) *", "Hz to Mels. Args: freqs (float): Frequencies in Hz mel_scale", "Frequencies in Hz mel_scale (str, optional): Scale to use: ``htk``", "smoothing (in number of frames) (Default: ``30``). 
freq_low (int, optional):", "phase_advance: Tensor ) -> Tensor: r\"\"\"Given a STFT tensor, speed", "mu-law decoding \"\"\" mu = quantization_channels - 1.0 if not", "- cmn_window // 2 window_end = window_start + cmn_window else:", "cmn_waveform[:, t, :] = torch.zeros( num_channels, num_feats, dtype=dtype, device=device) else:", "of Signal Processing to Audio and Acoustics (pp. 1-4), Oct.", "optional): Maximum F0 to search for (Hz) (default: 400.0) soft_min_f0", "= torch.view_as_real( torch.stft( input=inverse, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=window, center=True, pad_mode='reflect',", "specgrams: Tensor, mask_param: int, mask_value: float, axis: int ) ->", "uniformly sampled from [0, mask_param] mask_value (float): Value to assign", "filterbanks sample_rate (int): Sample rate of the audio waveform norm", "weighted by their magnitude. Args: waveform (Tensor): Tensor of audio", "specgram = spectrogram(waveform, pad=pad, window=window, n_fft=n_fft, hop_length=hop_length, win_length=win_length, power=1., normalized=False)", "= _compute_nccf(waveform, sample_rate, frame_time, freq_low) indices = _find_max_per_frame(nccf, sample_rate, freq_high)", "2) # create overlapping triangles zero = torch.zeros(1) down_slopes =", "is the number of window hops (n_frame). \"\"\" if pad", "waveform.device last_window_start = last_window_end = -1 cur_sum = torch.zeros(num_channels, num_feats,", "0, simulate_first_pass_online: bool = False, recompute_frame: int = 500, snip_edges:", "spectrogram of dimensions (channel, freq, time) \"\"\" # pack batch", "optional): Mode parameter passed to padding (Default: ``\"replicate\"``) Returns: Tensor:", "torch.Tensor: Resulting Tensor. If ``channels_first=True``, it has ``[channel, time]`` else", "If None, then the complex spectrum is returned instead. 
normalized", "+ value.long()).squeeze() assert mask_end - mask_start < mask_param if axis", "magnitude after stft center (bool, optional): whether to pad :attr:`waveform`", "modifying pitch by a factor of ``rate``. Args: complex_specgrams (Tensor):", "num_of_frames * frame_size - waveform_length waveform = torch.nn.functional.pad(waveform, (0, p))", "check lags = int(math.ceil(sample_rate / freq_low)) frame_size = int(math.ceil(sample_rate *", "math:: \\phi_i(m) = \\frac{\\sum_{n=b_i}^{b_i + N-1} w(n) w(m+n)}{\\sqrt{E(b_i) E(m+b_i)}}, where", "mu) * 2 - 1.0 x = torch.sign(x) * (torch.exp(torch.abs(x)", "0``. (default: 500) snip_edges (bool, optional): If this is set", "a window centered on the current frame (to the extent", "to 0 recovers the original Griffin-Lim method. Values near 1", "complex=2) >>> complex_specgrams = torch.randn(2, freq, 300, 2) >>> rate", "format: str, channels_first: bool = True, compression: Optional[float] = None,", "windows. ( Default: ``win_length // 2``) win_length (int): Window size.", "magphase( complex_tensor: Tensor, power: float = 1.0 ) -> Tuple[Tensor,", "is to the left. (bool, default = false) norm_vars (bool,", "be removed in v0.9.0 release. To suppress this warning, \"", "window (Tensor): Window tensor that is applied/multiplied to each frame/window", "* [3] <NAME> and <NAME>, \"Signal estimation from modified short-time", "STFT spectrogram of dimension (..., freq, frames) where freq is", "at start). Only applicable if center == false, ignored if", "center==true (int, default = 100) center (bool, optional): If true,", "= t - cmn_window window_end = t + 1 if", "which the output will be scaled by. power (float): If", "is defined as the weighted average of the frequency values,", "If this is set to false, the incomplete frames near", "\" f\"Or, the value for `n_freqs` ({n_freqs}) may be set", "Sample rate of the audio waveform. 
format (str): File format.", "ValueError('mel_scale should be one of \"htk\" or \"slaney\".') if mel_scale", "any dimension indices = torch.nn.functional.pad( indices, (pad_length, 0), mode=\"constant\", value=0.", "1, 'momentum={} > 1 can be unstable'.format(momentum) assert momentum >=", "angle_1 - angle_0 - phase_advance phase = phase - 2", "no.2, pp.236–243, Apr. 1984. Args: specgram (Tensor): A magnitude-only STFT", "the applied result would be ``A * create_fb_matrix(A.size(-1), ...)``. \"\"\"", "/ window_frames if norm_vars: if window_frames == 1: cmn_waveform[:, t,", "(float): Value to assign to the masked columns axis (int):", "start). Only applicable if center == false, ignored if center==true", "-1 and 1. Args: x_mu (Tensor): Input tensor quantization_channels (int):", "cmn_window else: window_start = t - cmn_window window_end = t", "lags to check lags = int(math.ceil(sample_rate / freq_low)) frame_size =", "A pitch extraction algorithm tuned for automatic speech recognition <NAME>,", "Example >>> freq, hop_length = 1025, 512 >>> # (channel,", "(float): Number to clamp ``x`` db_multiplier (float): Log10(max(reference value and", "math.pi * torch.round(phase / (2 * math.pi)) # Compute Phase", "torch.norm once issue is fixed # https://github.com/pytorch/pytorch/issues/34279 return complex_tensor.pow(2.).sum(-1).pow(0.5 *", "x_db = torch.max(x_db, (x_db.amax(dim=(-3, -2, -1)) - top_db).view(-1, 1, 1,", "is set to false, the incomplete frames near the ending", "Function (NCCF). .. math:: \\phi_i(m) = \\frac{\\sum_{n=b_i}^{b_i + N-1} w(n)", "return complex_specgrams_stretch def mask_along_axis_iid( specgrams: Tensor, mask_param: int, mask_value: float,", "between STFT windows win_length (int): Window size power (float or", "Args: specgram (Tensor): A magnitude-only STFT spectrogram of dimension (...,", "magnitude. 
Args: waveform (Tensor): Tensor of audio of dimension (...,", "`n_freqs` ({n_freqs}) may be set too low.\" ) return fb", "torch.zeros( num_channels, num_feats, dtype=dtype, device=device) else: variance = cur_sumsq variance", "window=window, length=length) # unpack batch waveform = waveform.reshape(shape[:-2] + waveform.shape[-1:])", "Oct. 2013. * [3] <NAME> and <NAME>, \"Signal estimation from", "/ f_sp logstep = math.log(6.4) / 27.0 log_t = (mels", "each frame, take the highest value of NCCF, apply centered", "fast Griffin-Lim algorithm,\" IEEE Workshop on Applications of Signal Processing", "details see :py:func:`torchaudio.backend.sox_io_backend.save`. Returns: torch.Tensor: Resulting Tensor. If ``channels_first=True``, it", "unpack batch waveform = waveform.reshape(shape[:-2] + waveform.shape[-1:]) return waveform def", "gives sharper filter. (default: 1) upsample_filter_width (int, optional): Integer that", "amplitude_to_DB( x: Tensor, multiplier: float, amin: float, db_multiplier: float, top_db:", "1D tensor over the given window. \"\"\" # Centered windowed", "2 # twice sum of integer squared denom = n", "1-4), Oct. 2013. * [3] <NAME> and <NAME>, \"Signal estimation", "torch.sign(x) * torch.log1p(mu * torch.abs(x)) / torch.log1p(mu) x_mu = ((x_mu", "lag in range(1, lags + 1): s1 = waveform[..., :-lag].unfold(-1,", "optional): If true, use a window centered on the current", "batch complex_specgrams_stretch = complex_specgrams_stretch.reshape(shape[:-3] + complex_specgrams_stretch.shape[1:]) return complex_specgrams_stretch def mask_along_axis_iid(", "= 0, simulate_first_pass_online: bool = False, recompute_frame: int = 500,", "we allow pitch tracking to introduce into the feature processing", "rebuilt = torch.tensor(0.) for _ in range(n_iter): # Store the", "Tensor: r\"\"\" Compute Normalized Cross-Correlation Function (NCCF). .. 
math:: \\phi_i(m)", "f_pts.unsqueeze(0) - all_freqs.unsqueeze(1) # (n_freqs, n_mels + 2) # create", "= 0.1, lowpass_cutoff: float = 1000, resample_frequency: float = 4000,", "window_start = t - cmn_window window_end = t + 1", "if ``frames_per_chunk > 0`` and ``simulate_first_pass_online=True``) (default: 0) frames_per_chunk (int,", "_mel_to_hz(m_pts, mel_scale=mel_scale) # calculate the difference between each mel point", "- f_min) / f_sp # Fill in the log-scale part", "mask_value: float, axis: int ) -> Tensor: r\"\"\" Apply a", "- 1) // 2 # twice sum of integer squared", "phase_advance phase = phase - 2 * math.pi * torch.round(phase", "Optional[str] = None, bits_per_sample: Optional[int] = None, ) -> Tensor:", "and 20. for amplitude amin (float): Number to clamp ``x``", "hop_length, freq)[..., None] >>> x = phase_vocoder(complex_specgrams, rate, phase_advance) >>>", "reasonable number is 80. (Default: ``None``) Returns: Tensor: Output tensor", "or None): Norm to use (either 'ortho' or None) Returns:", "librosa.core.spectrum._spectrogram spec_f = torch.stft( input=waveform, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=window, center=center,", "2 and axis != 3: raise ValueError('Only Frequency and Time", "'slaney', divide the triangular mel weights by the width of", "E(m+b_i)}}, where :math:`\\phi_i(m)` is the NCCF at frame :math:`i` with", "magnitude-only STFT spectrogram of dimension (..., freq, frames) where freq", "== 2: specgram[:, :, mask_start:mask_end] = mask_value else: raise ValueError('Only", "if norm_vars: cur_sumsq += torch.cumsum(input_part ** 2, 1)[:, -1, :]", "should include a channel dimension and have the form `(batch,", "axis != 2 and axis != 3: raise ValueError('Only Frequency", "log_t = (mels >= min_log_mel) freqs[log_t] = min_log_hz * torch.exp(logstep", "running average CMN computation (int, default = 600) min_cmn_window (int,", "= ((x_mu + 1) / 2 * mu + 0.5).to(torch.int64)", "by the frame-shift. 
This makes different types of features give", "400.0) soft_min_f0 (float, optional): Minimum f0, applied in soft way,", "near 1 can lead to faster convergence, but above 1", "window: Tensor, n_fft: int, hop_length: int, win_length: int, power: Optional[float],", "half_size = nccf.shape[-1] // 2 half = torch.max(nccf[..., lag_min:half_size], -1)", "+ ~mask * b[0] indices = mask * a[1] +", "waveform = waveform.reshape(shape[:-2] + waveform.shape[-1:]) return waveform def amplitude_to_DB( x:", "Tensor, quantization_channels: int ) -> Tensor: r\"\"\"Encode signal based on", "r\"\"\"Compute delta coefficients of a tensor, usually a spectrogram: ..", "of the frequency values, weighted by their magnitude. Args: waveform", "is taken. \"\"\" lag_min = int(math.ceil(sample_rate / freq_high)) # Find", "shape[-1:]) nccf = _compute_nccf(waveform, sample_rate, frame_time, freq_low) indices = _find_max_per_frame(nccf,", "bin. Dimension of (freq, 1) Returns: Tensor: Complex Specgrams Stretch", "db_multiplier: float, top_db: Optional[float] = None ) -> Tensor: r\"\"\"Turn", "* (n + 0.5) * k) # size (n_mfcc, n_mels)", "= complex_specgrams.size() complex_specgrams = complex_specgrams.reshape([-1] + list(shape[-3:])) time_steps = torch.arange(0,", "Returns: Tensor: Dimension (..., time) \"\"\" specgram = spectrogram(waveform, pad=pad,", "is the length of a frame, :math:`b_i` is the beginning", "tensor that is applied/multiplied to each frame/window n_fft (int): Size", "specgrams.masked_fill_((mask >= mask_start) & (mask < mask_end), mask_value) specgrams =", "window: Tensor, n_fft: int, hop_length: int, win_length: int, power: float,", "if rand_init: angles = 2 * math.pi * torch.rand(batch, freq,", "Tensor, rate: float, phase_advance: Tensor ) -> Tensor: r\"\"\"Given a", "frequency EPSILON = 10 ** (-9) freq = sample_rate /", "-2 return (freqs * specgram).sum(dim=freq_dim) / specgram.sum(dim=freq_dim) @_mod_utils.requires_sox() def apply_codec(", "30, freq_low: int = 85, freq_high: int 
= 3400, )", "* x), power) def _hz_to_mel(freq: float, mel_scale: str = \"htk\")", "_find_max_per_frame(nccf, sample_rate, freq_high) indices = _median_smoothing(indices, win_length) # Convert indices", "raw audio signal. The spectrogram can be either magnitude-only or", "optional): The number of frames used for energy normalization. (default:", "the audio waveform norm (Optional[str]): If 'slaney', divide the triangular", "= waveform.reshape(-1, shape[-1]) # default values are consistent with librosa.core.spectrum._spectrogram", "of audio of dimension (..., time) sample_rate (int): Sample rate", "<NAME>, <NAME>, <NAME> and <NAME> 2014 IEEE International Conference on", "analysis in python.\" In Proceedings of the 14th python in", "Mels converted in Hz \"\"\" if mel_scale not in ['slaney',", "The output of each tensor in a batch depends on", "Cross-Correlation Function (NCCF). .. math:: \\phi_i(m) = \\frac{\\sum_{n=b_i}^{b_i + N-1}", "or None): Array length of the expected output. rand_init (bool):", "magnitude spectrogram, (must be > 0) e.g., 1 for energy,", "phase of the complex tensor \"\"\" mag = complex_norm(complex_tensor, power)", "mask_end = (min_value + value)[..., None, None] mask = torch.arange(0,", "* mask_param min_value = torch.rand(1) * (specgram.size(axis) - value) mask_start", "1 and returns a signal encoded with values from 0", "of `(..., complex=2)` Return: Tensor: Angle of a complex tensor.", "x = x.to(torch.float) mu = torch.tensor(mu, dtype=x.dtype) x_mu = torch.sign(x)", "(EPSILON + torch.norm(s1, p=2, dim=-1)).pow(2) / (EPSILON + torch.norm(s2, p=2,", "bool = True, compression: Optional[float] = None, encoding: Optional[str] =", "the magnitude spectrogram, (must be > 0) e.g., 1 for", "phase_advance (Tensor): Expected phase advance in each bin. 
Dimension of", "+ N-1} w(n) w(m+n)}{\\sqrt{E(b_i) E(m+b_i)}}, where :math:`\\phi_i(m)` is the NCCF", "to frequency EPSILON = 10 ** (-9) freq = sample_rate", "optional): Minimum CMN window used at start of decoding (adds", "'mask_along_axis_iid', 'sliding_window_cmn', \"spectral_centroid\", \"apply_codec\", ] def spectrogram( waveform: Tensor, pad:", "estimate of the phases inverse = torch.istft(specgram * angles, n_fft=n_fft,", "to be right-multiplied to row-wise data of size (``n_mels``, ``n_mfcc``).", "Frame shift in milliseconds. (default: 10.0) min_f0 (float, optional): Minimum", "= \"htk\") -> Tensor: \"\"\"Convert mel bin numbers to frequencies.", "frame_time: float = 10 ** (-2), win_length: int = 30,", "float = 25.0, frame_shift: float = 10.0, min_f0: float =", "for (Hz) (default: 50.0) max_f0 (float, optional): Maximum F0 to", "momentum: angles = angles - tprev.mul_(momentum / (1 + momentum))", "Mel frequencies mel_scale (str, optional): Scale to use: ``htk`` or", "and Time masking are supported') device = specgrams.device dtype =", "cur_sum / window_frames if norm_vars: if window_frames == 1: cmn_waveform[:,", "of the normed input tensor. 
Shape of `(..., )` \"\"\"", "2) / (window_frames ** 2)) variance = torch.pow(variance, -0.5) cmn_waveform[:,", "scale f_min = 0.0 f_sp = 200.0 / 3 freqs", "= 1000, resample_frequency: float = 4000, delta_pitch: float = 0.005,", "> num_frames: window_start -= (window_end - num_frames) window_end = num_frames", "Shape: ``(batch, frames 2)`` where the last dimension corresponds to", "shape = waveform.shape waveform = waveform.reshape(-1, shape[-1]) result = torch.ops.torchaudio.kaldi_ComputeKaldiPitch(", "power: float, normalized: bool, n_iter: int, momentum: float, length: Optional[int],", "specgram = specgram.reshape([-1] + list(shape[-2:])) value = torch.rand(1) * mask_param", "max_frames_latency: int = 0, frames_per_chunk: int = 0, simulate_first_pass_online: bool", "the frame at which we recompute some of the forward", "rate, phase_advance) >>> x.shape # with 231 == ceil(300 /", "coeffcients at time :math:`t`, :math:`N` is ``(win_length-1)//2``. Args: specgram (Tensor):", "min_value = torch.rand(1) * (specgram.size(axis) - value) mask_start = (min_value.long()).squeeze()", "= specgram.reshape(shape[:-2] + specgram.shape[-2:]) return specgram def compute_deltas( specgram: Tensor,", "= cur_sumsq variance = variance / window_frames variance -= ((cur_sum", "n_mels (int): Number of mel filterbanks norm (str or None):", "audio of dimension (..., time) pad (int): Two sided padding", "<NAME> 2014 IEEE International Conference on Acoustics, Speech and Signal", "2494-2498, doi: 10.1109/ICASSP.2014.6854049. 
\"\"\" shape = waveform.shape waveform = waveform.reshape(-1,", "* torch.pow(torch.pow(10.0, 0.1 * x), power) def _hz_to_mel(freq: float, mel_scale:", "sample_rate // 2, n_freqs) # calculate mel freq bins m_min", "* angles, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=window, length=length) # unpack batch", "@_mod_utils.requires_kaldi() def compute_kaldi_pitch( waveform: torch.Tensor, sample_rate: float, frame_length: float =", "(new_bins, freq, 2) complex_specgrams_0 = complex_specgrams.index_select(-2, time_steps.long()) complex_specgrams_1 = complex_specgrams.index_select(-2,", "-> Tensor: r\"\"\" Compute Normalized Cross-Correlation Function (NCCF). .. math::", "the file size divided by the frame-shift. This makes different", "torch.cos(math.pi / float(n_mels) * (n + 0.5) * k) #", "where the last dimension corresponds to pitch and NCCF. Reference:", "final phase estimates waveform = torch.istft(specgram * angles, n_fft=n_fft, hop_length=hop_length,", "to highlight/apply to x the number of filterbanks. Each column", "= ( (s1 * s2).sum(-1) / (EPSILON + torch.norm(s1, p=2,", "as a form of augmentation. Args: waveform (Tensor): Audio data.", "Tensor, n_fft: int, hop_length: int, win_length: int, power: Optional[float], normalized:", "= false) Returns: Tensor: Tensor of freq of dimension (...,", "1) * (2 * n + 1) / 3 specgram", "= _find_max_per_frame(nccf, sample_rate, freq_high) indices = _median_smoothing(indices, win_length) # Convert", "= 400, soft_min_f0: float = 10.0, penalty_factor: float = 0.1,", "size. (Default: ``n_fft``) power (float): Exponent for the magnitude spectrogram,", "output_lag = [] for lag in range(1, lags + 1):", "the lags is very close to the first half of", "mu + 0.5).to(torch.int64) return x_mu def mu_law_decoding( x_mu: Tensor, quantization_channels:", "1 x_db = x_db.reshape(-1, packed_channels, shape[-2], shape[-1]) x_db = torch.max(x_db,", "(bool, optional): If true, normalize variance to one. 
(bool, default", "(bool): Whether to normalize by magnitude after stft center (bool,", "is centered at time :math:`t \\times \\text{hop\\_length}`. Default: ``True`` pad_mode", "(str, optional): Mode parameter passed to padding (Default: ``\"replicate\"``) Returns:", "= torch.norm(complex_specgrams_1, p=2, dim=-1) phase = angle_1 - angle_0 -", "frame_time)) waveform_length = waveform.size()[-1] num_of_frames = int(math.ceil(waveform_length / frame_size)) p", "``[time, channel]``. \"\"\" bytes = io.BytesIO() torchaudio.backend.sox_io_backend.save(bytes, waveform, sample_rate, channels_first,", "= 1000.0 min_log_mel = (min_log_hz - f_min) / f_sp logstep", "raise ValueError(\"norm must be one of None or 'slaney'\") #", "# https://github.com/pytorch/pytorch/issues/34279 return complex_tensor.pow(2.).sum(-1).pow(0.5 * power) def angle( complex_tensor: Tensor", "-> Tensor: r\"\"\"Compute waveform from a linear scale magnitude spectrogram", "1.0 phase_0 = angle(complex_specgrams[..., :1, :]) # Time Padding complex_specgrams", "(float, optional): Frequency that we down-sample the signal to. Must", "torch.linspace(0, sample_rate // 2, steps=1 + n_fft // 2, device=specgram.device).reshape((-1,", "\\sum_{n=1}^{\\text{N}} n^2} where :math:`d_t` is the deltas at time :math:`t`,", "f\"{sample_rate}\"]], channels_first=channels_first, format=format) return augmented @_mod_utils.requires_kaldi() def compute_kaldi_pitch( waveform: torch.Tensor,", "revising our estimate of the signal energy. Relevant if ``frames_per_chunk", "batch, freq, frames = specgram.size() if rand_init: angles = 2", ") return fb def create_dct( n_mfcc: int, n_mels: int, norm:", "or a batch of spectrograms from a raw audio signal.", "ASSP, vol.32, no.2, pp.236–243, Apr. 1984. 
Args: specgram (Tensor): A", "Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_ This expects an input with values between 0", "and music signal analysis in python.\" In Proceedings of the", "= angles - tprev.mul_(momentum / (1 + momentum)) angles =", "frequency, 3 -> time) Returns: Tensor: Masked spectrograms of dimensions", "cmn_waveform = torch.zeros( num_channels, num_frames, num_feats, dtype=dtype, device=device) for t", "if window_end > t: window_end = max(t + 1, min_cmn_window)", "as the weighted average of the frequency values, weighted by", "`1.0`). Returns: Tensor: Power of the normed input tensor. Shape", "nccf_ballast: float = 7000, lowpass_filter_width: int = 1, upsample_filter_width: int", "waveform.shape num_frames, num_feats = input_shape[-2:] waveform = waveform.view(-1, num_frames, num_feats)", "Args: specgrams (Tensor): Real spectrograms (batch, channel, freq, time) mask_param", "time) Returns: Tensor: Masked spectrogram of dimensions (channel, freq, time)", "and quantization_channels - 1 and returns a signal scaled between", "mel_scale not in ['slaney', 'htk']: raise ValueError('mel_scale should be one", "0), mode=\"constant\", value=0. ) indices[..., :pad_length] = torch.cat(pad_length * [indices[...,", "# twice sum of integer squared denom = n *", "(area normalization). (Default: ``None``) mel_scale (str, optional): Scale to use:", "_compute_nccf( waveform: Tensor, sample_rate: int, frame_time: float, freq_low: int )", "Tensor, ref: float, power: float ) -> Tensor: r\"\"\"Turn a", "Tuple[Tensor, Tensor], b: Tuple[Tensor, Tensor], thresh: float = 0.99 )", "1``. 
window (Tensor): Window tensor that is applied/multiplied to each", "in frames for running average CMN computation (int, default =", "freq_dim = -2 return (freqs * specgram).sum(dim=freq_dim) / specgram.sum(dim=freq_dim) @_mod_utils.requires_sox()", "the phases inverse = torch.istft(specgram * angles, n_fft=n_fft, hop_length=hop_length, win_length=win_length,", "assuming there is a matrix A of size (..., ``n_freqs``),", "one of None or 'slaney'\") # freq bins # Equivalent", "2``) win_length (int): Window size. (Default: ``n_fft``) power (float): Exponent", "mel filterbank has all zero values. \" f\"The value for", "mode (str, optional): Mode parameter passed to padding (Default: ``\"replicate\"``)", "a batch of spectrograms from a raw audio signal. The", "Optional[str] = None, mel_scale: str = \"htk\", ) -> Tensor:", "// 2 # twice sum of integer squared denom =", "power: Optional[float], normalized: bool, center: bool = True, pad_mode: str", ">>> x.shape # with 231 == ceil(300 / 1.3) torch.Size([2,", "= complex_specgrams.index_select(-2, time_steps.long()) complex_specgrams_1 = complex_specgrams.index_select(-2, (time_steps + 1).long()) angle_0", "n + 1) / 3 specgram = torch.nn.functional.pad(specgram, (n, n),", "dtype=x.dtype) x_mu = torch.sign(x) * torch.log1p(mu * torch.abs(x)) / torch.log1p(mu)", "- c_{t-n})}{2 \\sum_{n=1}^{\\text{N}} n^2} where :math:`d_t` is the deltas at", "scale to the decibel scale. The output of each tensor", "has ``[channel, time]`` else ``[time, channel]``. \"\"\" bytes = io.BytesIO()", "(float): Power of the norm. (Default: `1.0`) Returns: (Tensor, Tensor):", "= int(math.ceil(sample_rate / freq_high)) # Find near enough max that", "or \"slaney\".') if mel_scale == \"htk\": return 2595.0 * math.log10(1.0", "norm_0 = torch.norm(complex_specgrams_0, p=2, dim=-1) norm_1 = torch.norm(complex_specgrams_1, p=2, dim=-1)", ") -> Tensor: r\"\"\" Compute Normalized Cross-Correlation Function (NCCF). 
..", "up_slopes = slopes[:, 2:] / f_diff[1:] # (n_freqs, n_mels) fb", "-1) specgrams.masked_fill_((mask >= mask_start) & (mask < mask_end), mask_value) specgrams", "angle_0 - phase_advance phase = phase - 2 * math.pi", "value = torch.rand(specgrams.shape[:2], device=device, dtype=dtype) * mask_param min_value = torch.rand(specgrams.shape[:2],", "-> Tensor: r\"\"\"Detect pitch frequency. It is implemented using normalized", "apply masking on (2 -> frequency, 3 -> time) Returns:", "= t - cmn_window // 2 window_end = window_start +", "(n_mels + 1) slopes = f_pts.unsqueeze(0) - all_freqs.unsqueeze(1) # (n_freqs,", "Tensor, sample_rate: int, format: str, channels_first: bool = True, compression:", "# unpack batch output = output.reshape(shape) return output def _compute_nccf(", "assert momentum >= 0, 'momentum={} < 0'.format(momentum) if normalized: warnings.warn(", "`Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_ This expects an input with values between", "assert norm == \"ortho\" dct[0] *= 1.0 / math.sqrt(2.0) dct", "- top_db).view(-1, 1, 1, 1)) # Repack batch x_db =", "WAV. For mor details see :py:func:`torchaudio.backend.sox_io_backend.save`. encoding (str, optional): Changes", "0``. recompute_frame (int, optional): Only relevant for compatibility with online", "using normalized cross-correlation function and median smoothing. Args: waveform (Tensor):", "in range(1, lags + 1): s1 = waveform[..., :-lag].unfold(-1, frame_size,", "specgram (Tensor): A magnitude-only STFT spectrogram of dimension (..., freq,", "= specgrams.device dtype = specgrams.dtype value = torch.rand(specgrams.shape[:2], device=device, dtype=dtype)", "and ``v_0`` from ``uniform(0, max_v - v)``. All examples will", "Args: waveform (Tensor): The input waveform of shape `(..., time)`.", "10.0) penalty_factor (float, optional): Cost factor for FO change. 
(default:", "x_db.dim() > 2 else 1 x_db = x_db.reshape(-1, packed_channels, shape[-2],", "of the features, which is the default. (default: False) Relevant", "complex tensor input. Args: complex_tensor (Tensor): Tensor shape of `(...,", "\"DB_to_amplitude\", \"mu_law_encoding\", \"mu_law_decoding\", \"complex_norm\", \"angle\", \"magphase\", \"phase_vocoder\", 'mask_along_axis', 'mask_along_axis_iid', 'sliding_window_cmn',", "window_start = 0 if last_window_start == -1: input_part = waveform[:,", "FFT hop_length (int): Length of hop between STFT windows win_length", "complex_specgrams_stretch def mask_along_axis_iid( specgrams: Tensor, mask_param: int, mask_value: float, axis:", "left. (bool, default = false) norm_vars (bool, optional): If true,", "# (channel, freq, time, complex=2) >>> complex_specgrams = torch.randn(2, freq,", "and ``n_fft`` is the number of Fourier bins, and time", "faster convergence, but above 1 may not converge. length (int", "n_iter: int, momentum: float, length: Optional[int], rand_init: bool ) ->", "soft way, must not exceed min-f0 (default: 10.0) penalty_factor (float,", "and ``v_0`` from ``uniform(0, max_v - v)``. Args: specgrams (Tensor):", "specgram: Tensor, window: Tensor, n_fft: int, hop_length: int, win_length: int,", ":] cur_sum += frame_to_add if norm_vars: cur_sumsq += (frame_to_add **", "mel band (area normalization). (Default: ``None``) mel_scale (str, optional): Scale", "of (..., time), where time equals the ``length`` parameter if", "will be applied from indices ``[v_0, v_0 + v)``, where", "input=waveform, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=window, center=center, pad_mode=pad_mode, normalized=False, onesided=onesided, return_complex=True,", "min_value = torch.rand(specgrams.shape[:2], device=device, dtype=dtype) * (specgrams.size(axis) - value) #", "frequency. 
Note: If the max among all the lags is", "1) / 2 * mu + 0.5).to(torch.int64) return x_mu def", "waveform.shape[-1:]) return waveform def amplitude_to_DB( x: Tensor, multiplier: float, amin:", "== \"ortho\" dct[0] *= 1.0 / math.sqrt(2.0) dct *= math.sqrt(2.0", "int = 30, freq_low: int = 85, freq_high: int =", "bytes, effects=[[\"rate\", f\"{sample_rate}\"]], channels_first=channels_first, format=format) return augmented @_mod_utils.requires_kaldi() def compute_kaldi_pitch(", "filter (Hz) (default: 1000) resample_frequency (float, optional): Frequency that we", "optional): Frame length in milliseconds. (default: 25.0) frame_shift (float, optional):", "python in science conference, pp. 18-25. 2015. * [2] <NAME>.,", "warnings.warn( \"At least one mel filterbank has all zero values.", "# (n_freqs, n_mels) fb = torch.max(zero, torch.min(down_slopes, up_slopes)) if norm", "complex_tensor.pow(2.).sum(-1).pow(0.5 * power) def angle( complex_tensor: Tensor ) -> Tensor:", "edge won't be snipped, so that the number of frames", "x_mu: Tensor, quantization_channels: int ) -> Tensor: r\"\"\"Decode mu-law encoded", "7000, lowpass_filter_width: int = 1, upsample_filter_width: int = 5, max_frames_latency:", "Tensor shape of `(..., complex=2)` power (float): Power of the", "the ``length`` parameter if given. \"\"\" assert momentum < 1,", "torch.nn.functional.pad(complex_specgrams, [0, 0, 0, 2]) # (new_bins, freq, 2) complex_specgrams_0", "``uniform(0, mask_param)``, and ``v_0`` from ``uniform(0, max_v - v)``. 
All", "(int, optional): The window length used for computing delta (Default:", "torch.max(nccf[..., lag_min:half_size], -1) best = _combine_max(half, best) indices = best[1]", "2, steps=1 + n_fft // 2, device=specgram.device).reshape((-1, 1)) freq_dim =", "Tensor of audio of dimension (..., freq, time) win_length (int,", "enough max that is smallest best = torch.max(nccf[..., lag_min:], -1)", "int = 0, simulate_first_pass_online: bool = False, recompute_frame: int =", "Power of the norm. (Default: `1.0`) Returns: (Tensor, Tensor): The", "to row-wise data of size (``n_mels``, ``n_mfcc``). \"\"\" # http://en.wikipedia.org/wiki/Discrete_cosine_transform#DCT-II", "of decoding (adds latency only at start). Only applicable if", "# Create broadcastable mask mask_start = min_value[..., None, None] mask_end", "in hertz f_diff = f_pts[1:] - f_pts[:-1] # (n_mels +", "window_frames == 1: cmn_waveform[:, t, :] = torch.zeros( num_channels, num_feats,", "(int, default = 100) center (bool, optional): If true, use", "factor reduces NCCF for quiet frames (default: 7000) lowpass_filter_width (int,", "Args: x (Tensor): Input spectrogram(s) before being converted to decibel", "num_channels = waveform.shape[0] dtype = waveform.dtype device = waveform.device last_window_start", "return 700.0 * (10.0**(mels / 2595.0) - 1.0) # Fill", "to highlight/apply f_min (float): Minimum frequency (Hz) f_max (float): Maximum", "encoding \"\"\" mu = quantization_channels - 1.0 if not x.is_floating_point():", "first if bigger than a multiplicative factor of the second,", "python.\" In Proceedings of the 14th python in science conference,", "indices[..., :pad_length] = torch.cat(pad_length * [indices[..., pad_length].unsqueeze(-1)], dim=-1) roll =", "norm: Optional[str] = None, mel_scale: str = \"htk\", ) ->", "frame :math:`i` with lag :math:`m`, :math:`w` is the waveform, :math:`N`", "argument normalized is not used in Griffin-Lim, \" \"and will", "-> Tensor: r\"\"\"Create a DCT transformation matrix 
with shape (``n_mels``,", "of `(..., )` \"\"\" return torch.atan2(complex_tensor[..., 1], complex_tensor[..., 0]) def", "frame_size)[..., :num_of_frames, :] output_frames = ( (s1 * s2).sum(-1) /", "is a filterbank so that assuming there is a matrix", "Dimension of `(..., freq, time, complex=2)` rate (float): Speed-up factor", "center: bool = False, norm_vars: bool = False, ) ->", "time)`. Batched inputs should include a channel dimension and have", ")` \"\"\" return torch.atan2(complex_tensor[..., 1], complex_tensor[..., 0]) def magphase( complex_tensor:", "27.0 log_t = (mels >= min_log_mel) freqs[log_t] = min_log_hz *", "+ 0.5).to(torch.int64) return x_mu def mu_law_decoding( x_mu: Tensor, quantization_channels: int", "determines filter width when upsampling NCCF. (default: 5) max_frames_latency (int,", "max that is smallest best = torch.max(nccf[..., lag_min:], -1) half_size", "1 if window_start < 0: window_end -= window_start window_start =", "= torch.istft(specgram * angles, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=window, length=length).float() #", ":]) # Time Padding complex_specgrams = torch.nn.functional.pad(complex_specgrams, [0, 0, 0,", "a tensor from the decibel scale to the power/amplitude scale.", "(float): Reference which the output will be scaled by. power", ":num_of_frames, :] output_frames = ( (s1 * s2).sum(-1) / (EPSILON", "and <NAME> 2014 IEEE International Conference on Acoustics, Speech and", "hop_length=hop_length, win_length=win_length, window=window, length=length) # unpack batch waveform = waveform.reshape(shape[:-2]", "and norm == \"slaney\": # Slaney-style mel is scaled to", "(``n_mels``, ``n_mfcc``), normalized depending on norm. Args: n_mfcc (int): Number", "use: ``htk`` or ``slaney``. (Default: ``htk``) Returns: Tensor: Triangular filter", "frames) (Default: ``30``). freq_low (int, optional): Lowest frequency that can", "a linear scale magnitude spectrogram using the Griffin-Lim transformation. 
Implementation", "80. (Default: ``None``) Returns: Tensor: Output tensor in decibel scale", "optional): whether to pad :attr:`waveform` on both sides so that", "Args: n_freqs (int): Number of frequencies to highlight/apply f_min (float):", "phase = phase - 2 * math.pi * torch.round(phase /", "estimation from modified short-time Fourier transform,\" IEEE Trans. ASSP, vol.32,", "size (``n_mels``, ``n_mfcc``). \"\"\" # http://en.wikipedia.org/wiki/Discrete_cosine_transform#DCT-II n = torch.arange(float(n_mels)) k", "sharper filter. (default: 1) upsample_filter_width (int, optional): Integer that determines", "upsample_filter_width (int, optional): Integer that determines filter width when upsampling", "waveform.dtype device = waveform.device last_window_start = last_window_end = -1 cur_sum", "-> Tensor: r\"\"\"Create a spectrogram or a batch of spectrograms", "assumes the signal has been scaled to between -1 and", "sides so that the :math:`t`-th frame is centered at time", "(float, optional): Cutoff frequency for LowPass filter (Hz) (default: 1000)", "amplitude. Returns: Tensor: Output tensor in power/amplitude scale. \"\"\" return", "best[1] # Add back minimal lag indices += lag_min #", "1: cmn_waveform[:, t, :] = torch.zeros( num_channels, num_feats, dtype=dtype, device=device)", "to apply masking on (2 -> frequency, 3 -> time)", ":num_of_frames, :] s2 = waveform[..., lag:].unfold(-1, frame_size, frame_size)[..., :num_of_frames, :]", "on the current frame (to the extent possible, modulo end", "-> Tensor: r\"\"\" Compute the spectral centroid for each channel", "- 1.0 if not x_mu.is_floating_point(): x_mu = x_mu.to(torch.float) mu =", "n), mode=mode) kernel = torch.arange(-n, n + 1, 1, device=device,", "10 ** (-9) freq = sample_rate / (EPSILON + indices.to(torch.float))", "short-time Fourier transform,\" IEEE Trans. 
ASSP, vol.32, no.2, pp.236–243, Apr.", "of dimensions (channel, freq, time) \"\"\" # pack batch shape", "is not None and norm == \"slaney\": # Slaney-style mel", "the forward pointers, after revising our estimate of the signal", "None, None] mask = torch.arange(0, specgrams.size(axis), device=device, dtype=dtype) # Per", "Tensor: Power of the normed input tensor. Shape of `(...,", "value of that tensor, and so may return different values", "on (2 -> frequency, 3 -> time) Returns: Tensor: Masked", "# unpack batch specgram = specgram.reshape(shape[:-2] + specgram.shape[-2:]) return specgram", "r\"\"\" Apply codecs as a form of augmentation. Args: waveform", "# pack batch shape = list(waveform.size()) waveform = waveform.reshape([-1] +", "warnings from typing import Optional, Tuple import torch from torch", "frames (default: 7000) lowpass_filter_width (int, optional): Integer that determines filter", "Acoustics (pp. 1-4), Oct. 2013. * [3] <NAME> and <NAME>,", "the triangular mel weights by the width of the mel", "Args: complex_tensor (Tensor): Tensor shape of `(..., complex=2)` power (float):", "complex_specgrams_0 = complex_specgrams.index_select(-2, time_steps.long()) complex_specgrams_1 = complex_specgrams.index_select(-2, (time_steps + 1).long())", "of frequencies to highlight/apply to x the number of filterbanks.", "version of the features, which is the default. (default: False)", "= -2 return (freqs * specgram).sum(dim=freq_dim) / specgram.sum(dim=freq_dim) @_mod_utils.requires_sox() def", "0, math.pi * hop_length, freq)[..., None] >>> x = phase_vocoder(complex_specgrams,", "with shape (``n_mels``, ``n_mfcc``), normalized depending on norm. Args: n_mfcc", "bool ) -> Tensor: r\"\"\"Compute waveform from a linear scale", "start of decoding (adds latency only at start). 
Only applicable", "optional): controls the padding method used when :attr:`center` is ``True``.", "``True`` pad_mode (string, optional): controls the padding method used when", "This function computes the equivalent of `compute-kaldi-pitch-feats` from Kaldi. Args:", "0, 0, 2]) # (new_bins, freq, 2) complex_specgrams_0 = complex_specgrams.index_select(-2,", "To suppress this warning, \" \"please use `normalized=False`.\") # pack", "value and amin)) top_db (float or None, optional): Minimum negative", "(n_freqs, n_mels) up_slopes = slopes[:, 2:] / f_diff[1:] # (n_freqs,", "normalized: bool, center: bool = True, pad_mode: str = \"reflect\",", "torch.rand(specgrams.shape[:2], device=device, dtype=dtype) * mask_param min_value = torch.rand(specgrams.shape[:2], device=device, dtype=dtype)", "phase estimates waveform = torch.istft(specgram * angles, n_fft=n_fft, hop_length=hop_length, win_length=win_length,", "It is implemented using normalized cross-correlation function and median smoothing.", "= mask_value else: raise ValueError('Only Frequency and Time masking are", "be one of None or 'slaney'\") # freq bins #", "of audio of dimension (..., freq, time) sample_rate (int): The", "if not x_mu.is_floating_point(): x_mu = x_mu.to(torch.float) mu = torch.tensor(mu, dtype=x_mu.dtype)", "supported formats. For more details see :py:func:`torchaudio.backend.sox_io_backend.save`. bits_per_sample (int, optional):", "vol.32, no.2, pp.236–243, Apr. 1984. Args: specgram (Tensor): A magnitude-only", "steps=1 + n_fft // 2, device=specgram.device).reshape((-1, 1)) freq_dim = -2", "is not None and norm != \"slaney\": raise ValueError(\"norm must", "would be ``A * create_fb_matrix(A.size(-1), ...)``. \"\"\" if norm is", "\"\"\" mask = (a[0] > thresh * b[0]) values =", "cur_sum -= frame_to_remove if norm_vars: cur_sumsq -= (frame_to_remove ** 2)", "(win_length - 1) // 2 # twice sum of integer", "a channel dimension and have the form `(batch, channel, freq,", "to use: ``htk`` or ``slaney``. 
(Default: ``htk``) Returns: Tensor: Triangular", "``htk``) Returns: Tensor: Triangular filter banks (fb matrix) of size", "= torch.tensor(mu, dtype=x.dtype) x_mu = torch.sign(x) * torch.log1p(mu * torch.abs(x))", "warning, \" \"please use `normalized=False`.\") # pack batch shape =", "min_log_hz = 1000.0 min_log_mel = (min_log_hz - f_min) / f_sp", "(bool, optional): controls whether to return half of results to", "math.sqrt(2.0 / float(n_mels)) return dct.t() def mu_law_encoding( x: Tensor, quantization_channels:", "/ 2595.0) - 1.0) # Fill in the linear scale", "sample_rate / (EPSILON + indices.to(torch.float)) # unpack batch freq =", "``slaney``. (Default: ``htk``) Returns: mels (float): Frequency in Mels \"\"\"", "is ``True``. Default: ``\"reflect\"`` onesided (bool, optional): controls whether to", "freq, frames) else: angles = torch.zeros(batch, freq, frames) angles =", "None: dct *= 2.0 else: assert norm == \"ortho\" dct[0]", "r\"\"\" Apply a mask along ``axis``. Mask will be applied", "= x_db.reshape(shape) return x_db def DB_to_amplitude( x: Tensor, ref: float,", "(int, optional): Highest frequency that can be detected (Hz) (Default:", "\"create_fb_matrix\", \"create_dct\", \"compute_deltas\", \"detect_pitch_frequency\", \"DB_to_amplitude\", \"mu_law_encoding\", \"mu_law_decoding\", \"complex_norm\", \"angle\", \"magphase\",", "def mask_along_axis_iid( specgrams: Tensor, mask_param: int, mask_value: float, axis: int", "(default: True) Returns: Tensor: Pitch feature. Shape: ``(batch, frames 2)``", "values for an audio clip split into snippets vs. a", "data. Must be 2 dimensional. See also ```channels_first```. sample_rate (int):", "each mel point and each stft freq point in hertz", "by magnitude after stft. 
n_iter (int): Number of iteration for", "from a linear scale magnitude spectrogram using the Griffin-Lim transformation.", "Args: x (Tensor): Input tensor before being converted to power/amplitude", "that correspond to what an online decoder would see in", "float, length: Optional[int], rand_init: bool ) -> Tensor: r\"\"\"Compute waveform", "# Number of lags to check lags = int(math.ceil(sample_rate /", "phase_0 = angle(complex_specgrams[..., :1, :]) # Time Padding complex_specgrams =", "filter width when upsampling NCCF. (default: 5) max_frames_latency (int, optional):", "-> Tensor: \"\"\"Convert mel bin numbers to frequencies. Args: mels", "International Conference on Acoustics, Speech and Signal Processing (ICASSP), Florence,", "mel weights by the width of the mel band (area", "Returns: Tensor: Dimension (..., freq, time), freq is ``n_fft //", "(..., freq, time), freq is ``n_fft // 2 + 1``", "calibration offset indices += 1 return indices def _median_smoothing( indices:", "1)[:, -1, :] else: if window_start > last_window_start: frame_to_remove =", "effects). If false, window is to the left. (bool, default", "mels (Tensor): Mel frequencies mel_scale (str, optional): Scale to use:", "Tensor: Complex Specgrams Stretch with dimension of `(..., freq, ceil(time/rate),", "we recompute some of the forward pointers, after revising our", "hop_length: int, win_length: int, power: float, normalized: bool, n_iter: int,", "= 25.0, frame_shift: float = 10.0, min_f0: float = 50,", "the spectrogram rebuilt = torch.view_as_real( torch.stft( input=inverse, n_fft=n_fft, hop_length=hop_length, win_length=win_length,", "+ 1, 1, device=device, dtype=dtype).repeat(specgram.shape[1], 1, 1) output = torch.nn.functional.conv1d(specgram,", "the spectral centroid for each channel along the time axis.", "r\"\"\" Apply median smoothing to the 1D tensor over the", "2)` into its magnitude and phase. 
Args: complex_tensor (Tensor): Tensor", "constant energy per channel enorm = 2.0 / (f_pts[2:n_mels +", "Tensor: r\"\"\"Compute delta coefficients of a tensor, usually a spectrogram:", "for power and 20. for amplitude amin (float): Number to", "return values, indices def _find_max_per_frame( nccf: Tensor, sample_rate: int, freq_high:", "batch example masking specgrams = specgrams.transpose(axis, -1) specgrams.masked_fill_((mask >= mask_start)", "Relevant if ``frames_per_chunk > 0``. (default: 500) snip_edges (bool, optional):", "(2 * math.pi)) # Compute Phase Accum phase = phase", "batch shape = specgram.size() specgram = specgram.reshape([-1] + list(shape[-2:])) specgram", "None) Returns: Tensor: The transformation matrix, to be right-multiplied to", "<NAME> and <NAME> 2014 IEEE International Conference on Acoustics, Speech", "None, then the complex spectrum is returned instead. normalized (bool):", "of results to avoid redundancy. Default: ``True`` Returns: Tensor: Dimension", "each frame/window n_fft (int): Size of FFT hop_length (int): Length", "/ (EPSILON + torch.norm(s2, p=2, dim=-1)).pow(2) ) output_lag.append(output_frames.unsqueeze(-1)) nccf =", "0: # TODO add \"with torch.no_grad():\" back when JIT supports", ":math:`c_t` is the spectrogram coeffcients at time :math:`t`, :math:`N` is", "* (n + 1) * (2 * n + 1)", ">>> x = phase_vocoder(complex_specgrams, rate, phase_advance) >>> x.shape # with", "-1) return nccf def _combine_max( a: Tuple[Tensor, Tensor], b: Tuple[Tensor,", "pad > 0: # TODO add \"with torch.no_grad():\" back when", "+ 2) # create overlapping triangles zero = torch.zeros(1) down_slopes", "the norm. (Default: `1.0`) Returns: (Tensor, Tensor): The magnitude and", "use (either 'ortho' or None) Returns: Tensor: The transformation matrix,", "Scale to use: ``htk`` or ``slaney``. 
(Default: ``htk``) Returns: Tensor:", "of `(..., )` \"\"\" # Replace by torch.norm once issue", "def detect_pitch_frequency( waveform: Tensor, sample_rate: int, frame_time: float = 10", "win_length=win_length, window=window, center=center, pad_mode=pad_mode, normalized=False, onesided=onesided, return_complex=True, ) # unpack", "Number to clamp ``x`` db_multiplier (float): Log10(max(reference value and amin))", "zero = torch.zeros(1) down_slopes = (-1.0 * slopes[:, :-2]) /", "Replace by torch.norm once issue is fixed # https://github.com/pytorch/pytorch/issues/34279 return", "The window length used for computing delta (Default: ``5``) mode", "mask_param: int, mask_value: float, axis: int ) -> Tensor: r\"\"\"", "point and each stft freq point in hertz f_diff =", "Tensor, power: float = 1.0 ) -> Tuple[Tensor, Tensor]: r\"\"\"Separate", "Maximum F0 to search for (Hz) (default: 400.0) soft_min_f0 (float,", "lowpass_filter_width, upsample_filter_width, max_frames_latency, frames_per_chunk, simulate_first_pass_online, recompute_frame, snip_edges, ) result =", "= torch.linspace( >>> 0, math.pi * hop_length, freq)[..., None] >>>", "None): Exponent for the magnitude spectrogram, (must be > 0)", "iterate tprev = rebuilt # Invert with our current estimate", "int, power: Optional[float], normalized: bool, center: bool = True, pad_mode:", "x_mu (Tensor): Input tensor quantization_channels (int): Number of channels Returns:", "max_frames_latency, frames_per_chunk, simulate_first_pass_online, recompute_frame, snip_edges, ) result = result.reshape(shape[:-1] +", "when :attr:`center` is ``True``. 
Default: ``\"reflect\"`` onesided (bool, optional): controls", "waveform, :math:`N` is the length of a frame, :math:`b_i` is", "1, 1)) # Repack batch x_db = x_db.reshape(shape) return x_db", "of deltas of dimension (..., freq, time) Example >>> specgram", "shape = specgram.size() specgram = specgram.reshape([-1] + list(shape[-2:])) specgram =", "num_feats)) if len(input_shape) == 2: cmn_waveform = cmn_waveform.squeeze(0) return cmn_waveform", "for formats other than WAV. For mor details see :py:func:`torchaudio.backend.sox_io_backend.save`.", "``htk``) Returns: mels (float): Frequency in Mels \"\"\" if mel_scale", "waveform = waveform.reshape(-1, shape[-1]) result = torch.ops.torchaudio.kaldi_ComputeKaldiPitch( waveform, sample_rate, frame_length,", "torch.arange(float(n_mels)) k = torch.arange(float(n_mfcc)).unsqueeze(1) dct = torch.cos(math.pi / float(n_mels) *", "``htk``) Returns: freqs (Tensor): Mels converted in Hz \"\"\" if", "mask * a[1] + ~mask * b[1] return values, indices", "bool = True, pad_mode: str = \"reflect\", onesided: bool =", "``v`` is sampled from ``uniform(0, mask_param)``, and ``v_0`` from ``uniform(0,", "passed to padding (Default: ``\"replicate\"``) Returns: Tensor: Tensor of deltas", "lowpass_filter_width (int, optional): Integer that determines filter width of lowpass", "r\"\"\"Create a spectrogram or a batch of spectrograms from a", "<NAME>, <NAME>, <NAME>, <NAME>, and <NAME>. \"librosa: Audio and music", "'sliding_window_cmn', \"spectral_centroid\", \"apply_codec\", ] def spectrogram( waveform: Tensor, pad: int,", "db_multiplier if top_db is not None: # Expand batch shape", "-1 cur_sum = torch.zeros(num_channels, num_feats, dtype=dtype, device=device) cur_sumsq = torch.zeros(num_channels,", "or ``slaney``. 
(Default: ``htk``) Returns: Tensor: Triangular filter banks (fb", "freq, time) \"\"\" # pack batch shape = specgram.size() specgram", "= cmn_waveform.squeeze(0) return cmn_waveform def spectral_centroid( waveform: Tensor, sample_rate: int,", "time) \"\"\" if axis != 2 and axis != 3:", "/ (EPSILON + indices.to(torch.float)) # unpack batch freq = freq.reshape(shape[:-1]", "* hop_length, freq)[..., None] >>> x = phase_vocoder(complex_specgrams, rate, phase_advance)", "all_freqs = torch.linspace(0, sample_rate // 2, n_freqs) # calculate mel", "window_end - window_start last_window_start = window_start last_window_end = window_end cmn_waveform[:,", "(channel, freq, time, complex=2) >>> complex_specgrams = torch.randn(2, freq, 300,", "variance cmn_waveform = cmn_waveform.view(input_shape[:-2] + (num_frames, num_feats)) if len(input_shape) ==", "process. momentum (float): The momentum parameter for fast Griffin-Lim. Setting", "logstep return mels def _mel_to_hz(mels: Tensor, mel_scale: str = \"htk\")", "original Griffin-Lim method. Values near 1 can lead to faster", "return x_mu def mu_law_decoding( x_mu: Tensor, quantization_channels: int ) ->", "None] mask_end = (min_value + value)[..., None, None] mask =", "Tensor: r\"\"\"Turn a tensor from the decibel scale to the", "channel, freq, time) mask_param (int): Number of columns to be", "lowpass_cutoff (float, optional): Cutoff frequency for LowPass filter (Hz) (default:", "# unpack batch freq = freq.reshape(shape[:-1] + list(freq.shape[-1:])) return freq", "feature processing (affects output only if ``frames_per_chunk > 0`` and", "power/amplitude scale. ref (float): Reference which the output will be", "0.99 ) -> Tuple[Tensor, Tensor]: \"\"\" Take value from first", "<https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_ This expects an input with values between 0 and", "to 0 rebuilt = torch.tensor(0.) 
for _ in range(n_iter): #", "indices += 1 return indices def _median_smoothing( indices: Tensor, win_length:", "1, 1) output = torch.nn.functional.conv1d(specgram, kernel, groups=specgram.shape[1]) / denom #", "a spectrogram or a batch of spectrograms from a raw", "smoothing. Args: waveform (Tensor): Tensor of audio of dimension (...,", "in Griffin-Lim, \" \"and will be removed in v0.9.0 release.", "\\times \\text{hop\\_length}`. Default: ``True`` pad_mode (string, optional): controls the padding", "recovery process. momentum (float): The momentum parameter for fast Griffin-Lim.", "(bool): Initializes phase randomly if True, to zero otherwise. Returns:", "specgram def compute_deltas( specgram: Tensor, win_length: int = 5, mode:", "pitch frequency. It is implemented using normalized cross-correlation function and", "+ shape[-1:]) nccf = _compute_nccf(waveform, sample_rate, frame_time, freq_low) indices =", "win_length: int, ) -> Tensor: r\"\"\" Compute the spectral centroid", "of dimension (..., time) sample_rate (int): Sample rate of the", "max_f0, soft_min_f0, penalty_factor, lowpass_cutoff, resample_frequency, delta_pitch, nccf_ballast, lowpass_filter_width, upsample_filter_width, max_frames_latency,", "torch.cumsum(input_part ** 2, 1)[:, -1, :] else: if window_start >", "r\"\"\"Decode mu-law encoded signal. For more info see the `Wikipedia", "torch.sum(input_part, 1) if norm_vars: cur_sumsq += torch.cumsum(input_part ** 2, 1)[:,", "some of the forward pointers, after revising our estimate of", "frame_shift (float, optional): Frame shift in milliseconds. (default: 10.0) min_f0", "m_max = _hz_to_mel(f_max, mel_scale=mel_scale) m_pts = torch.linspace(m_min, m_max, n_mels +", "construction by Librosa all_freqs = torch.linspace(0, sample_rate // 2, n_freqs)", "(Default: ``None``) Returns: Tensor: Output tensor in decibel scale \"\"\"", "s2).sum(-1) / (EPSILON + torch.norm(s1, p=2, dim=-1)).pow(2) / (EPSILON +", "the form `(..., freq, time)`. 
Batched inputs should include a", "# calculate the difference between each mel point and each", "not x.is_floating_point(): x = x.to(torch.float) mu = torch.tensor(mu, dtype=x.dtype) x_mu", "# Compute Phase Accum phase = phase + phase_advance phase", "freq, time) \"\"\" if axis != 2 and axis !=", "device=device, dtype=dtype).repeat(specgram.shape[1], 1, 1) output = torch.nn.functional.conv1d(specgram, kernel, groups=specgram.shape[1]) /", "= variance / window_frames variance -= ((cur_sum ** 2) /", "Optional[float] = None, encoding: Optional[str] = None, bits_per_sample: Optional[int] =", "1984. Args: specgram (Tensor): A magnitude-only STFT spectrogram of dimension", "2595.0 * math.log10(1.0 + (freq / 700.0)) # Fill in", "supported formats. For more details see :py:func:`torchaudio.backend.sox_io_backend.save`. Returns: torch.Tensor: Resulting", "along ``axis``. Mask will be applied from indices ``[v_0, v_0", "( Default: ``win_length // 2``) win_length (int): Window size. (Default:", "``n_fft // 2 + 1`` and ``n_fft`` is the number", "to use: ``htk`` or ``slaney``. (Default: ``htk``) Returns: freqs (Tensor):", "alphas * norm_1 + (1 - alphas) * norm_0 real_stretch", "frequency for LowPass filter (Hz) (default: 1000) resample_frequency (float, optional):", "indices.to(torch.float)) # unpack batch freq = freq.reshape(shape[:-1] + list(freq.shape[-1:])) return", "Number of iteration for phase recovery process. momentum (float): The", "Add back minimal lag indices += lag_min # Add 1", "(default: 25.0) frame_shift (float, optional): Frame shift in milliseconds. (default:", "(NCCF). .. math:: \\phi_i(m) = \\frac{\\sum_{n=b_i}^{b_i + N-1} w(n) w(m+n)}{\\sqrt{E(b_i)", "x_db -= multiplier * db_multiplier if top_db is not None:", "Hz mel_scale (str, optional): Scale to use: ``htk`` or ``slaney``.", "/ freq_low)) frame_size = int(math.ceil(sample_rate * frame_time)) waveform_length = waveform.size()[-1]", ":math:`\\sum_{n=j}^{j+N-1} w^2(n)`. 
\"\"\" EPSILON = 10 ** (-9) # Number", "and time is the number of window hops (n_frame). \"\"\"", "of a frame (Default: ``10 ** (-2)``). win_length (int, optional):", "from modified short-time Fourier transform,\" IEEE Trans. ASSP, vol.32, no.2,", "specgram (Tensor): Real spectrogram (channel, freq, time) mask_param (int): Number", "device = specgram.device dtype = specgram.dtype # pack batch shape", ") -> Tensor: r\"\"\" Apply codecs as a form of", "* math.pi * torch.round(phase / (2 * math.pi)) # Compute", "a factor of ``rate``. Args: complex_specgrams (Tensor): Dimension of `(...,", "# Expand batch shape = x_db.size() packed_channels = shape[-3] if", "bins m_min = _hz_to_mel(f_min, mel_scale=mel_scale) m_max = _hz_to_mel(f_max, mel_scale=mel_scale) m_pts", "0 rebuilt = torch.tensor(0.) for _ in range(n_iter): # Store", "power, etc. If None, then the complex spectrum is returned", "norm_1 + (1 - alphas) * norm_0 real_stretch = mag", "m_max, n_mels + 2) f_pts = _mel_to_hz(m_pts, mel_scale=mel_scale) # calculate", "# (n_freqs, n_mels) up_slopes = slopes[:, 2:] / f_diff[1:] #", "equals 1, will compute DB to power. If 0.5, will", "x (Tensor): Input spectrogram(s) before being converted to decibel scale.", "angle of complex tensor input. Args: complex_tensor (Tensor): Tensor shape", "complex_specgrams_stretch = complex_specgrams_stretch.reshape(shape[:-3] + complex_specgrams_stretch.shape[1:]) return complex_specgrams_stretch def mask_along_axis_iid( specgrams:", "when JIT supports it waveform = torch.nn.functional.pad(waveform, (pad, pad), \"constant\")", "= waveform[:, window_start: window_end - window_start, :] cur_sum += torch.sum(input_part,", "more details see :py:func:`torchaudio.backend.sox_io_backend.save`. bits_per_sample (int, optional): Changes the bit", "least one mel filterbank has all zero values. \" f\"The", "power (float): Power of the norm. (Default: `1.0`). Returns: Tensor:", "is ``(win_length-1)//2``. 
Args: specgram (Tensor): Tensor of audio of dimension", "amplitude amin (float): Number to clamp ``x`` db_multiplier (float): Log10(max(reference", "if bigger than a multiplicative factor of the second, elementwise.", "of `waveform`. frame_length (float, optional): Frame length in milliseconds. (default:", "int ) -> Tensor: r\"\"\"Encode signal based on mu-law companding.", "= torch.cat([phase_0, phase[..., :-1]], dim=-1) phase_acc = torch.cumsum(phase, -1) mag", "int, format: str, channels_first: bool = True, compression: Optional[float] =", "specgrams.size(axis), device=device, dtype=dtype) # Per batch example masking specgrams =", "min_log_mel + math.log(freq / min_log_hz) / logstep return mels def", "# pack batch shape = waveform.size() waveform = waveform.reshape(-1, shape[-1])", "frames) else: angles = torch.zeros(batch, freq, frames) angles = torch.stack([angles.cos(),", "the :math:`t`-th frame is centered at time :math:`t \\times \\text{hop\\_length}`.", "with 231 == ceil(300 / 1.3) torch.Size([2, 1025, 231, 2])", "too low.\" ) return fb def create_dct( n_mfcc: int, n_mels:", "specgram.reshape(1, -1, shape[-1]) assert win_length >= 3 n = (win_length", "median smoothing to the 1D tensor over the given window.", "spectrogram rebuilt = torch.view_as_real( torch.stft( input=inverse, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=window,", "For more info see the `Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_ This expects", "Must be more than twice lowpass-cutoff. (default: 4000) delta_pitch( float,", "Acoustics, Speech and Signal Processing (ICASSP), Florence, 2014, pp. 
2494-2498,", "\" f\"The value for `n_mels` ({n_mels}) may be set too", "(int, optional): Integer that determines filter width of lowpass filter,", "torch.randn(1, 40, 1000) >>> delta = compute_deltas(specgram) >>> delta2 =", "window_start last_window_end = window_end cmn_waveform[:, t, :] = waveform[:, t,", "if power is not None: if power == 1.0: return", "center=True, pad_mode='reflect', normalized=False, onesided=True, return_complex=True, ) ) # Update our", "being converted to power/amplitude scale. ref (float): Reference which the", "# And now the nonlinear scale min_log_hz = 1000.0 min_log_mel", "+= frame_to_add if norm_vars: cur_sumsq += (frame_to_add ** 2) window_frames", "method. Values near 1 can lead to faster convergence, but", "decibel scale. Input should take the form `(..., freq, time)`.", "batch shape = specgram.size() specgram = specgram.reshape([-1] + list(shape[-2:])) value", "the supported formats. For more details see :py:func:`torchaudio.backend.sox_io_backend.save`. Returns: torch.Tensor:", "(default: 10.0) penalty_factor (float, optional): Cost factor for FO change.", "Compute Phase Accum phase = phase + phase_advance phase =", "power. If 0.5, will compute DB to amplitude. Returns: Tensor:", "= torch.cat(output_lag, -1) return nccf def _combine_max( a: Tuple[Tensor, Tensor],", "time) Example >>> specgram = torch.randn(1, 40, 1000) >>> delta", "= (min_log_hz - f_min) / f_sp logstep = math.log(6.4) /", "to the decibel scale. The output of each tensor in", "value) # Create broadcastable mask mask_start = min_value[..., None, None]", "Increasing this factor reduces NCCF for quiet frames (default: 7000)", "nccf = torch.cat(output_lag, -1) return nccf def _combine_max( a: Tuple[Tensor,", "frames_per_chunk, simulate_first_pass_online, recompute_frame, snip_edges, ) result = result.reshape(shape[:-1] + result.shape[-2:])", "exceed min-f0 (default: 10.0) penalty_factor (float, optional): Cost factor for", "either magnitude-only or complex. 
Args: waveform (Tensor): Tensor of audio", "enorm = 2.0 / (f_pts[2:n_mels + 2] - f_pts[:n_mels]) fb", "Number of mel filterbanks sample_rate (int): Sample rate of the", "- 2 * math.pi * torch.round(phase / (2 * math.pi))", "2014, pp. 2494-2498, doi: 10.1109/ICASSP.2014.6854049. \"\"\" shape = waveform.shape waveform", "# Fill in the log-scale part min_log_hz = 1000.0 min_log_mel", "max_v - v)``. Args: specgrams (Tensor): Real spectrograms (batch, channel,", "be masked will be uniformly sampled from [0, mask_param] mask_value", "the waveform, :math:`N` is the length of a frame, :math:`b_i`", "and ``simulate_first_pass_online=True``) (default: 0) frames_per_chunk (int, optional): The number of", "of size (..., ``n_freqs``), the applied result would be ``A", "encoding, bits_per_sample ) bytes.seek(0) augmented, _ = torchaudio.sox_effects.sox_effects.apply_effects_file( bytes, effects=[[\"rate\",", "for an audio clip split into snippets vs. a full", "torch.stack([real_stretch, imag_stretch], dim=-1) # unpack batch complex_specgrams_stretch = complex_specgrams_stretch.reshape(shape[:-3] +", "mask_start) & (mask < mask_end), mask_value) specgrams = specgrams.transpose(axis, -1)", "device = waveform.device last_window_start = last_window_end = -1 cur_sum =", "shape `(..., time)`. sample_rate (float): Sample rate of `waveform`. frame_length", "len(input_shape) == 2: cmn_waveform = cmn_waveform.squeeze(0) return cmn_waveform def spectral_centroid(", "1) slopes = f_pts.unsqueeze(0) - all_freqs.unsqueeze(1) # (n_freqs, n_mels +", "filterbanks norm (str or None): Norm to use (either 'ortho'", "hop_length (int): Length of hop between STFT windows. ( Default:", "approx constant energy per channel enorm = 2.0 / (f_pts[2:n_mels", "2 * math.pi * torch.round(phase / (2 * math.pi)) #", "max_v - v)``. 
All examples will have the same mask", "to check lags = int(math.ceil(sample_rate / freq_low)) frame_size = int(math.ceil(sample_rate", "mask_start = min_value[..., None, None] mask_end = (min_value + value)[...,", "randomly if True, to zero otherwise. Returns: torch.Tensor: waveform of", "/ (1 + momentum)) angles = angles.div(complex_norm(angles).add(1e-16).unsqueeze(-1).expand_as(angles)) # Return the", "return (freqs * specgram).sum(dim=freq_dim) / specgram.sum(dim=freq_dim) @_mod_utils.requires_sox() def apply_codec( waveform:", "(min_log_hz - f_min) / f_sp logstep = math.log(6.4) / 27.0", "1) output = torch.nn.functional.conv1d(specgram, kernel, groups=specgram.shape[1]) / denom # unpack", "And now the nonlinear scale min_log_hz = 1000.0 min_log_mel =", "per channel enorm = 2.0 / (f_pts[2:n_mels + 2] -", "< 0: window_end -= window_start window_start = 0 if not", "sample_rate: float, frame_length: float = 25.0, frame_shift: float = 10.0,", "(str): File format. channels_first (bool): When True, both the input", "length of the expected output. rand_init (bool): Initializes phase randomly", "win_length: int ) -> Tensor: r\"\"\" Apply median smoothing to", "2]) # (new_bins, freq, 2) complex_specgrams_0 = complex_specgrams.index_select(-2, time_steps.long()) complex_specgrams_1", "Frequency that we down-sample the signal to. Must be more", "= freq.reshape(shape[:-1] + list(freq.shape[-1:])) return freq def sliding_window_cmn( waveform: Tensor,", "= x_db.size() packed_channels = shape[-3] if x_db.dim() > 2 else", "False) Relevant if ``frames_per_chunk > 0``. recompute_frame (int, optional): Only", "0.5).to(torch.int64) return x_mu def mu_law_decoding( x_mu: Tensor, quantization_channels: int )", "among all the lags is very close to the first", "optional): Cost factor for FO change. 
(default: 0.1) lowpass_cutoff (float,", "<NAME> and <NAME>, \"Signal estimation from modified short-time Fourier transform,\"", "assign to the masked columns axis (int): Axis to apply", "2 + 1`` and ``n_fft`` is the number of Fourier", "torch.log1p(mu) x_mu = ((x_mu + 1) / 2 * mu", "to the left. (bool, default = false) norm_vars (bool, optional):", "torch.linspace(m_min, m_max, n_mels + 2) f_pts = _mel_to_hz(m_pts, mel_scale=mel_scale) #", "torch.round(phase / (2 * math.pi)) # Compute Phase Accum phase", "= _combine_max(half, best) indices = best[1] # Add back minimal", "of freq of dimension (..., frame) \"\"\" input_shape = waveform.shape", "allow pitch tracking to introduce into the feature processing (affects", "None): Array length of the expected output. rand_init (bool): Initializes", "# Replace by torch.norm once issue is fixed # https://github.com/pytorch/pytorch/issues/34279", "axis (int): Axis to apply masking on (2 -> frequency,", "t - cmn_window window_end = t + 1 if window_start", "masked will be uniformly sampled from [0, mask_param] mask_value (float):", "instead. normalized (bool): Whether to normalize by magnitude after stft", "s1 = waveform[..., :-lag].unfold(-1, frame_size, frame_size)[..., :num_of_frames, :] s2 =", "(Hz) (Default: ``3400``). Returns: Tensor: Tensor of freq of dimension", "# create overlapping triangles zero = torch.zeros(1) down_slopes = (-1.0", "to between -1 and 1 and returns a signal encoded", "p=2, dim=-1) phase = angle_1 - angle_0 - phase_advance phase", "without modifying pitch by a factor of ``rate``. Args: complex_specgrams", "time) sample_rate (int): Sample rate of the audio waveform pad", "[1]. This function computes the equivalent of `compute-kaldi-pitch-feats` from Kaldi.", "down-sample the signal to. 
Must be more than twice lowpass-cutoff.", "in the log-scale part min_log_hz = 1000.0 min_log_mel = (min_log_hz", "are supported') # unpack batch specgram = specgram.reshape(shape[:-2] + specgram.shape[-2:])", "-= (frame_to_remove ** 2) if window_end > last_window_end: frame_to_add =", "center (bool, optional): whether to pad :attr:`waveform` on both sides", "2) complex_specgrams_0 = complex_specgrams.index_select(-2, time_steps.long()) complex_specgrams_1 = complex_specgrams.index_select(-2, (time_steps +", "r\"\"\"Compute the angle of complex tensor input. Args: complex_tensor (Tensor):", "[0, 0, 0, 2]) # (new_bins, freq, 2) complex_specgrams_0 =", "ValueError('Only Frequency and Time masking are supported') device = specgrams.device", "** 2)) variance = torch.pow(variance, -0.5) cmn_waveform[:, t, :] *=", "if (fb.max(dim=0).values == 0.).any(): warnings.warn( \"At least one mel filterbank", "value) mask_start = (min_value.long()).squeeze() mask_end = (min_value.long() + value.long()).squeeze() assert", "Real spectrogram (channel, freq, time) mask_param (int): Number of columns", "mel point and each stft freq point in hertz f_diff", "_combine_max( a: Tuple[Tensor, Tensor], b: Tuple[Tensor, Tensor], thresh: float =", "# Per batch example masking specgrams = specgrams.transpose(axis, -1) specgrams.masked_fill_((mask", "2: specgram[:, :, mask_start:mask_end] = mask_value else: raise ValueError('Only Frequency", "bits_per_sample ) bytes.seek(0) augmented, _ = torchaudio.sox_effects.sox_effects.apply_effects_file( bytes, effects=[[\"rate\", f\"{sample_rate}\"]],", "= 5, max_frames_latency: int = 0, frames_per_chunk: int = 0,", "the same mask interval. Args: specgram (Tensor): Real spectrogram (channel,", "max(t + 1, min_cmn_window) if window_end > num_frames: window_start -=", "etc. normalized (bool): Whether to normalize by magnitude after stft.", "of spectrograms from a raw audio signal. 
The spectrogram can", "n_fft: int, hop_length: int, win_length: int, power: float, normalized: bool,", "Maximum frequency (Hz) n_mels (int): Number of mel filterbanks sample_rate", "energy per channel enorm = 2.0 / (f_pts[2:n_mels + 2]", "specgram.size() specgram = specgram.reshape(1, -1, shape[-1]) assert win_length >= 3", "math import warnings from typing import Optional, Tuple import torch", "be approx constant energy per channel enorm = 2.0 /", "batch freq = freq.reshape(shape[:-1] + list(freq.shape[-1:])) return freq def sliding_window_cmn(", ":py:func:`torchaudio.backend.sox_io_backend.save`. bits_per_sample (int, optional): Changes the bit depth for the", "if window_end > last_window_end: frame_to_add = waveform[:, last_window_end, :] cur_sum", "device = specgrams.device dtype = specgrams.dtype value = torch.rand(specgrams.shape[:2], device=device,", "(Tensor): The input waveform of shape `(..., time)`. sample_rate (float):", "than a multiplicative factor of the second, elementwise. 
\"\"\" mask", "n + 1, 1, device=device, dtype=dtype).repeat(specgram.shape[1], 1, 1) output =", "effects=[[\"rate\", f\"{sample_rate}\"]], channels_first=channels_first, format=format) return augmented @_mod_utils.requires_kaldi() def compute_kaldi_pitch( waveform:", "-> torch.Tensor: \"\"\"Extract pitch based on method described in [1].", "mel_scale == \"htk\": return 2595.0 * math.log10(1.0 + (freq /", "# pack batch shape = specgram.size() specgram = specgram.reshape(1, -1,", "(batch, channel, freq, time) mask_param (int): Number of columns to", "freq = freq.reshape(shape[:-1] + list(freq.shape[-1:])) return freq def sliding_window_cmn( waveform:", "sample_rate: int, pad: int, window: Tensor, n_fft: int, hop_length: int,", ") -> torch.Tensor: \"\"\"Extract pitch based on method described in", "int(math.ceil(sample_rate * frame_time)) waveform_length = waveform.size()[-1] num_of_frames = int(math.ceil(waveform_length /", "Tensor: Input after mu-law encoding \"\"\" mu = quantization_channels -", "complex-valued spectrogram with shape `(..., 2)` into its magnitude and", "of integer squared denom = n * (n + 1)", "time), freq is ``n_fft // 2 + 1`` and ``n_fft``", "masking on (2 -> frequency, 3 -> time) Returns: Tensor:", "for `n_mels` ({n_mels}) may be set too high. \" f\"Or,", "the previous iterate tprev = rebuilt # Invert with our", "(-1.0 * slopes[:, :-2]) / f_diff[:-1] # (n_freqs, n_mels) up_slopes", "(0, p)) # Compute lags output_lag = [] for lag", "Tensor]: r\"\"\"Separate a complex-valued spectrogram with shape `(..., 2)` into", "second, elementwise. \"\"\" mask = (a[0] > thresh * b[0])", "(fb matrix) of size (``n_freqs``, ``n_mels``) meaning number of frequencies", "meaning number of frequencies to highlight/apply to x the number", "* norm_0 real_stretch = mag * torch.cos(phase_acc) imag_stretch = mag", "1000.0 min_log_mel = (min_log_hz - f_min) / f_sp logstep =", "automatic speech recognition <NAME>, <NAME>, <NAME>, <NAME>, <NAME> and <NAME>", "otherwise. 
Returns: torch.Tensor: waveform of (..., time), where time equals", "= torch.nn.functional.pad(specgram, (n, n), mode=mode) kernel = torch.arange(-n, n +", "more gives sharper filter. (default: 1) upsample_filter_width (int, optional): Integer", "to the 1D tensor over the given window. \"\"\" #", "mask = torch.arange(0, specgrams.size(axis), device=device, dtype=dtype) # Per batch example", "float, n_mels: int, sample_rate: int, norm: Optional[str] = None, mel_scale:", "from [0, mask_param] mask_value (float): Value to assign to the", "window_end = 0 if center: window_start = t - cmn_window", "from first if bigger than a multiplicative factor of the", "the audio waveform. format (str): File format. channels_first (bool): When", "and phase of the complex tensor \"\"\" mag = complex_norm(complex_tensor,", "length in milliseconds. (default: 25.0) frame_shift (float, optional): Frame shift", "2013. * [3] <NAME> and <NAME>, \"Signal estimation from modified", "``[time, channel]``. compression (float): Used for formats other than WAV.", "if normalized: warnings.warn( \"The argument normalized is not used in", "of lags, then the latter is taken. \"\"\" lag_min =", "mu_law_decoding( x_mu: Tensor, quantization_channels: int ) -> Tensor: r\"\"\"Decode mu-law", "axis: int ) -> Tensor: r\"\"\" Apply a mask along", "<NAME>. \"A fast Griffin-Lim algorithm,\" IEEE Workshop on Applications of", "= waveform[..., lag:].unfold(-1, frame_size, frame_size)[..., :num_of_frames, :] output_frames = (", "columns to be masked will be uniformly sampled from [0,", "Args: waveform (Tensor): Tensor of audio of dimension (..., time)", "frequencies to highlight/apply to x the number of filterbanks. 
Each", "torch.nn.functional.pad(waveform, (0, p)) # Compute lags output_lag = [] for", "num_feats, dtype=dtype, device=device) for t in range(num_frames): window_start = 0", ":] cur_sum -= frame_to_remove if norm_vars: cur_sumsq -= (frame_to_remove **", "= torch.arange(float(n_mels)) k = torch.arange(float(n_mfcc)).unsqueeze(1) dct = torch.cos(math.pi / float(n_mels)", "torchaudio.sox_effects.sox_effects.apply_effects_file( bytes, effects=[[\"rate\", f\"{sample_rate}\"]], channels_first=channels_first, format=format) return augmented @_mod_utils.requires_kaldi() def", "v)``. All examples will have the same mask interval. Args:", "audio waveform norm (Optional[str]): If 'slaney', divide the triangular mel", "+ torch.norm(s1, p=2, dim=-1)).pow(2) / (EPSILON + torch.norm(s2, p=2, dim=-1)).pow(2)", "frequencies to highlight/apply f_min (float): Minimum frequency (Hz) f_max (float):", "`(batch, channel, freq, time)`. multiplier (float): Use 10. for power", ":] = waveform[:, t, :] - cur_sum / window_frames if", "delta_pitch( float, optional): Smallest relative change in pitch that our", "(Tensor, Tensor): The magnitude and phase of the complex tensor", "nccf = _compute_nccf(waveform, sample_rate, frame_time, freq_low) indices = _find_max_per_frame(nccf, sample_rate,", "1000, resample_frequency: float = 4000, delta_pitch: float = 0.005, nccf_ballast:", "angle(complex_tensor) return mag, phase def phase_vocoder( complex_specgrams: Tensor, rate: float,", "device=complex_specgrams.device, dtype=complex_specgrams.dtype) alphas = time_steps % 1.0 phase_0 = angle(complex_specgrams[...,", "// 2``) win_length (int): Window size. (Default: ``n_fft``) power (float):", "Reference which the output will be scaled by. 
power (float):", "** (-9) # Number of lags to check lags =", "Compute the spectral centroid for each channel along the time", "soft_min_f0: float = 10.0, penalty_factor: float = 0.1, lowpass_cutoff: float", "Fill in the linear scale f_min = 0.0 f_sp =", "is the file size divided by the frame-shift. This makes", "device=specgram.device).reshape((-1, 1)) freq_dim = -2 return (freqs * specgram).sum(dim=freq_dim) /", "\"\"\" mu = quantization_channels - 1.0 if not x.is_floating_point(): x", "x_db.reshape(shape) return x_db def DB_to_amplitude( x: Tensor, ref: float, power:", "_mel_to_hz(mels: Tensor, mel_scale: str = \"htk\") -> Tensor: \"\"\"Convert mel", "is not used in Griffin-Lim, \" \"and will be removed", "phases inverse = torch.istft(specgram * angles, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=window,", "can be either magnitude-only or complex. Args: waveform (Tensor): Tensor", "return torch.atan2(complex_tensor[..., 1], complex_tensor[..., 0]) def magphase( complex_tensor: Tensor, power:", "Time masking are supported') device = specgrams.device dtype = specgrams.dtype", "5) max_frames_latency (int, optional): Maximum number of frames of latency", "the given window. \"\"\" # Centered windowed pad_length = (win_length", "nccf_ballast, lowpass_filter_width, upsample_filter_width, max_frames_latency, frames_per_chunk, simulate_first_pass_online, recompute_frame, snip_edges, ) result", "same mask interval. Args: specgram (Tensor): Real spectrogram (channel, freq,", "import warnings from typing import Optional, Tuple import torch from", "signal analysis in python.\" In Proceedings of the 14th python", "3 freqs = f_min + f_sp * mels # And", "* torch.abs(x)) / torch.log1p(mu) x_mu = ((x_mu + 1) /", "current estimate of the phases inverse = torch.istft(specgram * angles,", "231, 2]) \"\"\" # pack batch shape = complex_specgrams.size() complex_specgrams", "Args: waveform (Tensor): Audio data. Must be 2 dimensional. 
See", "= 0.005, nccf_ballast: float = 7000, lowpass_filter_width: int = 1,", "of hop between STFT windows. ( Default: ``win_length // 2``)", "v)``, where ``v`` is sampled from ``uniform(0, mask_param)``, and ``v_0``", "Args: specgram (Tensor): Real spectrogram (channel, freq, time) mask_param (int):", "* angles, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=window, length=length).float() # Rebuild the", "channels_first=channels_first, format=format) return augmented @_mod_utils.requires_kaldi() def compute_kaldi_pitch( waveform: torch.Tensor, sample_rate:", "number of frames. (default: True) Returns: Tensor: Pitch feature. Shape:", "depth for the supported formats. For more details see :py:func:`torchaudio.backend.sox_io_backend.save`.", "number of frames used for energy normalization. (default: 0) simulate_first_pass_online", "in decibels. A reasonable number is 80. (Default: ``None``) Returns:", "({n_mels}) may be set too high. \" f\"Or, the value", "have the form `(batch, channel, freq, time)`. multiplier (float): Use", "= 200.0 / 3 mels = (freq - f_min) /", "1025, 512 >>> # (channel, freq, time, complex=2) >>> complex_specgrams", "(adds latency only at start). Only applicable if center ==", "1`` and ``n_fft`` is the number of Fourier bins, and", "columns axis (int): Axis to apply masking on (2 ->", "not None: # Expand batch shape = x_db.size() packed_channels =", "of frame :math:`i`, :math:`E(j)` is the energy :math:`\\sum_{n=j}^{j+N-1} w^2(n)`. \"\"\"", "mel_scale: str = \"htk\", ) -> Tensor: r\"\"\"Create a frequency", ") bytes.seek(0) augmented, _ = torchaudio.sox_effects.sox_effects.apply_effects_file( bytes, effects=[[\"rate\", f\"{sample_rate}\"]], channels_first=channels_first,", "values, weighted by their magnitude. Args: waveform (Tensor): Tensor of", "be uniformly sampled from [0, mask_param] mask_value (float): Value to", "= 30, freq_low: int = 85, freq_high: int = 3400,", "time equals the ``length`` parameter if given. 
\"\"\" assert momentum", "sample_rate, channels_first, compression, format, encoding, bits_per_sample ) bytes.seek(0) augmented, _", "40, 1000) >>> delta = compute_deltas(specgram) >>> delta2 = compute_deltas(delta)", "dimension (..., freq, time) sample_rate (int): The sample rate of", "10.0, penalty_factor: float = 0.1, lowpass_cutoff: float = 1000, resample_frequency:", "complex_specgrams.size() complex_specgrams = complex_specgrams.reshape([-1] + list(shape[-3:])) time_steps = torch.arange(0, complex_specgrams.size(-2),", "dim=-1) norm_1 = torch.norm(complex_specgrams_1, p=2, dim=-1) phase = angle_1 -", "Reference: - A pitch extraction algorithm tuned for automatic speech", "# unpack batch spec_f = spec_f.reshape(shape[:-1] + spec_f.shape[-2:]) if normalized:", "if norm is not None and norm != \"slaney\": raise", "applied/multiplied to each frame/window n_fft (int): Size of FFT hop_length", "angles.sin()], dim=-1) \\ .to(dtype=specgram.dtype, device=specgram.device) specgram = specgram.unsqueeze(-1).expand_as(angles) # And", "# \"replicate\" padding in any dimension indices = torch.nn.functional.pad( indices,", "is the spectrogram coeffcients at time :math:`t`, :math:`N` is ``(win_length-1)//2``.", "spectrum is returned instead. normalized (bool): Whether to normalize by", "frame (Default: ``10 ** (-2)``). win_length (int, optional): The window", "# unpack batch complex_specgrams_stretch = complex_specgrams_stretch.reshape(shape[:-3] + complex_specgrams_stretch.shape[1:]) return complex_specgrams_stretch", "If false, window is to the left. 
(bool, default =", "import torchaudio __all__ = [ \"spectrogram\", \"griffinlim\", \"amplitude_to_DB\", \"DB_to_amplitude\", \"compute_deltas\",", "float ) -> Tensor: r\"\"\"Turn a tensor from the decibel", "torch.tensor(mu, dtype=x_mu.dtype) x = ((x_mu) / mu) * 2 -", "channel dimension and have the form `(batch, channel, freq, time)`.", "window_start: window_end - window_start, :] cur_sum += torch.sum(input_part, 1) if", "unpack batch spec_f = spec_f.reshape(shape[:-1] + spec_f.shape[-2:]) if normalized: spec_f", "normalized (bool): Whether to normalize by magnitude after stft center", "# Convert indices to frequency EPSILON = 10 ** (-9)", "compression, format, encoding, bits_per_sample ) bytes.seek(0) augmented, _ = torchaudio.sox_effects.sox_effects.apply_effects_file(", "half of results to avoid redundancy. Default: ``True`` Returns: Tensor:", "Number of mfc coefficients to retain n_mels (int): Number of", "a[0] + ~mask * b[0] indices = mask * a[1]", "log-scale part min_log_hz = 1000.0 min_log_mel = (min_log_hz - f_min)", "(Tensor): Real spectrogram (channel, freq, time) mask_param (int): Number of", "if given. \"\"\" assert momentum < 1, 'momentum={} > 1", "advance in each bin. Dimension of (freq, 1) Returns: Tensor:", "tensor. Shape of `(..., )` \"\"\" # Replace by torch.norm", "compression (float): Used for formats other than WAV. For mor", "x_mu.to(torch.float) mu = torch.tensor(mu, dtype=x_mu.dtype) x = ((x_mu) / mu)", "a mask along ``axis``. Mask will be applied from indices", "(Default: ``85``). freq_high (int, optional): Highest frequency that can be", "= torch.tensor(0.) for _ in range(n_iter): # Store the previous", "time axis. 
The spectral centroid is defined as the weighted", "frame/window n_fft (int): Size of FFT hop_length (int): Length of", "Tensor, n_fft: int, hop_length: int, win_length: int, power: float, normalized:", "\"slaney\".') if mel_scale == \"htk\": return 700.0 * (10.0**(mels /", "of frequencies to highlight/apply f_min (float): Minimum frequency (Hz) f_max", "dtype=dtype, device=device) cmn_waveform = torch.zeros( num_channels, num_frames, num_feats, dtype=dtype, device=device)", "torch.rand(batch, freq, frames) else: angles = torch.zeros(batch, freq, frames) angles", "window_frames if norm_vars: if window_frames == 1: cmn_waveform[:, t, :]", "between -1 and 1 and returns a signal encoded with", "complex_specgrams = torch.randn(2, freq, 300, 2) >>> rate = 1.3", "(int): Two sided padding of signal window (Tensor): Window tensor", "the left. (bool, default = false) norm_vars (bool, optional): If", "@_mod_utils.requires_sox() def apply_codec( waveform: Tensor, sample_rate: int, format: str, channels_first:", "frame_size)[..., :num_of_frames, :] s2 = waveform[..., lag:].unfold(-1, frame_size, frame_size)[..., :num_of_frames,", "t, :] = waveform[:, t, :] - cur_sum / window_frames", "torch.tensor(0.) for _ in range(n_iter): # Store the previous iterate", "momentum)) angles = angles.div(complex_norm(angles).add(1e-16).unsqueeze(-1).expand_as(angles)) # Return the final phase estimates", "``channels_first=True``, it has ``[channel, time]`` else ``[time, channel]``. \"\"\" bytes", "0) e.g., 1 for energy, 2 for power, etc. If", "= specgram.size() specgram = specgram.reshape([-1] + list(shape[-2:])) specgram = specgram.pow(1", "dimension of `(..., freq, ceil(time/rate), complex=2)` Example >>> freq, hop_length", "average of the frequency values, weighted by their magnitude. 
Args:", "both the input and output Tensor have dimension ``[channel, time]``.", ":math:`b_i` is the beginning of frame :math:`i`, :math:`E(j)` is the", "# -*- coding: utf-8 -*- import io import math import", "0.1 * x), power) def _hz_to_mel(freq: float, mel_scale: str =", "2) if window_end > last_window_end: frame_to_add = waveform[:, last_window_end, :]", ":math:`d_t` is the deltas at time :math:`t`, :math:`c_t` is the", "top_db (float or None, optional): Minimum negative cut-off in decibels.", "to faster convergence, but above 1 may not converge. length", "shape[-2], shape[-1]) x_db = torch.max(x_db, (x_db.amax(dim=(-3, -2, -1)) - top_db).view(-1,", "x.to(torch.float) mu = torch.tensor(mu, dtype=x.dtype) x_mu = torch.sign(x) * torch.log1p(mu", "\"The argument normalized is not used in Griffin-Lim, \" \"and", "upsample_filter_width: int = 5, max_frames_latency: int = 0, frames_per_chunk: int", "waveform of (..., time), where time equals the ``length`` parameter", "frame_shift, min_f0, max_f0, soft_min_f0, penalty_factor, lowpass_cutoff, resample_frequency, delta_pitch, nccf_ballast, lowpass_filter_width,", "parameter if given. \"\"\" assert momentum < 1, 'momentum={} >", "\"Signal estimation from modified short-time Fourier transform,\" IEEE Trans. ASSP,", "NCCF, apply centered median smoothing, and convert to frequency. Note:", "function will output features that correspond to what an online", "music signal analysis in python.\" In Proceedings of the 14th", "is not None: if power == 1.0: return spec_f.abs() return", "Default: ``win_length // 2``) win_length (int): Window size. 
(Default: ``n_fft``)", "float, normalized: bool, n_iter: int, momentum: float, length: Optional[int], rand_init:", "power) # randomly initialize the phase batch, freq, frames =", "+ 1, min_cmn_window) if window_end > num_frames: window_start -= (window_end", "introduce into the feature processing (affects output only if ``frames_per_chunk", "specgram).sum(dim=freq_dim) / specgram.sum(dim=freq_dim) @_mod_utils.requires_sox() def apply_codec( waveform: Tensor, sample_rate: int,", "And initialize the previous iterate to 0 rebuilt = torch.tensor(0.)", "NCCF for quiet frames (default: 7000) lowpass_filter_width (int, optional): Integer", "- phase_advance phase = phase - 2 * math.pi *", "(Tensor): Tensor of audio of dimension (..., freq, time) win_length", "frame_length, frame_shift, min_f0, max_f0, soft_min_f0, penalty_factor, lowpass_cutoff, resample_frequency, delta_pitch, nccf_ballast,", "``simulate_first_pass_online=True``) (default: 0) frames_per_chunk (int, optional): The number of frames", "described in [1]. This function computes the equivalent of `compute-kaldi-pitch-feats`", "to power. If 0.5, will compute DB to amplitude. Returns:", "mu = quantization_channels - 1.0 if not x.is_floating_point(): x =", "dtype=x_mu.dtype) x = ((x_mu) / mu) * 2 - 1.0", "5, mode: str = \"replicate\" ) -> Tensor: r\"\"\"Compute delta", "of dimension (..., frame) \"\"\" input_shape = waveform.shape num_frames, num_feats", "the number of Fourier bins, and time is the number", "/ (2 * math.pi)) # Compute Phase Accum phase =", "& (mask < mask_end), mask_value) specgrams = specgrams.transpose(axis, -1) return", "of dimension (..., freq, time) cmn_window (int, optional): Window in", "the power/amplitude scale to the decibel scale. The output of", "num_feats) num_channels = waveform.shape[0] dtype = waveform.dtype device = waveform.device", "freq, time)`. multiplier (float): Use 10. 
for power and 20.", "2)`` where the last dimension corresponds to pitch and NCCF.", ") # Update our phase estimates angles = rebuilt if", "not None: if power == 1.0: return spec_f.abs() return spec_f.abs().pow(power)", "high. \" f\"Or, the value for `n_freqs` ({n_freqs}) may be", "frame_to_remove = waveform[:, last_window_start, :] cur_sum -= frame_to_remove if norm_vars:", ":-lag].unfold(-1, frame_size, frame_size)[..., :num_of_frames, :] s2 = waveform[..., lag:].unfold(-1, frame_size,", "iterate to 0 rebuilt = torch.tensor(0.) for _ in range(n_iter):", "# And initialize the previous iterate to 0 rebuilt =", "mask along ``axis``. Mask will be applied from indices ``[v_0,", "specgrams = specgrams.transpose(axis, -1) return specgrams def mask_along_axis( specgram: Tensor,", "True) Returns: Tensor: Pitch feature. Shape: ``(batch, frames 2)`` where", "+ cmn_window else: window_start = t - cmn_window window_end =", "of columns to be masked will be uniformly sampled from", "that the number of frames is the file size divided", "masked columns axis (int): Axis to apply masking on (1", "details see :py:func:`torchaudio.backend.sox_io_backend.save`. bits_per_sample (int, optional): Changes the bit depth", "so that the :math:`t`-th frame is centered at time :math:`t", "if ``frames_per_chunk > 0``. (default: 500) snip_edges (bool, optional): If", "raise ValueError('Only Frequency and Time masking are supported') # unpack", "from typing import Optional, Tuple import torch from torch import", "the norm. (Default: `1.0`). Returns: Tensor: Power of the normed", "`(..., time)`. sample_rate (float): Sample rate of `waveform`. frame_length (float,", "== 1.0: return spec_f.abs() return spec_f.abs().pow(power) return torch.view_as_real(spec_f) def griffinlim(", "channel]``. 
\"\"\" bytes = io.BytesIO() torchaudio.backend.sox_io_backend.save(bytes, waveform, sample_rate, channels_first, compression,", "decoding \"\"\" mu = quantization_channels - 1.0 if not x_mu.is_floating_point():", "(bool, default = false) Returns: Tensor: Tensor of freq of", "Tensor: Output tensor in decibel scale \"\"\" x_db = multiplier", "- v)``. All examples will have the same mask interval.", "estimates waveform = torch.istft(specgram * angles, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=window,", "+ 1) slopes = f_pts.unsqueeze(0) - all_freqs.unsqueeze(1) # (n_freqs, n_mels", "mask_param min_value = torch.rand(specgrams.shape[:2], device=device, dtype=dtype) * (specgrams.size(axis) - value)", "v_0 + v)``, where ``v`` is sampled from ``uniform(0, mask_param)``,", "= torch.stack([angles.cos(), angles.sin()], dim=-1) \\ .to(dtype=specgram.dtype, device=specgram.device) specgram = specgram.unsqueeze(-1).expand_as(angles)", "Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_ This algorithm assumes the signal has been scaled", "batch specgram = specgram.reshape(shape[:-2] + specgram.shape[-2:]) return specgram def compute_deltas(", "taken. \"\"\" lag_min = int(math.ceil(sample_rate / freq_high)) # Find near", "frames used for energy normalization. (default: 0) simulate_first_pass_online (bool, optional):", "to assign to the masked columns axis (int): Axis to", "\"complex_norm\", \"angle\", \"magphase\", \"phase_vocoder\", 'mask_along_axis', 'mask_along_axis_iid', 'sliding_window_cmn', \"spectral_centroid\", \"apply_codec\", ]", "(``n_mels``, ``n_mfcc``). 
\"\"\" # http://en.wikipedia.org/wiki/Discrete_cosine_transform#DCT-II n = torch.arange(float(n_mels)) k =", "Note: If the max among all the lags is very", "mag = complex_norm(complex_tensor, power) phase = angle(complex_tensor) return mag, phase", "bits_per_sample (int, optional): Changes the bit depth for the supported", "applied in soft way, must not exceed min-f0 (default: 10.0)", "n_freqs: int, f_min: float, f_max: float, n_mels: int, sample_rate: int,", "mask_end - mask_start < mask_param if axis == 1: specgram[:,", "dimensional. See also ```channels_first```. sample_rate (int): Sample rate of the", "``n_mfcc``). \"\"\" # http://en.wikipedia.org/wiki/Discrete_cosine_transform#DCT-II n = torch.arange(float(n_mels)) k = torch.arange(float(n_mfcc)).unsqueeze(1)", "determines filter width of lowpass filter, more gives sharper filter.", "(Default: ``\"replicate\"``) Returns: Tensor: Tensor of deltas of dimension (...,", "- mask_start < mask_param if axis == 1: specgram[:, mask_start:mask_end]", "so that the number of frames is the file size", "int = 600, min_cmn_window: int = 100, center: bool =", "after stft center (bool, optional): whether to pad :attr:`waveform` on", "cur_sumsq -= (frame_to_remove ** 2) if window_end > last_window_end: frame_to_add", "split into snippets vs. a full clip. Args: x (Tensor):", "apply masking on (1 -> frequency, 2 -> time) Returns:", "than twice lowpass-cutoff. (default: 4000) delta_pitch( float, optional): Smallest relative", "in Mels \"\"\" if mel_scale not in ['slaney', 'htk']: raise", "waveform: torch.Tensor, sample_rate: float, frame_length: float = 25.0, frame_shift: float", "dtype = specgram.dtype # pack batch shape = specgram.size() specgram", "phase_advance = torch.linspace( >>> 0, math.pi * hop_length, freq)[..., None]", "window. \"\"\" # Centered windowed pad_length = (win_length - 1)", "Sample rate of `waveform`. 
frame_length (float, optional): Frame length in", "DCT transformation matrix with shape (``n_mels``, ``n_mfcc``), normalized depending on", "pad_mode='reflect', normalized=False, onesided=True, return_complex=True, ) ) # Update our phase", "of hop between STFT windows win_length (int): Window size Returns:", "mu = quantization_channels - 1.0 if not x_mu.is_floating_point(): x_mu =", "x: Tensor, ref: float, power: float ) -> Tensor: r\"\"\"Turn", "angles = angles - tprev.mul_(momentum / (1 + momentum)) angles", "/ specgram.sum(dim=freq_dim) @_mod_utils.requires_sox() def apply_codec( waveform: Tensor, sample_rate: int, format:", "Value to assign to the masked columns axis (int): Axis", "power == 1.0: return spec_f.abs() return spec_f.abs().pow(power) return torch.view_as_real(spec_f) def", "specgram.size() specgram = specgram.reshape([-1] + list(shape[-2:])) specgram = specgram.pow(1 /", "of \"htk\" or \"slaney\".') if mel_scale == \"htk\": return 700.0", "m_min = _hz_to_mel(f_min, mel_scale=mel_scale) m_max = _hz_to_mel(f_max, mel_scale=mel_scale) m_pts =", "ignored if center==true (int, default = 100) center (bool, optional):", "f_min) / f_sp logstep = math.log(6.4) / 27.0 log_t =", "= 0 if center: window_start = t - cmn_window //", "_ in range(n_iter): # Store the previous iterate tprev =", "If 'slaney', divide the triangular mel weights by the width", "int, n_mels: int, norm: Optional[str] ) -> Tensor: r\"\"\"Create a", "true, the function will output features that correspond to what", "pitch and NCCF. Reference: - A pitch extraction algorithm tuned", "median smoothing (in number of frames) (Default: ``30``). freq_low (int,", "Audio and Acoustics (pp. 1-4), Oct. 2013. 
* [3] <NAME>", "/ freq_high)) # Find near enough max that is smallest", "+ 1 if window_start < 0: window_end -= window_start window_start", "win_length=win_length, window=window, length=length).float() # Rebuild the spectrogram rebuilt = torch.view_as_real(", "Only relevant for compatibility with online pitch extraction. A non-critical", "of a complex tensor. Shape of `(..., )` \"\"\" return", "['slaney', 'htk']: raise ValueError('mel_scale should be one of \"htk\" or", "at start of decoding (adds latency only at start). Only", "Only applicable if center == false, ignored if center==true (int,", "pitch extraction. A non-critical parameter; the frame at which we", "waveform[:, t, :] - cur_sum / window_frames if norm_vars: if", "values def detect_pitch_frequency( waveform: Tensor, sample_rate: int, frame_time: float =", "int, hop_length: int, win_length: int, ) -> Tensor: r\"\"\" Compute", "(float): Maximum frequency (Hz) n_mels (int): Number of mel filterbanks", "Log10(max(reference value and amin)) top_db (float or None, optional): Minimum", "must be one of None or 'slaney'\") # freq bins", "difference between each mel point and each stft freq point", "of frames is the file size divided by the frame-shift.", "import Tensor from torchaudio._internal import module_utils as _mod_utils import torchaudio", "sample_rate, frame_time, freq_low) indices = _find_max_per_frame(nccf, sample_rate, freq_high) indices =", "window_end > last_window_end: frame_to_add = waveform[:, last_window_end, :] cur_sum +=", "dtype=dtype, device=device) else: variance = cur_sumsq variance = variance /", "hop_length=hop_length, win_length=win_length, window=window, center=True, pad_mode='reflect', normalized=False, onesided=True, return_complex=True, ) )", "mask_param)``, and ``v_0`` from ``uniform(0, max_v - v)``. Args: specgrams", "and convert to frequency. Note: If the max among all", "if ``frames_per_chunk > 0``. 
recompute_frame (int, optional): Only relevant for", "will be uniformly sampled from [0, mask_param] mask_value (float): Value", "time) \"\"\" # pack batch shape = specgram.size() specgram =", "waveform[..., :-lag].unfold(-1, frame_size, frame_size)[..., :num_of_frames, :] s2 = waveform[..., lag:].unfold(-1,", "to clamp ``x`` db_multiplier (float): Log10(max(reference value and amin)) top_db", "is sampled from ``uniform(0, mask_param)``, and ``v_0`` from ``uniform(0, max_v", "Axis to apply masking on (2 -> frequency, 3 ->", "io import math import warnings from typing import Optional, Tuple", "for LowPass filter (Hz) (default: 1000) resample_frequency (float, optional): Frequency", "\"\"\" mag = complex_norm(complex_tensor, power) phase = angle(complex_tensor) return mag,", "sample_rate (int): Sample rate of the audio waveform. format (str):", "depending on norm. Args: n_mfcc (int): Number of mfc coefficients", "\"\"\" # pack batch shape = complex_specgrams.size() complex_specgrams = complex_specgrams.reshape([-1]", "* power) def angle( complex_tensor: Tensor ) -> Tensor: r\"\"\"Compute", "normalized: bool, n_iter: int, momentum: float, length: Optional[int], rand_init: bool", "\"\"\" Take value from first if bigger than a multiplicative", "= specgram.pow(1 / power) # randomly initialize the phase batch,", "tensor in power/amplitude scale. \"\"\" return ref * torch.pow(torch.pow(10.0, 0.1", "ValueError(\"norm must be one of None or 'slaney'\") # freq", "optional): Integer that determines filter width when upsampling NCCF. (default:", "(int): Number of mel filterbanks sample_rate (int): Sample rate of", "rand_init: bool ) -> Tensor: r\"\"\"Compute waveform from a linear", "= torch.tensor(mu, dtype=x_mu.dtype) x = ((x_mu) / mu) * 2", "magnitude and phase. 
Args: complex_tensor (Tensor): Tensor shape of `(...,", "is the beginning of frame :math:`i`, :math:`E(j)` is the energy", "assert mask_end - mask_start < mask_param if axis == 1:", "logstep = math.log(6.4) / 27.0 if freq >= min_log_hz: mels", "complex_norm(complex_tensor, power) phase = angle(complex_tensor) return mag, phase def phase_vocoder(", "float = 1.0 ) -> Tuple[Tensor, Tensor]: r\"\"\"Separate a complex-valued", "+ ~mask * b[1] return values, indices def _find_max_per_frame( nccf:", "2, device=specgram.device).reshape((-1, 1)) freq_dim = -2 return (freqs * specgram).sum(dim=freq_dim)", "= 1, upsample_filter_width: int = 5, max_frames_latency: int = 0,", "if norm_vars: if window_frames == 1: cmn_waveform[:, t, :] =", "in milliseconds. (default: 10.0) min_f0 (float, optional): Minimum F0 to", "false) norm_vars (bool, optional): If true, normalize variance to one.", "Mels \"\"\" if mel_scale not in ['slaney', 'htk']: raise ValueError('mel_scale", "center: window_start = t - cmn_window // 2 window_end =", "-> Tensor: r\"\"\" Apply median smoothing to the 1D tensor", "channel along the time axis. The spectral centroid is defined", "= 200.0 / 3 freqs = f_min + f_sp *", "= slopes[:, 2:] / f_diff[1:] # (n_freqs, n_mels) fb =", "dimension and have the form `(batch, channel, freq, time)`. multiplier", "# pack batch shape = complex_specgrams.size() complex_specgrams = complex_specgrams.reshape([-1] +", "mask_value else: raise ValueError('Only Frequency and Time masking are supported')", "waveform = torch.nn.functional.pad(waveform, (0, p)) # Compute lags output_lag =", "r\"\"\"Separate a complex-valued spectrogram with shape `(..., 2)` into its", "spectrograms from a raw audio signal. The spectrogram can be", "range(1, lags + 1): s1 = waveform[..., :-lag].unfold(-1, frame_size, frame_size)[...,", "value for `n_mels` ({n_mels}) may be set too high. \"", "has all zero values. 
\" f\"The value for `n_mels` ({n_mels})", "(int, optional): Window in frames for running average CMN computation", "window_start < 0: window_end -= window_start window_start = 0 if", "-0.5) cmn_waveform[:, t, :] *= variance cmn_waveform = cmn_waveform.view(input_shape[:-2] +", "(float): Frequency in Mels \"\"\" if mel_scale not in ['slaney',", "1 empirical calibration offset indices += 1 return indices def", "the max among all the lags is very close to", "\" \"and will be removed in v0.9.0 release. To suppress", "(float or None): Exponent for the magnitude spectrogram, (must be", "= 10 ** (-2), win_length: int = 30, freq_low: int", "max_f0 (float, optional): Maximum F0 to search for (Hz) (default:", "In Proceedings of the 14th python in science conference, pp.", "value from first if bigger than a multiplicative factor of", "``frames_per_chunk > 0``. recompute_frame (int, optional): Only relevant for compatibility", "torch.Tensor: \"\"\"Extract pitch based on method described in [1]. This", "= None, bits_per_sample: Optional[int] = None, ) -> Tensor: r\"\"\"", "- f_min) / f_sp logstep = math.log(6.4) / 27.0 log_t", "Number of mel filterbanks norm (str or None): Norm to", "Exponent for the magnitude spectrogram, (must be > 0) e.g.,", "# Return the final phase estimates waveform = torch.istft(specgram *", "time) pad (int): Two sided padding of signal window (Tensor):", "if True, to zero otherwise. Returns: torch.Tensor: waveform of (...,", "n_freqs (int): Number of frequencies to highlight/apply f_min (float): Minimum", "Brian, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>. \"librosa: Audio", "None] mask = torch.arange(0, specgrams.size(axis), device=device, dtype=dtype) # Per batch", "of the phases inverse = torch.istft(specgram * angles, n_fft=n_fft, hop_length=hop_length,", "(default: 4000) delta_pitch( float, optional): Smallest relative change in pitch", "into snippets vs. a full clip. 
Args: x (Tensor): Input", "values = mask * a[0] + ~mask * b[0] indices", "both sides so that the :math:`t`-th frame is centered at", "torch.log1p(mu * torch.abs(x)) / torch.log1p(mu) x_mu = ((x_mu + 1)", "multiplicative factor of the second, elementwise. \"\"\" mask = (a[0]", "phase = angle_1 - angle_0 - phase_advance phase = phase", "where time equals the ``length`` parameter if given. \"\"\" assert", "one. (bool, default = false) Returns: Tensor: Tensor of freq", "Tensor: Masked spectrograms of dimensions (batch, channel, freq, time) \"\"\"", "batch spec_f = spec_f.reshape(shape[:-1] + spec_f.shape[-2:]) if normalized: spec_f /=", "0 and quantization_channels - 1 and returns a signal scaled", "``10 ** (-2)``). win_length (int, optional): The window length for", "input with values between 0 and quantization_channels - 1 and", "Tensor: r\"\"\" For each frame, take the highest value of", "input. Args: complex_tensor (Tensor): Tensor shape of `(..., complex=2)` power", "on Applications of Signal Processing to Audio and Acoustics (pp.", "variance / window_frames variance -= ((cur_sum ** 2) / (window_frames", "- value) # Create broadcastable mask mask_start = min_value[..., None,", "(min_value.long() + value.long()).squeeze() assert mask_end - mask_start < mask_param if", ":, mask_start:mask_end] = mask_value else: raise ValueError('Only Frequency and Time", "Window size power (float or None): Exponent for the magnitude", "utf-8 -*- import io import math import warnings from typing", "tensor \"\"\" mag = complex_norm(complex_tensor, power) phase = angle(complex_tensor) return", "\"amplitude_to_DB\", \"DB_to_amplitude\", \"compute_deltas\", \"compute_kaldi_pitch\", \"create_fb_matrix\", \"create_dct\", \"compute_deltas\", \"detect_pitch_frequency\", \"DB_to_amplitude\", \"mu_law_encoding\",", "time), where time equals the ``length`` parameter if given. 
\"\"\"", "specgrams.transpose(axis, -1) return specgrams def mask_along_axis( specgram: Tensor, mask_param: int,", "unpack batch freq = freq.reshape(shape[:-1] + list(freq.shape[-1:])) return freq def", "has been scaled to between -1 and 1 and returns", "-= frame_to_remove if norm_vars: cur_sumsq -= (frame_to_remove ** 2) if", "(n_mfcc, n_mels) if norm is None: dct *= 2.0 else:", "delta coefficients of a tensor, usually a spectrogram: .. math::", "Input spectrogram(s) before being converted to decibel scale. Input should", "too high. \" f\"Or, the value for `n_freqs` ({n_freqs}) may", "normalized=False, onesided=onesided, return_complex=True, ) # unpack batch spec_f = spec_f.reshape(shape[:-1]", "(bool): Whether to normalize by magnitude after stft. n_iter (int):", "mag, phase def phase_vocoder( complex_specgrams: Tensor, rate: float, phase_advance: Tensor", "onesided (bool, optional): controls whether to return half of results", "rand_init: angles = 2 * math.pi * torch.rand(batch, freq, frames)", "compute_deltas(specgram) >>> delta2 = compute_deltas(delta) \"\"\" device = specgram.device dtype", "Tensor: r\"\"\" Compute the spectral centroid for each channel along", "formats other than WAV. For mor details see :py:func:`torchaudio.backend.sox_io_backend.save`. encoding", "IEEE Trans. ASSP, vol.32, no.2, pp.236–243, Apr. 1984. Args: specgram", "pp.236–243, Apr. 1984. Args: specgram (Tensor): A magnitude-only STFT spectrogram", "lowpass_cutoff: float = 1000, resample_frequency: float = 4000, delta_pitch: float", "min_log_hz * torch.exp(logstep * (mels[log_t] - min_log_mel)) return freqs def", "None: if power == 1.0: return spec_f.abs() return spec_f.abs().pow(power) return", "and returns a signal scaled between -1 and 1. Args:", "float, optional): Smallest relative change in pitch that our algorithm", "see :py:func:`torchaudio.backend.sox_io_backend.save`. 
encoding (str, optional): Changes the encoding for the", "angle(complex_specgrams_1) norm_0 = torch.norm(complex_specgrams_0, p=2, dim=-1) norm_1 = torch.norm(complex_specgrams_1, p=2,", "top_db is not None: # Expand batch shape = x_db.size()", "before being converted to power/amplitude scale. ref (float): Reference which", "0.).any(): warnings.warn( \"At least one mel filterbank has all zero", "== ceil(300 / 1.3) torch.Size([2, 1025, 231, 2]) \"\"\" #", "freq, time)`. Batched inputs should include a channel dimension and", "half = torch.max(nccf[..., lag_min:half_size], -1) best = _combine_max(half, best) indices", "cmn_waveform[:, t, :] *= variance cmn_waveform = cmn_waveform.view(input_shape[:-2] + (num_frames,", "Args: x (Tensor): Input tensor quantization_channels (int): Number of channels", "freqs (float): Frequencies in Hz mel_scale (str, optional): Scale to", "= waveform.shape[0] dtype = waveform.dtype device = waveform.device last_window_start =", "masking specgrams = specgrams.transpose(axis, -1) specgrams.masked_fill_((mask >= mask_start) & (mask", "# Equivalent filterbank construction by Librosa all_freqs = torch.linspace(0, sample_rate", "(Default: `1.0`). Returns: Tensor: Power of the normed input tensor.", "that is applied/multiplied to each frame/window n_fft (int): Size of", "denom # unpack batch output = output.reshape(shape) return output def", "1 can be unstable'.format(momentum) assert momentum >= 0, 'momentum={} <", "\\frac{\\sum_{n=1}^{\\text{N}} n (c_{t+n} - c_{t-n})}{2 \\sum_{n=1}^{\\text{N}} n^2} where :math:`d_t` is", "If 0.5, will compute DB to amplitude. Returns: Tensor: Output", "indices += lag_min # Add 1 empirical calibration offset indices", "mask_end), mask_value) specgrams = specgrams.transpose(axis, -1) return specgrams def mask_along_axis(", "vs. a full clip. Args: x (Tensor): Input spectrogram(s) before", "dimension (..., freq, frames) where freq is ``n_fft // 2", "power/amplitude scale. 
Args: x (Tensor): Input tensor before being converted", "centroid for each channel along the time axis. The spectral", "Mode parameter passed to padding (Default: ``\"replicate\"``) Returns: Tensor: Tensor", "based on method described in [1]. This function computes the", "signal. For more info see the `Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_ This", "rate, device=complex_specgrams.device, dtype=complex_specgrams.dtype) alphas = time_steps % 1.0 phase_0 =", "> 0: # TODO add \"with torch.no_grad():\" back when JIT", "<NAME>, and <NAME>. \"librosa: Audio and music signal analysis in", "one of \"htk\" or \"slaney\".') if mel_scale == \"htk\": return", "((x_mu + 1) / 2 * mu + 0.5).to(torch.int64) return", "= specgrams.transpose(axis, -1) specgrams.masked_fill_((mask >= mask_start) & (mask < mask_end),", "num_frames, num_feats) num_channels = waveform.shape[0] dtype = waveform.dtype device =", "simulate_first_pass_online (bool, optional): If true, the function will output features", "Pitch feature. Shape: ``(batch, frames 2)`` where the last dimension", "torch.zeros(1) down_slopes = (-1.0 * slopes[:, :-2]) / f_diff[:-1] #", "one mel filterbank has all zero values. \" f\"The value", "median smoothing. Args: waveform (Tensor): Tensor of audio of dimension", "see the `Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_ This algorithm assumes the signal", "(bool, optional): If true, use a window centered on the", "-*- import io import math import warnings from typing import", "0.5) * k) # size (n_mfcc, n_mels) if norm is", "centered at time :math:`t \\times \\text{hop\\_length}`. Default: ``True`` pad_mode (string,", "f_min (float): Minimum frequency (Hz) f_max (float): Maximum frequency (Hz)", "modified short-time Fourier transform,\" IEEE Trans. ASSP, vol.32, no.2, pp.236–243,", "converted to decibel scale. Input should take the form `(...,", "`(..., complex=2)` Return: Tensor: Angle of a complex tensor. 
Shape", "= phase - 2 * math.pi * torch.round(phase / (2", "freq_low: int ) -> Tensor: r\"\"\" Compute Normalized Cross-Correlation Function", "pad_length].unsqueeze(-1)], dim=-1) roll = indices.unfold(-1, win_length, 1) values, _ =", "the extent possible, modulo end effects). If false, window is", "window_frames = window_end - window_start last_window_start = window_start last_window_end =", "0 if center: window_start = t - cmn_window // 2", "** (-2)``). win_length (int, optional): The window length for median", "the deltas at time :math:`t`, :math:`c_t` is the spectrogram coeffcients", "num_frames if window_start < 0: window_start = 0 if last_window_start", "\"ortho\" dct[0] *= 1.0 / math.sqrt(2.0) dct *= math.sqrt(2.0 /", "or ``slaney``. (Default: ``htk``) Returns: freqs (Tensor): Mels converted in", "phase = phase + phase_advance phase = torch.cat([phase_0, phase[..., :-1]],", "Tensor, mask_param: int, mask_value: float, axis: int ) -> Tensor:", "torch.zeros(num_channels, num_feats, dtype=dtype, device=device) cmn_waveform = torch.zeros( num_channels, num_frames, num_feats,", "sample_rate, frame_length, frame_shift, min_f0, max_f0, soft_min_f0, penalty_factor, lowpass_cutoff, resample_frequency, delta_pitch,", "mu-law encoded signal. For more info see the `Wikipedia Entry", "window=window, length=length).float() # Rebuild the spectrogram rebuilt = torch.view_as_real( torch.stft(", "return nccf def _combine_max( a: Tuple[Tensor, Tensor], b: Tuple[Tensor, Tensor],", "import io import math import warnings from typing import Optional,", "4000) delta_pitch( float, optional): Smallest relative change in pitch that", "mel filterbanks norm (str or None): Norm to use (either", "(to the extent possible, modulo end effects). 
If false, window", "waveform pad (int): Two sided padding of signal window (Tensor):", "f_pts = _mel_to_hz(m_pts, mel_scale=mel_scale) # calculate the difference between each", "waveform = waveform.reshape(-1, shape[-1]) # default values are consistent with", "optional): Integer that determines filter width of lowpass filter, more", "Sample rate of the audio waveform norm (Optional[str]): If 'slaney',", "factor of ``rate``. Args: complex_specgrams (Tensor): Dimension of `(..., freq,", "with online pitch extraction. A non-critical parameter; the frame at", "if freq >= min_log_hz: mels = min_log_mel + math.log(freq /", "that the :math:`t`-th frame is centered at time :math:`t \\times", "n_mfcc: int, n_mels: int, norm: Optional[str] ) -> Tensor: r\"\"\"Create", "`normalized=False`.\") # pack batch shape = specgram.size() specgram = specgram.reshape([-1]", "A of size (..., ``n_freqs``), the applied result would be", "= torch.rand(1) * (specgram.size(axis) - value) mask_start = (min_value.long()).squeeze() mask_end", "(int, optional): Only relevant for compatibility with online pitch extraction.", "Fill in the log-scale part min_log_hz = 1000.0 min_log_mel =", "r\"\"\"Create a frequency bin conversion matrix. Args: n_freqs (int): Number", "/ 2 * mu + 0.5).to(torch.int64) return x_mu def mu_law_decoding(", "= angle(complex_specgrams_1) norm_0 = torch.norm(complex_specgrams_0, p=2, dim=-1) norm_1 = torch.norm(complex_specgrams_1,", "specgram = specgram.reshape(shape[:-2] + specgram.shape[-2:]) return specgram def compute_deltas( specgram:", "frequency (Hz) n_mels (int): Number of mel filterbanks sample_rate (int):", "momentum: float, length: Optional[int], rand_init: bool ) -> Tensor: r\"\"\"Compute", "number of filterbanks. Each column is a filterbank so that", "padding (Default: ``\"replicate\"``) Returns: Tensor: Tensor of deltas of dimension", "the bit depth for the supported formats. For more details", "(pp. 1-4), Oct. 2013. 
* [3] <NAME> and <NAME>, \"Signal", "float = 7000, lowpass_filter_width: int = 1, upsample_filter_width: int =", "1.0 / math.sqrt(2.0) dct *= math.sqrt(2.0 / float(n_mels)) return dct.t()", "frame :math:`i`, :math:`E(j)` is the energy :math:`\\sum_{n=j}^{j+N-1} w^2(n)`. \"\"\" EPSILON", "== \"slaney\": # Slaney-style mel is scaled to be approx", "Frame length in milliseconds. (default: 25.0) frame_shift (float, optional): Frame", "spectrogram coeffcients at time :math:`t`, :math:`N` is ``(win_length-1)//2``. Args: specgram", "(x_db.amax(dim=(-3, -2, -1)) - top_db).view(-1, 1, 1, 1)) # Repack", "0 recovers the original Griffin-Lim method. Values near 1 can", "'slaney'\") # freq bins # Equivalent filterbank construction by Librosa", "device=device, dtype=dtype) * mask_param min_value = torch.rand(specgrams.shape[:2], device=device, dtype=dtype) *", "by their magnitude. Args: waveform (Tensor): Tensor of audio of", "# Fill in the linear scale f_min = 0.0 f_sp", "not converge. length (int or None): Array length of the", "3: raise ValueError('Only Frequency and Time masking are supported') device", "Tensor: Dimension (..., time) \"\"\" specgram = spectrogram(waveform, pad=pad, window=window,", "import Optional, Tuple import torch from torch import Tensor from", "given window. \"\"\" # Centered windowed pad_length = (win_length -", "only at start). Only applicable if center == false, ignored", "is the default. 
(default: False) Relevant if ``frames_per_chunk > 0``.", "10 ** (-9) # Number of lags to check lags", "window=window, center=True, pad_mode='reflect', normalized=False, onesided=True, return_complex=True, ) ) # Update", "batch output = output.reshape(shape) return output def _compute_nccf( waveform: Tensor,", "frames for running average CMN computation (int, default = 600)", "dtype=dtype) * (specgrams.size(axis) - value) # Create broadcastable mask mask_start", "= int(math.ceil(sample_rate / freq_low)) frame_size = int(math.ceil(sample_rate * frame_time)) waveform_length", "feature. Shape: ``(batch, frames 2)`` where the last dimension corresponds", "1000) resample_frequency (float, optional): Frequency that we down-sample the signal", "power: float = 1.0 ) -> Tensor: r\"\"\"Compute the norm", "f_min: float, f_max: float, n_mels: int, sample_rate: int, norm: Optional[str]", "Input after mu-law encoding \"\"\" mu = quantization_channels - 1.0", "window used at start of decoding (adds latency only at", "# (n_mels + 1) slopes = f_pts.unsqueeze(0) - all_freqs.unsqueeze(1) #", "is fixed # https://github.com/pytorch/pytorch/issues/34279 return complex_tensor.pow(2.).sum(-1).pow(0.5 * power) def angle(", "None): Norm to use (either 'ortho' or None) Returns: Tensor:", "Add 1 empirical calibration offset indices += 1 return indices", "decoding (adds latency only at start). Only applicable if center", "7000) lowpass_filter_width (int, optional): Integer that determines filter width of", "see :py:func:`torchaudio.backend.sox_io_backend.save`. bits_per_sample (int, optional): Changes the bit depth for", "indices, (pad_length, 0), mode=\"constant\", value=0. 
) indices[..., :pad_length] = torch.cat(pad_length", "* mu + 0.5).to(torch.int64) return x_mu def mu_law_decoding( x_mu: Tensor,", "back when JIT supports it waveform = torch.nn.functional.pad(waveform, (pad, pad),", "if norm is None: dct *= 2.0 else: assert norm", "specgram.reshape([-1] + list(shape[-2:])) value = torch.rand(1) * mask_param min_value =", "decoding -- not the final version of the features, which", "# Compute lags output_lag = [] for lag in range(1,", "/= window.pow(2.).sum().sqrt() if power is not None: if power ==", "t, :] - cur_sum / window_frames if norm_vars: if window_frames", "filterbank has all zero values. \" f\"The value for `n_mels`", "quantization_channels - 1.0 if not x_mu.is_floating_point(): x_mu = x_mu.to(torch.float) mu", "`(..., )` \"\"\" return torch.atan2(complex_tensor[..., 1], complex_tensor[..., 0]) def magphase(", "angles.div(complex_norm(angles).add(1e-16).unsqueeze(-1).expand_as(angles)) # Return the final phase estimates waveform = torch.istft(specgram", "n_freqs) # calculate mel freq bins m_min = _hz_to_mel(f_min, mel_scale=mel_scale)", "int, f_min: float, f_max: float, n_mels: int, sample_rate: int, norm:", "freq, frames = specgram.size() if rand_init: angles = 2 *", "(default: 0) frames_per_chunk (int, optional): The number of frames used", "of the signal energy. Relevant if ``frames_per_chunk > 0``. (default:", "Tensor: r\"\"\"Create a spectrogram or a batch of spectrograms from", "``win_length // 2``) win_length (int): Window size. 
(Default: ``n_fft``) power", "batch shape = complex_specgrams.size() complex_specgrams = complex_specgrams.reshape([-1] + list(shape[-3:])) time_steps", "Tensor, power: float = 1.0 ) -> Tensor: r\"\"\"Compute the", "optional): Increasing this factor reduces NCCF for quiet frames (default:", "quantization_channels (int): Number of channels Returns: Tensor: Input after mu-law", "in the linear part f_min = 0.0 f_sp = 200.0", "else: assert norm == \"ortho\" dct[0] *= 1.0 / math.sqrt(2.0)", "(float, optional): Minimum F0 to search for (Hz) (default: 50.0)", "hop_length = 1025, 512 >>> # (channel, freq, time, complex=2)", "is smallest best = torch.max(nccf[..., lag_min:], -1) half_size = nccf.shape[-1]", "window_frames variance -= ((cur_sum ** 2) / (window_frames ** 2))", "spectrogram(waveform, pad=pad, window=window, n_fft=n_fft, hop_length=hop_length, win_length=win_length, power=1., normalized=False) freqs =", "n_mels + 2) # create overlapping triangles zero = torch.zeros(1)", "window is to the left. (bool, default = false) norm_vars", "ValueError('Only Frequency and Time masking are supported') # unpack batch", "= torch.linspace(0, sample_rate // 2, n_freqs) # calculate mel freq", "optional): Minimum F0 to search for (Hz) (default: 50.0) max_f0", "formats. For more details see :py:func:`torchaudio.backend.sox_io_backend.save`. bits_per_sample (int, optional): Changes", "rate: float, phase_advance: Tensor ) -> Tensor: r\"\"\"Given a STFT", "= 0.0 f_sp = 200.0 / 3 freqs = f_min", "2595.0) - 1.0) # Fill in the linear scale f_min", "an audio clip split into snippets vs. a full clip.", "Accum phase = phase + phase_advance phase = torch.cat([phase_0, phase[...,", "1) if norm_vars: cur_sumsq += torch.cumsum(input_part ** 2, 1)[:, -1,", "3 n = (win_length - 1) // 2 # twice", "`(..., freq, time)`. 
Batched inputs should include a channel dimension", "Dimension (..., time) \"\"\" specgram = spectrogram(waveform, pad=pad, window=window, n_fft=n_fft,", "bin numbers to frequencies. Args: mels (Tensor): Mel frequencies mel_scale", "if window_start > last_window_start: frame_to_remove = waveform[:, last_window_start, :] cur_sum", "waveform = torch.nn.functional.pad(waveform, (pad, pad), \"constant\") # pack batch shape", "scale to the power/amplitude scale. Args: x (Tensor): Input tensor", "`(..., )` \"\"\" # Replace by torch.norm once issue is", "float = 0.1, lowpass_cutoff: float = 1000, resample_frequency: float =", "window.pow(2.).sum().sqrt() if power is not None: if power == 1.0:", "time, complex=2)` rate (float): Speed-up factor phase_advance (Tensor): Expected phase", "a complex tensor. Shape of `(..., )` \"\"\" return torch.atan2(complex_tensor[...,", ":math:`t \\times \\text{hop\\_length}`. Default: ``True`` pad_mode (string, optional): controls the", ":math:`N` is the length of a frame, :math:`b_i` is the", "low.\" ) return fb def create_dct( n_mfcc: int, n_mels: int,", "(int): Number of frequencies to highlight/apply f_min (float): Minimum frequency", "data of size (``n_mels``, ``n_mfcc``). \"\"\" # http://en.wikipedia.org/wiki/Discrete_cosine_transform#DCT-II n =", "= \"replicate\" ) -> Tensor: r\"\"\"Compute delta coefficients of a", "tensor, usually a spectrogram: .. math:: d_t = \\frac{\\sum_{n=1}^{\\text{N}} n", "modulo end effects). If false, window is to the left.", "% 1.0 phase_0 = angle(complex_specgrams[..., :1, :]) # Time Padding", "``30``). freq_low (int, optional): Lowest frequency that can be detected", "negative cut-off in decibels. A reasonable number is 80. (Default:", "of dimensions (batch, channel, freq, time) \"\"\" if axis !=", "``True``. 
Default: ``\"reflect\"`` onesided (bool, optional): controls whether to return", "# default values are consistent with librosa.core.spectrum._spectrogram spec_f = torch.stft(", "= torch.sign(x) * torch.log1p(mu * torch.abs(x)) / torch.log1p(mu) x_mu =", "indices: Tensor, win_length: int ) -> Tensor: r\"\"\" Apply median", "the value for `n_freqs` ({n_freqs}) may be set too low.\"", "where :math:`d_t` is the deltas at time :math:`t`, :math:`c_t` is", "waveform: Tensor, pad: int, window: Tensor, n_fft: int, hop_length: int,", "(default: 7000) lowpass_filter_width (int, optional): Integer that determines filter width", "signal based on mu-law companding. For more info see the", "used at start of decoding (adds latency only at start).", "bytes = io.BytesIO() torchaudio.backend.sox_io_backend.save(bytes, waveform, sample_rate, channels_first, compression, format, encoding,", "be set too low.\" ) return fb def create_dct( n_mfcc:", "max_frames_latency (int, optional): Maximum number of frames of latency that", "- all_freqs.unsqueeze(1) # (n_freqs, n_mels + 2) # create overlapping", "are supported') device = specgrams.device dtype = specgrams.dtype value =", "mask_param)``, and ``v_0`` from ``uniform(0, max_v - v)``. All examples", "of iteration for phase recovery process. momentum (float): The momentum", "30% >>> phase_advance = torch.linspace( >>> 0, math.pi * hop_length,", "= (win_length - 1) // 2 # twice sum of", "from torchaudio._internal import module_utils as _mod_utils import torchaudio __all__ =", "str, channels_first: bool = True, compression: Optional[float] = None, encoding:", "* torch.log1p(mu * torch.abs(x)) / torch.log1p(mu) x_mu = ((x_mu +", "indices = torch.nn.functional.pad( indices, (pad_length, 0), mode=\"constant\", value=0. 
) indices[...,", "None and norm == \"slaney\": # Slaney-style mel is scaled", "t - cmn_window // 2 window_end = window_start + cmn_window", "compute_kaldi_pitch( waveform: torch.Tensor, sample_rate: float, frame_length: float = 25.0, frame_shift:", "Signal Processing (ICASSP), Florence, 2014, pp. 2494-2498, doi: 10.1109/ICASSP.2014.6854049. \"\"\"", "``v_0`` from ``uniform(0, max_v - v)``. All examples will have", "soft_min_f0, penalty_factor, lowpass_cutoff, resample_frequency, delta_pitch, nccf_ballast, lowpass_filter_width, upsample_filter_width, max_frames_latency, frames_per_chunk,", "``85``). freq_high (int, optional): Highest frequency that can be detected", "= torch.linspace(0, sample_rate // 2, steps=1 + n_fft // 2,", "> last_window_end: frame_to_add = waveform[:, last_window_end, :] cur_sum += frame_to_add", "float = 10.0, penalty_factor: float = 0.1, lowpass_cutoff: float =", "search for (Hz) (default: 50.0) max_f0 (float, optional): Maximum F0", "converted to power/amplitude scale. ref (float): Reference which the output", "from a raw audio signal. The spectrogram can be either", "= 100, center: bool = False, norm_vars: bool = False,", "float = 4000, delta_pitch: float = 0.005, nccf_ballast: float =", "'momentum={} < 0'.format(momentum) if normalized: warnings.warn( \"The argument normalized is", "of frames of latency that we allow pitch tracking to", "range(n_iter): # Store the previous iterate tprev = rebuilt #", "(..., freq, time) cmn_window (int, optional): Window in frames for", "by torch.norm once issue is fixed # https://github.com/pytorch/pytorch/issues/34279 return complex_tensor.pow(2.).sum(-1).pow(0.5", "indices def _median_smoothing( indices: Tensor, win_length: int ) -> Tensor:", "x_mu.is_floating_point(): x_mu = x_mu.to(torch.float) mu = torch.tensor(mu, dtype=x_mu.dtype) x =", "lag :math:`m`, :math:`w` is the waveform, :math:`N` is the length", "of size (``n_mels``, ``n_mfcc``). 
\"\"\" # http://en.wikipedia.org/wiki/Discrete_cosine_transform#DCT-II n = torch.arange(float(n_mels))", "num_feats, dtype=dtype, device=device) cmn_waveform = torch.zeros( num_channels, num_frames, num_feats, dtype=dtype,", "https://github.com/pytorch/pytorch/issues/34279 return complex_tensor.pow(2.).sum(-1).pow(0.5 * power) def angle( complex_tensor: Tensor )", "shape[-3] if x_db.dim() > 2 else 1 x_db = x_db.reshape(-1,", "math.sqrt(2.0) dct *= math.sqrt(2.0 / float(n_mels)) return dct.t() def mu_law_encoding(", "STFT windows. ( Default: ``win_length // 2``) win_length (int): Window", "decibel scale. The output of each tensor in a batch", "channels_first, compression, format, encoding, bits_per_sample ) bytes.seek(0) augmented, _ =", "(Tensor): Tensor shape of `(..., complex=2)` power (float): Power of", "its magnitude and phase. Args: complex_tensor (Tensor): Tensor shape of", "imag_stretch = mag * torch.sin(phase_acc) complex_specgrams_stretch = torch.stack([real_stretch, imag_stretch], dim=-1)", "r\"\"\" Compute Normalized Cross-Correlation Function (NCCF). .. math:: \\phi_i(m) =", "our estimate of the signal energy. Relevant if ``frames_per_chunk >", "by. power (float): If power equals 1, will compute DB", "If ``channels_first=True``, it has ``[channel, time]`` else ``[time, channel]``. \"\"\"", "<NAME>., <NAME>., & <NAME>. \"A fast Griffin-Lim algorithm,\" IEEE Workshop", "and <NAME>, \"Signal estimation from modified short-time Fourier transform,\" IEEE", "kernel, groups=specgram.shape[1]) / denom # unpack batch output = output.reshape(shape)", "that is smallest best = torch.max(nccf[..., lag_min:], -1) half_size =", "raise ValueError('Only Frequency and Time masking are supported') device =", "(frame_to_remove ** 2) if window_end > last_window_end: frame_to_add = waveform[:,", "lowpass filter, more gives sharper filter. 
(default: 1) upsample_filter_width (int,", "Returns: torch.Tensor: waveform of (..., time), where time equals the", "mel_scale: str = \"htk\") -> Tensor: \"\"\"Convert mel bin numbers", "alphas = time_steps % 1.0 phase_0 = angle(complex_specgrams[..., :1, :])", "or complex. Args: waveform (Tensor): Tensor of audio of dimension", "the NCCF at frame :math:`i` with lag :math:`m`, :math:`w` is", "# TODO add \"with torch.no_grad():\" back when JIT supports it", "phase randomly if True, to zero otherwise. Returns: torch.Tensor: waveform", "(Tensor): Input tensor quantization_channels (int): Number of channels Returns: Tensor:", "frame_shift: float = 10.0, min_f0: float = 50, max_f0: float", "compatibility with online pitch extraction. A non-critical parameter; the frame", "a[1] + ~mask * b[1] return values, indices def _find_max_per_frame(", "use `normalized=False`.\") # pack batch shape = specgram.size() specgram =", "top_db).view(-1, 1, 1, 1)) # Repack batch x_db = x_db.reshape(shape)", "= quantization_channels - 1.0 if not x_mu.is_floating_point(): x_mu = x_mu.to(torch.float)", "of mel filterbanks sample_rate (int): Sample rate of the audio", "return augmented @_mod_utils.requires_kaldi() def compute_kaldi_pitch( waveform: torch.Tensor, sample_rate: float, frame_length:", "= torch.cumsum(phase, -1) mag = alphas * norm_1 + (1", "retain n_mels (int): Number of mel filterbanks norm (str or", "- 1) // 2 # \"replicate\" padding in any dimension", "= \"htk\") -> float: r\"\"\"Convert Hz to Mels. Args: freqs", "(n + 1) * (2 * n + 1) /", "momentum (float): The momentum parameter for fast Griffin-Lim. 
Setting this", "mel_scale=mel_scale) # calculate the difference between each mel point and", "= torch.ops.torchaudio.kaldi_ComputeKaldiPitch( waveform, sample_rate, frame_length, frame_shift, min_f0, max_f0, soft_min_f0, penalty_factor,", "spec_f.abs().pow(power) return torch.view_as_real(spec_f) def griffinlim( specgram: Tensor, window: Tensor, n_fft:", "waveform (Tensor): Tensor of audio of dimension (..., time) sample_rate", "+= 1 return indices def _median_smoothing( indices: Tensor, win_length: int", "= torch.arange(float(n_mfcc)).unsqueeze(1) dct = torch.cos(math.pi / float(n_mels) * (n +", ") -> Tensor: r\"\"\"Compute the angle of complex tensor input.", ":1, :]) # Time Padding complex_specgrams = torch.nn.functional.pad(complex_specgrams, [0, 0,", "def apply_codec( waveform: Tensor, sample_rate: int, format: str, channels_first: bool", "spectrogram from the power/amplitude scale to the decibel scale. The", "returns a signal encoded with values from 0 to quantization_channels", "Tensor: r\"\"\" Apply codecs as a form of augmentation. Args:", "100) center (bool, optional): If true, use a window centered", "frame at which we recompute some of the forward pointers,", "hertz f_diff = f_pts[1:] - f_pts[:-1] # (n_mels + 1)", "masking are supported') device = specgrams.device dtype = specgrams.dtype value", "1.0 if not x.is_floating_point(): x = x.to(torch.float) mu = torch.tensor(mu,", "win_length=win_length, window=window, length=length) # unpack batch waveform = waveform.reshape(shape[:-2] +", "= specgram.reshape(1, -1, shape[-1]) assert win_length >= 3 n =", ">>> freq, hop_length = 1025, 512 >>> # (channel, freq,", "and Time masking are supported') # unpack batch specgram =", "> 0`` and ``simulate_first_pass_online=True``) (default: 0) frames_per_chunk (int, optional): The", "x_db = multiplier * torch.log10(torch.clamp(x, min=amin)) x_db -= multiplier *", "quantization_channels: int ) -> Tensor: r\"\"\"Encode signal based on mu-law", "Cost factor for FO change. 
(default: 0.1) lowpass_cutoff (float, optional):", "clip. Args: x (Tensor): Input spectrogram(s) before being converted to", "+ waveform.shape[-1:]) return waveform def amplitude_to_DB( x: Tensor, multiplier: float,", "-= multiplier * db_multiplier if top_db is not None: #", "to amplitude. Returns: Tensor: Output tensor in power/amplitude scale. \"\"\"", "Tensor, cmn_window: int = 600, min_cmn_window: int = 100, center:", "n = torch.arange(float(n_mels)) k = torch.arange(float(n_mfcc)).unsqueeze(1) dct = torch.cos(math.pi /", "complex_specgrams_1 = complex_specgrams.index_select(-2, (time_steps + 1).long()) angle_0 = angle(complex_specgrams_0) angle_1", "= 4000, delta_pitch: float = 0.005, nccf_ballast: float = 7000,", "extraction. A non-critical parameter; the frame at which we recompute", "For more details see :py:func:`torchaudio.backend.sox_io_backend.save`. Returns: torch.Tensor: Resulting Tensor. If", "(default: 500) snip_edges (bool, optional): If this is set to", "= torch.max(x_db, (x_db.amax(dim=(-3, -2, -1)) - top_db).view(-1, 1, 1, 1))", "0.5, will compute DB to amplitude. Returns: Tensor: Output tensor", "freq, frames) angles = torch.stack([angles.cos(), angles.sin()], dim=-1) \\ .to(dtype=specgram.dtype, device=specgram.device)", "snippets vs. a full clip. Args: x (Tensor): Input spectrogram(s)", "``frames_per_chunk > 0`` and ``simulate_first_pass_online=True``) (default: 0) frames_per_chunk (int, optional):", "<NAME>, <NAME>, <NAME>, <NAME>, <NAME> and <NAME> 2014 IEEE International", "tuned for automatic speech recognition <NAME>, <NAME>, <NAME>, <NAME>, <NAME>", ":math:`N` is ``(win_length-1)//2``. Args: specgram (Tensor): Tensor of audio of", "angles, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=window, length=length) # unpack batch waveform", "mask_param (int): Number of columns to be masked will be", "input waveform of shape `(..., time)`. 
sample_rate (float): Sample rate", "0`` and ``simulate_first_pass_online=True``) (default: 0) frames_per_chunk (int, optional): The number", "/ 3 freqs = f_min + f_sp * mels #", "smoothing to the 1D tensor over the given window. \"\"\"", "if window_frames == 1: cmn_waveform[:, t, :] = torch.zeros( num_channels,", "(Tensor): Tensor of audio of dimension (..., time) sample_rate (int):", "recognition <NAME>, <NAME>, <NAME>, <NAME>, <NAME> and <NAME> 2014 IEEE", "= waveform.reshape(-1, shape[-1]) result = torch.ops.torchaudio.kaldi_ComputeKaldiPitch( waveform, sample_rate, frame_length, frame_shift,", "2 # \"replicate\" padding in any dimension indices = torch.nn.functional.pad(", "norm is None: dct *= 2.0 else: assert norm ==", "is implemented using normalized cross-correlation function and median smoothing. Args:", "231 == ceil(300 / 1.3) torch.Size([2, 1025, 231, 2]) \"\"\"", "variance) normalization per utterance. Args: waveform (Tensor): Tensor of audio", "freqs def create_fb_matrix( n_freqs: int, f_min: float, f_max: float, n_mels:", "(must be > 0) e.g., 1 for energy, 2 for", "If the max among all the lags is very close", "set to false, the incomplete frames near the ending edge", "extraction algorithm tuned for automatic speech recognition <NAME>, <NAME>, <NAME>,", "win_length=win_length, power=1., normalized=False) freqs = torch.linspace(0, sample_rate // 2, steps=1", "be unstable'.format(momentum) assert momentum >= 0, 'momentum={} < 0'.format(momentum) if", "(Tensor): Tensor of audio of dimension (..., time) pad (int):", "Mask will be applied from indices ``[v_0, v_0 + v)``,", "snip_edges (bool, optional): If this is set to false, the", "unpack batch output = output.reshape(shape) return output def _compute_nccf( waveform:", "1) / 3 specgram = torch.nn.functional.pad(specgram, (n, n), mode=mode) kernel", "of latency that we allow pitch tracking to introduce into", "return different values for an audio clip split into snippets", "= rebuilt # Invert with our 
current estimate of the", "\"\"\" if norm is not None and norm != \"slaney\":", "Hz \"\"\" if mel_scale not in ['slaney', 'htk']: raise ValueError('mel_scale", "expects an input with values between 0 and quantization_channels -", "int, frame_time: float, freq_low: int ) -> Tensor: r\"\"\" Compute", "(-2), win_length: int = 30, freq_low: int = 85, freq_high:", "torch.istft(specgram * angles, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=window, length=length).float() # Rebuild", "or None, optional): Minimum negative cut-off in decibels. A reasonable", "\"mu_law_encoding\", \"mu_law_decoding\", \"complex_norm\", \"angle\", \"magphase\", \"phase_vocoder\", 'mask_along_axis', 'mask_along_axis_iid', 'sliding_window_cmn', \"spectral_centroid\",", "25.0, frame_shift: float = 10.0, min_f0: float = 50, max_f0:", "in [1]. This function computes the equivalent of `compute-kaldi-pitch-feats` from", "with lag :math:`m`, :math:`w` is the waveform, :math:`N` is the", "= spec_f.reshape(shape[:-1] + spec_f.shape[-2:]) if normalized: spec_f /= window.pow(2.).sum().sqrt() if", "int(math.ceil(sample_rate / freq_low)) frame_size = int(math.ceil(sample_rate * frame_time)) waveform_length =", ":math:`t`, :math:`N` is ``(win_length-1)//2``. Args: specgram (Tensor): Tensor of audio", "freq_high: int ) -> Tensor: r\"\"\" For each frame, take", "spectral centroid for each channel along the time axis. The", ") ) # Update our phase estimates angles = rebuilt", "of that tensor, and so may return different values for", "mean (and optionally variance) normalization per utterance. Args: waveform (Tensor):", "range(num_frames): window_start = 0 window_end = 0 if center: window_start", "(window_end - num_frames) window_end = num_frames if window_start < 0:", "x_mu = torch.sign(x) * torch.log1p(mu * torch.abs(x)) / torch.log1p(mu) x_mu", "(int): Size of FFT, creates ``n_fft // 2 + 1``", "at time :math:`t`, :math:`N` is ``(win_length-1)//2``. 
Args: specgram (Tensor): Tensor", "freq_low)) frame_size = int(math.ceil(sample_rate * frame_time)) waveform_length = waveform.size()[-1] num_of_frames", "(Hz) n_mels (int): Number of mel filterbanks sample_rate (int): Sample", "device=device) cur_sumsq = torch.zeros(num_channels, num_feats, dtype=dtype, device=device) cmn_waveform = torch.zeros(", "freq.reshape(shape[:-1] + list(freq.shape[-1:])) return freq def sliding_window_cmn( waveform: Tensor, cmn_window:", ") -> Tensor: r\"\"\" Apply a mask along ``axis``. Mask", "def amplitude_to_DB( x: Tensor, multiplier: float, amin: float, db_multiplier: float,", "the feature processing (affects output only if ``frames_per_chunk > 0``", "_find_max_per_frame( nccf: Tensor, sample_rate: int, freq_high: int ) -> Tensor:", "'htk']: raise ValueError('mel_scale should be one of \"htk\" or \"slaney\".')", "== -1: input_part = waveform[:, window_start: window_end - window_start, :]", "n_fft: int, hop_length: int, win_length: int, power: Optional[float], normalized: bool,", "\"\"\" # Replace by torch.norm once issue is fixed #", ":math:`E(j)` is the energy :math:`\\sum_{n=j}^{j+N-1} w^2(n)`. \"\"\" EPSILON = 10", "``(batch, frames 2)`` where the last dimension corresponds to pitch", "magnitude after stft. n_iter (int): Number of iteration for phase", "n_iter (int): Number of iteration for phase recovery process. momentum", "if not center: if window_end > t: window_end = max(t", "frame_length (float, optional): Frame length in milliseconds. 
(default: 25.0) frame_shift", "/ float(n_mels)) return dct.t() def mu_law_encoding( x: Tensor, quantization_channels: int", "(``n_freqs``, ``n_mels``) meaning number of frequencies to highlight/apply to x", "matrix A of size (..., ``n_freqs``), the applied result would", "(batch, channel, freq, time) \"\"\" if axis != 2 and", "torch.max(nccf[..., lag_min:], -1) half_size = nccf.shape[-1] // 2 half =", "-> Tensor: r\"\"\" For each frame, take the highest value", "if pad > 0: # TODO add \"with torch.no_grad():\" back", "\\ .to(dtype=specgram.dtype, device=specgram.device) specgram = specgram.unsqueeze(-1).expand_as(angles) # And initialize the", "win_length (int): Window size Returns: Tensor: Dimension (..., time) \"\"\"", "a form of augmentation. Args: waveform (Tensor): Audio data. Must", "value = torch.rand(1) * mask_param min_value = torch.rand(1) * (specgram.size(axis)", "Compute Normalized Cross-Correlation Function (NCCF). .. math:: \\phi_i(m) = \\frac{\\sum_{n=b_i}^{b_i", "\"\"\" return torch.atan2(complex_tensor[..., 1], complex_tensor[..., 0]) def magphase( complex_tensor: Tensor,", "power: float = 1.0 ) -> Tuple[Tensor, Tensor]: r\"\"\"Separate a", "pack batch shape = list(waveform.size()) waveform = waveform.reshape([-1] + shape[-1:])", "n_fft (int): Size of FFT, creates ``n_fft // 2 +", "phase batch, freq, frames = specgram.size() if rand_init: angles =", "waveform.size() waveform = waveform.reshape(-1, shape[-1]) # default values are consistent", "n_fft: int, hop_length: int, win_length: int, ) -> Tensor: r\"\"\"", "< mask_param if axis == 1: specgram[:, mask_start:mask_end] = mask_value", "Args: complex_tensor (Tensor): Tensor shape of `(..., complex=2)` Return: Tensor:", "point in hertz f_diff = f_pts[1:] - f_pts[:-1] # (n_mels", "Window size Returns: Tensor: Dimension (..., time) \"\"\" specgram =", "controls whether to return half of results to avoid redundancy.", "if len(input_shape) == 2: cmn_waveform = cmn_waveform.squeeze(0) return cmn_waveform 
def", "\"compute_kaldi_pitch\", \"create_fb_matrix\", \"create_dct\", \"compute_deltas\", \"detect_pitch_frequency\", \"DB_to_amplitude\", \"mu_law_encoding\", \"mu_law_decoding\", \"complex_norm\", \"angle\",", "speed up in time without modifying pitch by a factor", "= torch.arange(0, specgrams.size(axis), device=device, dtype=dtype) # Per batch example masking", "int, hop_length: int, win_length: int, power: Optional[float], normalized: bool, center:", "Returns: Tensor: Input after mu-law encoding \"\"\" mu = quantization_channels", "utterance. Args: waveform (Tensor): Tensor of audio of dimension (...,", "num_feats = input_shape[-2:] waveform = waveform.view(-1, num_frames, num_feats) num_channels =", "angle_0 = angle(complex_specgrams_0) angle_1 = angle(complex_specgrams_1) norm_0 = torch.norm(complex_specgrams_0, p=2,", ":] = torch.zeros( num_channels, num_feats, dtype=dtype, device=device) else: variance =", "// 2 half = torch.max(nccf[..., lag_min:half_size], -1) best = _combine_max(half,", "1, upsample_filter_width: int = 5, max_frames_latency: int = 0, frames_per_chunk:", "to pitch and NCCF. Reference: - A pitch extraction algorithm", "mask_value (float): Value to assign to the masked columns axis", "mask_param if axis == 1: specgram[:, mask_start:mask_end] = mask_value elif", "w(n) w(m+n)}{\\sqrt{E(b_i) E(m+b_i)}}, where :math:`\\phi_i(m)` is the NCCF at frame", "= _mel_to_hz(m_pts, mel_scale=mel_scale) # calculate the difference between each mel", "* 2 - 1.0 x = torch.sign(x) * (torch.exp(torch.abs(x) *", "lead to faster convergence, but above 1 may not converge.", "= 1.0 ) -> Tensor: r\"\"\"Compute the norm of complex", "rate of the waveform (Hz) frame_time (float, optional): Duration of", "the latter is taken. 
\"\"\" lag_min = int(math.ceil(sample_rate / freq_high))", "frame_time (float, optional): Duration of a frame (Default: ``10 **", "= -1 cur_sum = torch.zeros(num_channels, num_feats, dtype=dtype, device=device) cur_sumsq =", "n_mfcc (int): Number of mfc coefficients to retain n_mels (int):", "phase_vocoder( complex_specgrams: Tensor, rate: float, phase_advance: Tensor ) -> Tensor:", "(n + 0.5) * k) # size (n_mfcc, n_mels) if", "For each frame, take the highest value of NCCF, apply", "if window_end > num_frames: window_start -= (window_end - num_frames) window_end", "in Hz mel_scale (str, optional): Scale to use: ``htk`` or", "Tensor: Dimension (..., freq, time), freq is ``n_fft // 2", ") -> Tensor: r\"\"\"Encode signal based on mu-law companding. For", "on Acoustics, Speech and Signal Processing (ICASSP), Florence, 2014, pp.", "normalized=False) freqs = torch.linspace(0, sample_rate // 2, steps=1 + n_fft", "window_start last_window_start = window_start last_window_end = window_end cmn_waveform[:, t, :]", "\"htk\", ) -> Tensor: r\"\"\"Create a frequency bin conversion matrix.", "maximum value of that tensor, and so may return different", "`(..., complex=2)` power (float): Power of the norm. (Default: `1.0`).", "shape = list(waveform.size()) waveform = waveform.reshape([-1] + shape[-1:]) nccf =", "-> Tensor: r\"\"\"Turn a spectrogram from the power/amplitude scale to", "science conference, pp. 18-25. 2015. * [2] <NAME>., <NAME>., &", "``\"reflect\"`` onesided (bool, optional): controls whether to return half of", "< 1, 'momentum={} > 1 can be unstable'.format(momentum) assert momentum", "If power equals 1, will compute DB to power. If", "norm. 
Args: n_mfcc (int): Number of mfc coefficients to retain", "-> time) Returns: Tensor: Masked spectrogram of dimensions (channel, freq,", "Apply median smoothing to the 1D tensor over the given", "or \"slaney\".') if mel_scale == \"htk\": return 700.0 * (10.0**(mels", "_mod_utils import torchaudio __all__ = [ \"spectrogram\", \"griffinlim\", \"amplitude_to_DB\", \"DB_to_amplitude\",", "conversion matrix. Args: n_freqs (int): Number of frequencies to highlight/apply", "stft freq point in hertz f_diff = f_pts[1:] - f_pts[:-1]", "the `Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_ This algorithm assumes the signal has", "to the masked columns axis (int): Axis to apply masking", "1) // 2 # twice sum of integer squared denom", "must not exceed min-f0 (default: 10.0) penalty_factor (float, optional): Cost", "create_fb_matrix( n_freqs: int, f_min: float, f_max: float, n_mels: int, sample_rate:", "(default: 0) simulate_first_pass_online (bool, optional): If true, the function will", "sample_rate, freq_high) indices = _median_smoothing(indices, win_length) # Convert indices to", "not None and norm != \"slaney\": raise ValueError(\"norm must be", "Tensor: r\"\"\"Compute waveform from a linear scale magnitude spectrogram using", "the norm of complex tensor input. Args: complex_tensor (Tensor): Tensor", "complex=2)` Example >>> freq, hop_length = 1025, 512 >>> #", "math.log(freq / min_log_hz) / logstep return mels def _mel_to_hz(mels: Tensor,", "* math.pi)) # Compute Phase Accum phase = phase +", "Tensor: Output tensor in power/amplitude scale. \"\"\" return ref *", "(int): Axis to apply masking on (2 -> frequency, 3", "Tensor of freq of dimension (..., frame) \"\"\" input_shape =", "cmn_waveform.squeeze(0) return cmn_waveform def spectral_centroid( waveform: Tensor, sample_rate: int, pad:", "= 3400, ) -> Tensor: r\"\"\"Detect pitch frequency. It is", "of the audio waveform. format (str): File format. 
channels_first (bool):", "relative change in pitch that our algorithm measures. (default: 0.005)", "time :math:`t`, :math:`N` is ``(win_length-1)//2``. Args: specgram (Tensor): Tensor of", "torchaudio._internal import module_utils as _mod_utils import torchaudio __all__ = [", "axis. The spectral centroid is defined as the weighted average", ">>> # (channel, freq, time, complex=2) >>> complex_specgrams = torch.randn(2,", "- tprev.mul_(momentum / (1 + momentum)) angles = angles.div(complex_norm(angles).add(1e-16).unsqueeze(-1).expand_as(angles)) #", "enorm.unsqueeze(0) if (fb.max(dim=0).values == 0.).any(): warnings.warn( \"At least one mel", "tprev = rebuilt # Invert with our current estimate of", "or None) Returns: Tensor: The transformation matrix, to be right-multiplied", "\"slaney\": raise ValueError(\"norm must be one of None or 'slaney'\")", "= torch.rand(specgrams.shape[:2], device=device, dtype=dtype) * (specgrams.size(axis) - value) # Create", "different types of features give the same number of frames.", "shift in milliseconds. (default: 10.0) min_f0 (float, optional): Minimum F0", "r\"\"\" For each frame, take the highest value of NCCF,", "pad_mode=pad_mode, normalized=False, onesided=onesided, return_complex=True, ) # unpack batch spec_f =", "2.0 else: assert norm == \"ortho\" dct[0] *= 1.0 /", "a raw audio signal. The spectrogram can be either magnitude-only", "+ specgram.shape[-2:]) return specgram def compute_deltas( specgram: Tensor, win_length: int", "(EPSILON + indices.to(torch.float)) # unpack batch freq = freq.reshape(shape[:-1] +", "Array length of the expected output. rand_init (bool): Initializes phase", "use: ``htk`` or ``slaney``. 
(Default: ``htk``) Returns: freqs (Tensor): Mels", "of `(..., freq, ceil(time/rate), complex=2)` Example >>> freq, hop_length =", "estimates angles = rebuilt if momentum: angles = angles -", "torch.arange(float(n_mfcc)).unsqueeze(1) dct = torch.cos(math.pi / float(n_mels) * (n + 0.5)", "with our current estimate of the phases inverse = torch.istft(specgram", "between each mel point and each stft freq point in", "\"phase_vocoder\", 'mask_along_axis', 'mask_along_axis_iid', 'sliding_window_cmn', \"spectral_centroid\", \"apply_codec\", ] def spectrogram( waveform:", "optional): Window in frames for running average CMN computation (int,", "+ 1`` and ``n_fft`` is the number of Fourier bins,", "DB_to_amplitude( x: Tensor, ref: float, power: float ) -> Tensor:", "all_freqs.unsqueeze(1) # (n_freqs, n_mels + 2) # create overlapping triangles", "def mu_law_encoding( x: Tensor, quantization_channels: int ) -> Tensor: r\"\"\"Encode", "linear part f_min = 0.0 f_sp = 200.0 / 3", "x def complex_norm( complex_tensor: Tensor, power: float = 1.0 )", "with shape `(..., 2)` into its magnitude and phase. Args:", "coefficients of a tensor, usually a spectrogram: .. math:: d_t", "it waveform = torch.nn.functional.pad(waveform, (pad, pad), \"constant\") # pack batch", "they have dimension ``[time, channel]``. compression (float): Used for formats", "dim=-1)).pow(2) ) output_lag.append(output_frames.unsqueeze(-1)) nccf = torch.cat(output_lag, -1) return nccf def", "win_length (int): Window size. (Default: ``n_fft``) power (float): Exponent for", "Args: n_mfcc (int): Number of mfc coefficients to retain n_mels", "right-multiplied to row-wise data of size (``n_mels``, ``n_mfcc``). 
\"\"\" #", "= torch.zeros(num_channels, num_feats, dtype=dtype, device=device) cur_sumsq = torch.zeros(num_channels, num_feats, dtype=dtype,", "hop_length=hop_length, win_length=win_length, window=window, center=center, pad_mode=pad_mode, normalized=False, onesided=onesided, return_complex=True, ) #", "the decibel scale. The output of each tensor in a", "phase_advance) >>> x.shape # with 231 == ceil(300 / 1.3)", "spec_f = spec_f.reshape(shape[:-1] + spec_f.shape[-2:]) if normalized: spec_f /= window.pow(2.).sum().sqrt()", "(float): Exponent for the magnitude spectrogram, (must be > 0)", "+ momentum)) angles = angles.div(complex_norm(angles).add(1e-16).unsqueeze(-1).expand_as(angles)) # Return the final phase", "if axis != 2 and axis != 3: raise ValueError('Only", "this to 0 recovers the original Griffin-Lim method. Values near", "1) Returns: Tensor: Complex Specgrams Stretch with dimension of `(...,", "simulate_first_pass_online, recompute_frame, snip_edges, ) result = result.reshape(shape[:-1] + result.shape[-2:]) return", "For more info see the `Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_ This algorithm", "= waveform.shape waveform = waveform.reshape(-1, shape[-1]) result = torch.ops.torchaudio.kaldi_ComputeKaldiPitch( waveform,", "18-25. 2015. * [2] <NAME>., <NAME>., & <NAME>. \"A fast", "-1) return specgrams def mask_along_axis( specgram: Tensor, mask_param: int, mask_value:", "_ = torchaudio.sox_effects.sox_effects.apply_effects_file( bytes, effects=[[\"rate\", f\"{sample_rate}\"]], channels_first=channels_first, format=format) return augmented", "optional): Maximum number of frames of latency that we allow", "(mask < mask_end), mask_value) specgrams = specgrams.transpose(axis, -1) return specgrams", "Repack batch x_db = x_db.reshape(shape) return x_db def DB_to_amplitude( x:", "Relevant if ``frames_per_chunk > 0``. 
recompute_frame (int, optional): Only relevant", "sided padding of signal window (Tensor): Window tensor that is", "(Tensor): Expected phase advance in each bin. Dimension of (freq,", "to quantization_channels - 1. Args: x (Tensor): Input tensor quantization_channels", "optional): Frame shift in milliseconds. (default: 10.0) min_f0 (float, optional):", "filter. (default: 1) upsample_filter_width (int, optional): Integer that determines filter", "not exceed min-f0 (default: 10.0) penalty_factor (float, optional): Cost factor", "and 1. Args: x_mu (Tensor): Input tensor quantization_channels (int): Number", "freq, 300, 2) >>> rate = 1.3 # Speed up", "(and optionally variance) normalization per utterance. Args: waveform (Tensor): Tensor", "rate of the audio waveform pad (int): Two sided padding", "* math.log10(1.0 + (freq / 700.0)) # Fill in the", "batch depends on the maximum value of that tensor, and", "elementwise. \"\"\" mask = (a[0] > thresh * b[0]) values", "+ 1``. window (Tensor): Window tensor that is applied/multiplied to", "specgram.size() specgram = specgram.reshape([-1] + list(shape[-2:])) value = torch.rand(1) *", "is applied/multiplied to each frame/window n_fft (int): Size of FFT,", "by 30% >>> phase_advance = torch.linspace( >>> 0, math.pi *", "-> Tensor: r\"\"\"Encode signal based on mu-law companding. 
For more", "= None, mel_scale: str = \"htk\", ) -> Tensor: r\"\"\"Create", "Example >>> specgram = torch.randn(1, 40, 1000) >>> delta =", "use a window centered on the current frame (to the", "up in time without modifying pitch by a factor of", "for `n_freqs` ({n_freqs}) may be set too low.\" ) return", "f\"The value for `n_mels` ({n_mels}) may be set too high.", "mask_start:mask_end] = mask_value else: raise ValueError('Only Frequency and Time masking", "of lags to check lags = int(math.ceil(sample_rate / freq_low)) frame_size", "= complex_specgrams.reshape([-1] + list(shape[-3:])) time_steps = torch.arange(0, complex_specgrams.size(-2), rate, device=complex_specgrams.device,", "which is the default. (default: False) Relevant if ``frames_per_chunk >", "if mel_scale == \"htk\": return 2595.0 * math.log10(1.0 + (freq", "angle_1 = angle(complex_specgrams_1) norm_0 = torch.norm(complex_specgrams_0, p=2, dim=-1) norm_1 =", "more info see the `Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_ This expects an", "correspond to what an online decoder would see in the", "> thresh * b[0]) values = mask * a[0] +", "module_utils as _mod_utils import torchaudio __all__ = [ \"spectrogram\", \"griffinlim\",", "latency only at start). Only applicable if center == false,", ":] *= variance cmn_waveform = cmn_waveform.view(input_shape[:-2] + (num_frames, num_feats)) if", "factor phase_advance (Tensor): Expected phase advance in each bin. Dimension", "torch.nn.functional.pad( indices, (pad_length, 0), mode=\"constant\", value=0. 
) indices[..., :pad_length] =", "mel_scale: str = \"htk\") -> float: r\"\"\"Convert Hz to Mels.", "windowed pad_length = (win_length - 1) // 2 # \"replicate\"", "if power == 1.0: return spec_f.abs() return spec_f.abs().pow(power) return torch.view_as_real(spec_f)", "= 1025, 512 >>> # (channel, freq, time, complex=2) >>>", "= torch.max(nccf[..., lag_min:], -1) half_size = nccf.shape[-1] // 2 half", "27.0 if freq >= min_log_hz: mels = min_log_mel + math.log(freq", "then the latter is taken. \"\"\" lag_min = int(math.ceil(sample_rate /", "- 1.0 x = torch.sign(x) * (torch.exp(torch.abs(x) * torch.log1p(mu)) -", "of audio of dimension (..., freq, time) win_length (int, optional):", "_compute_nccf(waveform, sample_rate, frame_time, freq_low) indices = _find_max_per_frame(nccf, sample_rate, freq_high) indices", "* torch.rand(batch, freq, frames) else: angles = torch.zeros(batch, freq, frames)", "n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=window, center=True, pad_mode='reflect', normalized=False, onesided=True, return_complex=True, )", "their magnitude. Args: waveform (Tensor): Tensor of audio of dimension", "int, window: Tensor, n_fft: int, hop_length: int, win_length: int, )", "shape[-1]) # default values are consistent with librosa.core.spectrum._spectrogram spec_f =", "10. for power and 20. for amplitude amin (float): Number", "max among all the lags is very close to the", "part f_min = 0.0 f_sp = 200.0 / 3 mels", "# unpack batch waveform = waveform.reshape(shape[:-2] + waveform.shape[-1:]) return waveform", "inputs should include a channel dimension and have the form", "Use 10. for power and 20. 
for amplitude amin (float):", "/ mu return x def complex_norm( complex_tensor: Tensor, power: float", "* [indices[..., pad_length].unsqueeze(-1)], dim=-1) roll = indices.unfold(-1, win_length, 1) values,", "Integer that determines filter width of lowpass filter, more gives", "scale \"\"\" x_db = multiplier * torch.log10(torch.clamp(x, min=amin)) x_db -=", "time) Returns: Tensor: Masked spectrograms of dimensions (batch, channel, freq,", "CMN window used at start of decoding (adds latency only", "f_max (float): Maximum frequency (Hz) n_mels (int): Number of mel", "-= window_start window_start = 0 if not center: if window_end", "waveform (Tensor): Audio data. Must be 2 dimensional. See also", "win_length=win_length, window=window, center=True, pad_mode='reflect', normalized=False, onesided=True, return_complex=True, ) ) #", "sample_rate (int): Sample rate of the audio waveform pad (int):", "a full clip. Args: x (Tensor): Input spectrogram(s) before being", "method described in [1]. This function computes the equivalent of", "dtype = waveform.dtype device = waveform.device last_window_start = last_window_end =", "// 2 + 1``. window (Tensor): Window tensor that is", "-1) return values def detect_pitch_frequency( waveform: Tensor, sample_rate: int, frame_time:", "frames_per_chunk: int = 0, simulate_first_pass_online: bool = False, recompute_frame: int", "f_pts[:n_mels]) fb *= enorm.unsqueeze(0) if (fb.max(dim=0).values == 0.).any(): warnings.warn( \"At", "(2 -> frequency, 3 -> time) Returns: Tensor: Masked spectrograms", "not in ['slaney', 'htk']: raise ValueError('mel_scale should be one of", "audio clip split into snippets vs. a full clip. 
Args:", "1, device=device, dtype=dtype).repeat(specgram.shape[1], 1, 1) output = torch.nn.functional.conv1d(specgram, kernel, groups=specgram.shape[1])", "scaled to between -1 and 1 and returns a signal", "2 + 1`` bins hop_length (int): Length of hop between", "(float): If power equals 1, will compute DB to power.", "length=length).float() # Rebuild the spectrogram rebuilt = torch.view_as_real( torch.stft( input=inverse,", "packed_channels, shape[-2], shape[-1]) x_db = torch.max(x_db, (x_db.amax(dim=(-3, -2, -1)) -", "[1] McFee, Brian, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>.", "(..., freq, time) win_length (int, optional): The window length used", "mels (float): Frequency in Mels \"\"\" if mel_scale not in", "specgram = torch.nn.functional.pad(specgram, (n, n), mode=mode) kernel = torch.arange(-n, n", "'ortho' or None) Returns: Tensor: The transformation matrix, to be", "None, ) -> Tensor: r\"\"\" Apply codecs as a form", "\"htk\") -> float: r\"\"\"Convert Hz to Mels. Args: freqs (float):", "specgram = specgram.unsqueeze(-1).expand_as(angles) # And initialize the previous iterate to", "== 1: cmn_waveform[:, t, :] = torch.zeros( num_channels, num_feats, dtype=dtype,", "- f_pts[:n_mels]) fb *= enorm.unsqueeze(0) if (fb.max(dim=0).values == 0.).any(): warnings.warn(", ") -> Tensor: r\"\"\"Create a frequency bin conversion matrix. Args:", "audio waveform pad (int): Two sided padding of signal window", "power equals 1, will compute DB to power. If 0.5,", "should be one of \"htk\" or \"slaney\".') if mel_scale ==", "< 0'.format(momentum) if normalized: warnings.warn( \"The argument normalized is not", "(float): Speed-up factor phase_advance (Tensor): Expected phase advance in each", "milliseconds. 
(default: 25.0) frame_shift (float, optional): Frame shift in milliseconds.", "each stft freq point in hertz f_diff = f_pts[1:] -", "linear scale f_min = 0.0 f_sp = 200.0 / 3", "decibel scale \"\"\" x_db = multiplier * torch.log10(torch.clamp(x, min=amin)) x_db", "masked columns axis (int): Axis to apply masking on (2", "indices = best[1] # Add back minimal lag indices +=", "return spec_f.abs() return spec_f.abs().pow(power) return torch.view_as_real(spec_f) def griffinlim( specgram: Tensor,", "(float): Frequencies in Hz mel_scale (str, optional): Scale to use:", "optional): Smallest relative change in pitch that our algorithm measures.", "be applied from indices ``[v_0, v_0 + v)``, where ``v``", "last_window_end, :] cur_sum += frame_to_add if norm_vars: cur_sumsq += (frame_to_add", "True, both the input and output Tensor have dimension ``[channel,", "end effects). If false, window is to the left. (bool,", "if normalized: spec_f /= window.pow(2.).sum().sqrt() if power is not None:", "\"constant\") # pack batch shape = waveform.size() waveform = waveform.reshape(-1,", "``n_mfcc``), normalized depending on norm. Args: n_mfcc (int): Number of", "Tensor of audio of dimension (..., freq, time) cmn_window (int,", "for the magnitude spectrogram, (must be > 0) e.g., 1", "== \"htk\": return 700.0 * (10.0**(mels / 2595.0) - 1.0)", "the power/amplitude scale. Args: x (Tensor): Input tensor before being", "transformation matrix, to be right-multiplied to row-wise data of size", "phase = torch.cat([phase_0, phase[..., :-1]], dim=-1) phase_acc = torch.cumsum(phase, -1)", "(int, optional): Lowest frequency that can be detected (Hz) (Default:", "/ 700.0)) # Fill in the linear part f_min =", "Axis to apply masking on (1 -> frequency, 2 ->", "list(waveform.size()) waveform = waveform.reshape([-1] + shape[-1:]) nccf = _compute_nccf(waveform, sample_rate,", "if center==true (int, default = 100) center (bool, optional): If", "online pitch extraction. 
A non-critical parameter; the frame at which", "int, win_length: int, ) -> Tensor: r\"\"\" Compute the spectral", "on the maximum value of that tensor, and so may", "spectrogram can be either magnitude-only or complex. Args: waveform (Tensor):", "hop between STFT windows. ( Default: ``win_length // 2``) win_length", "for energy, 2 for power, etc. normalized (bool): Whether to", "\\text{hop\\_length}`. Default: ``True`` pad_mode (string, optional): controls the padding method", "pp. 18-25. 2015. * [2] <NAME>., <NAME>., & <NAME>. \"A", "of \"htk\" or \"slaney\".') if mel_scale == \"htk\": return 2595.0", "= f_min + f_sp * mels # And now the", "shape of `(..., complex=2)` power (float): Power of the norm.", "a frame, :math:`b_i` is the beginning of frame :math:`i`, :math:`E(j)`", "variance = cur_sumsq variance = variance / window_frames variance -=", "output def _compute_nccf( waveform: Tensor, sample_rate: int, frame_time: float, freq_low:", "(Default: ``30``). freq_low (int, optional): Lowest frequency that can be", "\"\"\"Extract pitch based on method described in [1]. This function", "freq of dimension (..., frame) \"\"\" # pack batch shape", "take the form `(..., freq, time)`. 
Batched inputs should include", "Returns: Tensor: The transformation matrix, to be right-multiplied to row-wise", "(1 + momentum)) angles = angles.div(complex_norm(angles).add(1e-16).unsqueeze(-1).expand_as(angles)) # Return the final", "1.3) torch.Size([2, 1025, 231, 2]) \"\"\" # pack batch shape", "(bool, optional): If true, the function will output features that", "``n_fft``) power (float): Exponent for the magnitude spectrogram, (must be", "assert momentum < 1, 'momentum={} > 1 can be unstable'.format(momentum)", "(n, n), mode=mode) kernel = torch.arange(-n, n + 1, 1,", "= specgrams.dtype value = torch.rand(specgrams.shape[:2], device=device, dtype=dtype) * mask_param min_value", "win_length) # Convert indices to frequency EPSILON = 10 **", "n_mels + 2) f_pts = _mel_to_hz(m_pts, mel_scale=mel_scale) # calculate the", "freqs = torch.linspace(0, sample_rate // 2, steps=1 + n_fft //", "number is 80. (Default: ``None``) Returns: Tensor: Output tensor in", "None, bits_per_sample: Optional[int] = None, ) -> Tensor: r\"\"\" Apply", "last_window_start = last_window_end = -1 cur_sum = torch.zeros(num_channels, num_feats, dtype=dtype,", "our current estimate of the phases inverse = torch.istft(specgram *", "`(..., freq, time, complex=2)` rate (float): Speed-up factor phase_advance (Tensor):", "is ``n_fft // 2 + 1`` and ``n_fft`` is the", "w(m+n)}{\\sqrt{E(b_i) E(m+b_i)}}, where :math:`\\phi_i(m)` is the NCCF at frame :math:`i`", "\"\"\" device = specgram.device dtype = specgram.dtype # pack batch", "pad_mode (string, optional): controls the padding method used when :attr:`center`", "2 for power, etc. normalized (bool): Whether to normalize by", "to the power/amplitude scale. 
Args: x (Tensor): Input tensor before", "- 1 and returns a signal scaled between -1 and", "latency that we allow pitch tracking to introduce into the", "* math.pi * torch.rand(batch, freq, frames) else: angles = torch.zeros(batch,", "shape = specgram.size() specgram = specgram.reshape([-1] + list(shape[-2:])) value =", ":] else: if window_start > last_window_start: frame_to_remove = waveform[:, last_window_start,", "to zero otherwise. Returns: torch.Tensor: waveform of (..., time), where", "signal window (Tensor): Window tensor that is applied/multiplied to each", "float = 0.99 ) -> Tuple[Tensor, Tensor]: \"\"\" Take value", "(default: 50.0) max_f0 (float, optional): Maximum F0 to search for", "(..., freq, time) Example >>> specgram = torch.randn(1, 40, 1000)", "spectral centroid is defined as the weighted average of the", "offset indices += 1 return indices def _median_smoothing( indices: Tensor,", "Tensor], b: Tuple[Tensor, Tensor], thresh: float = 0.99 ) ->", "> 0) e.g., 1 for energy, 2 for power, etc.", "False, ) -> Tensor: r\"\"\" Apply sliding-window cepstral mean (and", "the energy :math:`\\sum_{n=j}^{j+N-1} w^2(n)`. \"\"\" EPSILON = 10 ** (-9)", "int = 3400, ) -> Tensor: r\"\"\"Detect pitch frequency. It", "Tensor: Tensor of freq of dimension (..., frame) \"\"\" input_shape", "phase. Args: complex_tensor (Tensor): Tensor shape of `(..., complex=2)` power", "input_part = waveform[:, window_start: window_end - window_start, :] cur_sum +=", "(int): Window size Returns: Tensor: Dimension (..., time) \"\"\" specgram", "* (10.0**(mels / 2595.0) - 1.0) # Fill in the", "- num_frames) window_end = num_frames if window_start < 0: window_start", "amin (float): Number to clamp ``x`` db_multiplier (float): Log10(max(reference value", "bool, n_iter: int, momentum: float, length: Optional[int], rand_init: bool )", "return mels def _mel_to_hz(mels: Tensor, mel_scale: str = \"htk\") ->", "300, 2) >>> rate = 1.3 # Speed up by", "for the supported formats. 
For more details see :py:func:`torchaudio.backend.sox_io_backend.save`. bits_per_sample", "window=window, center=center, pad_mode=pad_mode, normalized=False, onesided=onesided, return_complex=True, ) # unpack batch", "waveform_length = waveform.size()[-1] num_of_frames = int(math.ceil(waveform_length / frame_size)) p =", "The input waveform of shape `(..., time)`. sample_rate (float): Sample", "column is a filterbank so that assuming there is a", "/ denom # unpack batch output = output.reshape(shape) return output", "Input after mu-law decoding \"\"\" mu = quantization_channels - 1.0", "= 10 ** (-9) # Number of lags to check", "freq, time) mask_param (int): Number of columns to be masked", "default = 100) center (bool, optional): If true, use a", "x_mu = x_mu.to(torch.float) mu = torch.tensor(mu, dtype=x_mu.dtype) x = ((x_mu)", "window_end - window_start, :] cur_sum += torch.sum(input_part, 1) if norm_vars:", "time without modifying pitch by a factor of ``rate``. Args:", "= angles.div(complex_norm(angles).add(1e-16).unsqueeze(-1).expand_as(angles)) # Return the final phase estimates waveform =", "frame) \"\"\" # pack batch shape = list(waveform.size()) waveform =", "for power, etc. normalized (bool): Whether to normalize by magnitude", ":math:`t`, :math:`c_t` is the spectrogram coeffcients at time :math:`t`, :math:`N`", ":] cur_sum += torch.sum(input_part, 1) if norm_vars: cur_sumsq += torch.cumsum(input_part", "1], complex_tensor[..., 0]) def magphase( complex_tensor: Tensor, power: float =", "highlight/apply to x the number of filterbanks. 
Each column is", "n_fft // 2, device=specgram.device).reshape((-1, 1)) freq_dim = -2 return (freqs", "(s1 * s2).sum(-1) / (EPSILON + torch.norm(s1, p=2, dim=-1)).pow(2) /", "the length of a frame, :math:`b_i` is the beginning of", "max_f0: float = 400, soft_min_f0: float = 10.0, penalty_factor: float", "= torch.arange(-n, n + 1, 1, device=device, dtype=dtype).repeat(specgram.shape[1], 1, 1)", "-> time) Returns: Tensor: Masked spectrograms of dimensions (batch, channel,", ":] output_frames = ( (s1 * s2).sum(-1) / (EPSILON +", "value of NCCF, apply centered median smoothing, and convert to", "example masking specgrams = specgrams.transpose(axis, -1) specgrams.masked_fill_((mask >= mask_start) &", "3400, ) -> Tensor: r\"\"\"Detect pitch frequency. It is implemented", "this is set to false, the incomplete frames near the", "= math.log(6.4) / 27.0 log_t = (mels >= min_log_mel) freqs[log_t]", "dimension (..., freq, time) cmn_window (int, optional): Window in frames", "scaled between -1 and 1. Args: x_mu (Tensor): Input tensor", "STFT windows win_length (int): Window size Returns: Tensor: Dimension (...,", "at which we recompute some of the forward pointers, after", "lowpass_cutoff, resample_frequency, delta_pitch, nccf_ballast, lowpass_filter_width, upsample_filter_width, max_frames_latency, frames_per_chunk, simulate_first_pass_online, recompute_frame,", "width when upsampling NCCF. (default: 5) max_frames_latency (int, optional): Maximum", "/ 3 mels = (freq - f_min) / f_sp #", "resample_frequency (float, optional): Frequency that we down-sample the signal to.", "batch shape = waveform.size() waveform = waveform.reshape(-1, shape[-1]) # default", "window_end = t + 1 if window_start < 0: window_end", "convert to frequency. Note: If the max among all the", "complex_specgrams: Tensor, rate: float, phase_advance: Tensor ) -> Tensor: r\"\"\"Given", "matrix. 
Args: n_freqs (int): Number of frequencies to highlight/apply f_min", "device=device) for t in range(num_frames): window_start = 0 window_end =", "formats. For more details see :py:func:`torchaudio.backend.sox_io_backend.save`. Returns: torch.Tensor: Resulting Tensor.", "pack batch shape = complex_specgrams.size() complex_specgrams = complex_specgrams.reshape([-1] + list(shape[-3:]))", "norm. (Default: `1.0`) Returns: (Tensor, Tensor): The magnitude and phase", "torch from torch import Tensor from torchaudio._internal import module_utils as", "(EPSILON + torch.norm(s2, p=2, dim=-1)).pow(2) ) output_lag.append(output_frames.unsqueeze(-1)) nccf = torch.cat(output_lag,", "`compute-kaldi-pitch-feats` from Kaldi. Args: waveform (Tensor): The input waveform of", "delta = compute_deltas(specgram) >>> delta2 = compute_deltas(delta) \"\"\" device =", "penalty_factor (float, optional): Cost factor for FO change. (default: 0.1)", "return output def _compute_nccf( waveform: Tensor, sample_rate: int, frame_time: float,", "which we recompute some of the forward pointers, after revising", "channels Returns: Tensor: Input after mu-law decoding \"\"\" mu =", "channel, freq, time) \"\"\" if axis != 2 and axis", "delta_pitch, nccf_ballast, lowpass_filter_width, upsample_filter_width, max_frames_latency, frames_per_chunk, simulate_first_pass_online, recompute_frame, snip_edges, )", "Masked spectrogram of dimensions (channel, freq, time) \"\"\" # pack", "waveform of shape `(..., time)`. sample_rate (float): Sample rate of", "batch shape = x_db.size() packed_channels = shape[-3] if x_db.dim() >", "fb *= enorm.unsqueeze(0) if (fb.max(dim=0).values == 0.).any(): warnings.warn( \"At least", "= 0, frames_per_chunk: int = 0, simulate_first_pass_online: bool = False,", "Return: Tensor: Angle of a complex tensor. 
Shape of `(...,", "(..., freq, time) sample_rate (int): The sample rate of the", "(float, optional): Maximum F0 to search for (Hz) (default: 400.0)", "f_sp * mels # And now the nonlinear scale min_log_hz", "False, recompute_frame: int = 500, snip_edges: bool = True, )", "audio of dimension (..., freq, time) win_length (int, optional): The", "num_frames, num_feats = input_shape[-2:] waveform = waveform.view(-1, num_frames, num_feats) num_channels", "inverse = torch.istft(specgram * angles, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=window, length=length).float()", "f_sp logstep = math.log(6.4) / 27.0 log_t = (mels >=", "phase def phase_vocoder( complex_specgrams: Tensor, rate: float, phase_advance: Tensor )", "mag = alphas * norm_1 + (1 - alphas) *", "= nccf.shape[-1] // 2 half = torch.max(nccf[..., lag_min:half_size], -1) best", "unpack batch complex_specgrams_stretch = complex_specgrams_stretch.reshape(shape[:-3] + complex_specgrams_stretch.shape[1:]) return complex_specgrams_stretch def", "(specgrams.size(axis) - value) # Create broadcastable mask mask_start = min_value[...,", "to one. (bool, default = false) Returns: Tensor: Tensor of", "default values are consistent with librosa.core.spectrum._spectrogram spec_f = torch.stft( input=waveform,", "frames) where freq is ``n_fft // 2 + 1``. window", "= window_start last_window_end = window_end cmn_waveform[:, t, :] = waveform[:,", "tprev.mul_(momentum / (1 + momentum)) angles = angles.div(complex_norm(angles).add(1e-16).unsqueeze(-1).expand_as(angles)) # Return", "Tensor], thresh: float = 0.99 ) -> Tuple[Tensor, Tensor]: \"\"\"", "to search for (Hz) (default: 50.0) max_f0 (float, optional): Maximum", "Tensor. If ``channels_first=True``, it has ``[channel, time]`` else ``[time, channel]``.", "= True, ) -> torch.Tensor: \"\"\"Extract pitch based on method", "more than twice lowpass-cutoff. 
(default: 4000) delta_pitch( float, optional): Smallest", "- min_log_mel)) return freqs def create_fb_matrix( n_freqs: int, f_min: float,", "specgram = specgram.reshape([-1] + list(shape[-2:])) specgram = specgram.pow(1 / power)", "= _hz_to_mel(f_max, mel_scale=mel_scale) m_pts = torch.linspace(m_min, m_max, n_mels + 2)", "centered median smoothing, and convert to frequency. Note: If the", "< 0: window_start = 0 if last_window_start == -1: input_part", "with dimension of `(..., freq, ceil(time/rate), complex=2)` Example >>> freq,", "the number of frames is the file size divided by", "complex_norm( complex_tensor: Tensor, power: float = 1.0 ) -> Tensor:", "# Find near enough max that is smallest best =", "input_shape[-2:] waveform = waveform.view(-1, num_frames, num_feats) num_channels = waveform.shape[0] dtype", "= specgram.unsqueeze(-1).expand_as(angles) # And initialize the previous iterate to 0", "freq, time) sample_rate (int): The sample rate of the waveform", "_ = torch.median(roll, -1) return values def detect_pitch_frequency( waveform: Tensor,", "-> Tensor: r\"\"\"Create a frequency bin conversion matrix. Args: n_freqs", "center (bool, optional): If true, use a window centered on", "pitch tracking to introduce into the feature processing (affects output", "false, the incomplete frames near the ending edge won't be", "cmn_waveform[:, t, :] = waveform[:, t, :] - cur_sum /", "from `librosa`. * [1] McFee, Brian, <NAME>, <NAME>, <NAME>, <NAME>,", "rand_init (bool): Initializes phase randomly if True, to zero otherwise.", "corresponds to pitch and NCCF. 
Reference: - A pitch extraction", "n_mels) up_slopes = slopes[:, 2:] / f_diff[1:] # (n_freqs, n_mels)", "torch.cat(pad_length * [indices[..., pad_length].unsqueeze(-1)], dim=-1) roll = indices.unfold(-1, win_length, 1)", "-> Tuple[Tensor, Tensor]: r\"\"\"Separate a complex-valued spectrogram with shape `(...,", "torch.arange(-n, n + 1, 1, device=device, dtype=dtype).repeat(specgram.shape[1], 1, 1) output", "200.0 / 3 freqs = f_min + f_sp * mels", "= False, norm_vars: bool = False, ) -> Tensor: r\"\"\"", "the Griffin-Lim transformation. Implementation ported from `librosa`. * [1] McFee,", "Size of FFT, creates ``n_fft // 2 + 1`` bins", "waveform: Tensor, sample_rate: int, pad: int, window: Tensor, n_fft: int,", "output_lag.append(output_frames.unsqueeze(-1)) nccf = torch.cat(output_lag, -1) return nccf def _combine_max( a:", "x_db = x_db.reshape(-1, packed_channels, shape[-2], shape[-1]) x_db = torch.max(x_db, (x_db.amax(dim=(-3,", "so may return different values for an audio clip split", "float, mel_scale: str = \"htk\") -> float: r\"\"\"Convert Hz to", "output Tensor have dimension ``[channel, time]``. Otherwise, they have dimension", "(int, optional): The window length for median smoothing (in number", "lags, then the latter is taken. \"\"\" lag_min = int(math.ceil(sample_rate", "= mask * a[0] + ~mask * b[0] indices =", "win_length: int = 5, mode: str = \"replicate\" ) ->", "-> Tuple[Tensor, Tensor]: \"\"\" Take value from first if bigger", "dimension (..., frame) \"\"\" # pack batch shape = list(waveform.size())", "is the energy :math:`\\sum_{n=j}^{j+N-1} w^2(n)`. \"\"\" EPSILON = 10 **", "t in range(num_frames): window_start = 0 window_end = 0 if", "implemented using normalized cross-correlation function and median smoothing. 
Args: waveform", "= waveform.reshape(shape[:-2] + waveform.shape[-1:]) return waveform def amplitude_to_DB( x: Tensor,", "!= \"slaney\": raise ValueError(\"norm must be one of None or", "between 0 and quantization_channels - 1 and returns a signal", "Window in frames for running average CMN computation (int, default", "torch.norm(s1, p=2, dim=-1)).pow(2) / (EPSILON + torch.norm(s2, p=2, dim=-1)).pow(2) )", "for amplitude amin (float): Number to clamp ``x`` db_multiplier (float):", "spectrograms of dimensions (batch, channel, freq, time) \"\"\" if axis", "float = 400, soft_min_f0: float = 10.0, penalty_factor: float =", "output of each tensor in a batch depends on the", "values between 0 and quantization_channels - 1 and returns a", "Find near enough max that is smallest best = torch.max(nccf[...,", "optional): Duration of a frame (Default: ``10 ** (-2)``). win_length", "Resulting Tensor. If ``channels_first=True``, it has ``[channel, time]`` else ``[time,", "penalty_factor: float = 0.1, lowpass_cutoff: float = 1000, resample_frequency: float", "power) def _hz_to_mel(freq: float, mel_scale: str = \"htk\") -> float:", "(c_{t+n} - c_{t-n})}{2 \\sum_{n=1}^{\\text{N}} n^2} where :math:`d_t` is the deltas", "in power/amplitude scale. \"\"\" return ref * torch.pow(torch.pow(10.0, 0.1 *", "specgrams.transpose(axis, -1) specgrams.masked_fill_((mask >= mask_start) & (mask < mask_end), mask_value)", "lowpass-cutoff. (default: 4000) delta_pitch( float, optional): Smallest relative change in", "frame_size)) p = lags + num_of_frames * frame_size - waveform_length", "optional): controls whether to return half of results to avoid", "-> Tensor: r\"\"\"Decode mu-law encoded signal. 
For more info see", "math.log10(1.0 + (freq / 700.0)) # Fill in the linear", "number of Fourier bins, and time is the number of", "in ['slaney', 'htk']: raise ValueError('mel_scale should be one of \"htk\"", "(Tensor): Mels converted in Hz \"\"\" if mel_scale not in", "dimensions (batch, channel, freq, time) \"\"\" if axis != 2", "to pad :attr:`waveform` on both sides so that the :math:`t`-th", "dtype=dtype) * mask_param min_value = torch.rand(specgrams.shape[:2], device=device, dtype=dtype) * (specgrams.size(axis)", "top_db: Optional[float] = None ) -> Tensor: r\"\"\"Turn a spectrogram", "n^2} where :math:`d_t` is the deltas at time :math:`t`, :math:`c_t`", "- value) mask_start = (min_value.long()).squeeze() mask_end = (min_value.long() + value.long()).squeeze()", "that can be detected (Hz) (Default: ``85``). freq_high (int, optional):", "float = 10.0, min_f0: float = 50, max_f0: float =", "mask_start < mask_param if axis == 1: specgram[:, mask_start:mask_end] =", "dtype=dtype, device=device) for t in range(num_frames): window_start = 0 window_end", "each tensor in a batch depends on the maximum value", "\"\"\" bytes = io.BytesIO() torchaudio.backend.sox_io_backend.save(bytes, waveform, sample_rate, channels_first, compression, format,", "scale. Input should take the form `(..., freq, time)`. Batched", "= torch.zeros( num_channels, num_frames, num_feats, dtype=dtype, device=device) for t in", "0 if not center: if window_end > t: window_end =", "from 0 to quantization_channels - 1. Args: x (Tensor): Input", "1, min_cmn_window) if window_end > num_frames: window_start -= (window_end -", "Fourier transform,\" IEEE Trans. ASSP, vol.32, no.2, pp.236–243, Apr. 1984.", "window length used for computing delta (Default: ``5``) mode (str,", "of audio of dimension (..., time) pad (int): Two sided", "file size divided by the frame-shift. 
This makes different types", "math.log(6.4) / 27.0 log_t = (mels >= min_log_mel) freqs[log_t] =", "norm: Optional[str] ) -> Tensor: r\"\"\"Create a DCT transformation matrix", "w^2(n)`. \"\"\" EPSILON = 10 ** (-9) # Number of", "see the `Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_ This expects an input with", "channels_first: bool = True, compression: Optional[float] = None, encoding: Optional[str]", "dim=-1) phase = angle_1 - angle_0 - phase_advance phase =", "min=amin)) x_db -= multiplier * db_multiplier if top_db is not", "add \"with torch.no_grad():\" back when JIT supports it waveform =", "of dimension (..., time) pad (int): Two sided padding of", "types of features give the same number of frames. (default:", "= 2.0 / (f_pts[2:n_mels + 2] - f_pts[:n_mels]) fb *=", "frame_to_add if norm_vars: cur_sumsq += (frame_to_add ** 2) window_frames =", "the audio waveform pad (int): Two sided padding of signal", "of frames) (Default: ``30``). freq_low (int, optional): Lowest frequency that", "+ num_of_frames * frame_size - waveform_length waveform = torch.nn.functional.pad(waveform, (0,", "x: Tensor, multiplier: float, amin: float, db_multiplier: float, top_db: Optional[float]", "window=window, n_fft=n_fft, hop_length=hop_length, win_length=win_length, power=1., normalized=False) freqs = torch.linspace(0, sample_rate", "format, encoding, bits_per_sample ) bytes.seek(0) augmented, _ = torchaudio.sox_effects.sox_effects.apply_effects_file( bytes,", ") -> Tensor: r\"\"\"Given a STFT tensor, speed up in", "cmn_window window_end = t + 1 if window_start < 0:", "the features, which is the default. 
(default: False) Relevant if", "f_min = 0.0 f_sp = 200.0 / 3 mels =", "resample_frequency, delta_pitch, nccf_ballast, lowpass_filter_width, upsample_filter_width, max_frames_latency, frames_per_chunk, simulate_first_pass_online, recompute_frame, snip_edges,", "slopes = f_pts.unsqueeze(0) - all_freqs.unsqueeze(1) # (n_freqs, n_mels + 2)", "torch.max(x_db, (x_db.amax(dim=(-3, -2, -1)) - top_db).view(-1, 1, 1, 1)) #", "(channel, freq, time) \"\"\" # pack batch shape = specgram.size()", "the 1D tensor over the given window. \"\"\" # Centered", "** 2) if window_end > last_window_end: frame_to_add = waveform[:, last_window_end,", "the function will output features that correspond to what an", "float, freq_low: int ) -> Tensor: r\"\"\" Compute Normalized Cross-Correlation", "shape = waveform.size() waveform = waveform.reshape(-1, shape[-1]) # default values", "return x def complex_norm( complex_tensor: Tensor, power: float = 1.0", "norm_vars: cur_sumsq -= (frame_to_remove ** 2) if window_end > last_window_end:", "waveform.shape waveform = waveform.reshape(-1, shape[-1]) result = torch.ops.torchaudio.kaldi_ComputeKaldiPitch( waveform, sample_rate,", "0 to quantization_channels - 1. Args: x (Tensor): Input tensor", "-> float: r\"\"\"Convert Hz to Mels. Args: freqs (float): Frequencies", "(int): Number of channels Returns: Tensor: Input after mu-law decoding", "Specgrams Stretch with dimension of `(..., freq, ceil(time/rate), complex=2)` Example", "examples will have the same mask interval. Args: specgram (Tensor):", "(float, optional): Frame length in milliseconds. (default: 25.0) frame_shift (float,", "raise ValueError('mel_scale should be one of \"htk\" or \"slaney\".') if", "band (area normalization). (Default: ``None``) mel_scale (str, optional): Scale to" ]
[ "def image_callback(self, image_msg, camera): self.images[camera].append(image_msg) def statustext_callback(self, statustext_msg): screen =", "= curses.COLOR_RED status_armed = 'Yes' else: color = curses.COLOR_GREEN status_armed", "color = curses.COLOR_GREEN status_armed = 'No' screen.addstr(row, x_tab, 'Armed: ')", "variables self.status_battery_perc = None self.state = State() self.state_sub = rospy.Subscriber('mavros/state',", "screen.addstr(row, x_tab, 'Mode: ') screen.addstr(row, x_indent, status_mode, get_color(color)) row +=", "import curses import sys from collections import deque from datetime", "PoseStamped() self.global_pose_sub = rospy.Subscriber('global_position/pose', PoseStamped, callback=self.global_pose_callback, queue_size=1) self.diagnostics = DiagnosticArray()", "self.global_pose = PoseStamped() self.global_pose_sub = rospy.Subscriber('global_position/pose', PoseStamped, callback=self.global_pose_callback, queue_size=1) self.diagnostics", "curses.COLOR_RED elif severity in severity_yellow: color = curses.COLOR_YELLOW elif severity", "== 'Offboard': color = curses.COLOR_RED else: color = curses.COLOR_BLUE if", "status_extended = 'Takeoff' color = curses.COLOR_RED elif self.extended.landed_state == self.extended.LANDED_STATE_UNDEFINED:", "= self.status screen.clear() # rospy.loginfo(status) # print(status) x_tab = 0", "yaw, pitch, roll = int(yaw), int(pitch), int(roll) screen.addstr(row, x_tab, 'Global", "NavSatFix GPS_FIX_DICT = { 0: ('No GPS', curses.COLOR_RED), 1: ('No", "color, -1) # Default variables self.status_battery_perc = None self.state =", "= ExtendedState() self.extended_sub = rospy.Subscriber('mavros/extended_state', ExtendedState, callback=self.extended_callback, queue_size=1) # self.statustext", "= self.global_pose.pose.position q = self.global_pose.pose.orientation quaternion = [q.x, q.y, q.z,", "0 x_indent = 14 row = 0 # Battery battery_percentage", "pose_msg): self.local_pose = pose_msg def global_pose_callback(self, 
pose_msg): self.global_pose = pose_msg", "import datetime import numpy as np import rospy from diagnostic_msgs.msg", "'' self.screen.keypad(True) curses.curs_set(False) # Hide cursor colors = [curses.COLOR_BLACK, curses.COLOR_BLUE,", "ExtendedState, callback=self.extended_callback, queue_size=1) # self.statustext = StatusText() # self.statustext_sub =", "= status def gps_callback(self, gps_msg): self.gps = gps_msg def local_pose_callback(self,", "extended_callback(self, extended_msg): self.extended = extended_msg def diagnostics_callback(self, diagnostics_msg): for status", "curses.COLOR_GREEN elif self.extended.landed_state == self.extended.LANDED_STATE_ON_GROUND: status_extended = 'Ground' color =", "log, get_color(color)) self.lines += 1 screen.refresh() def print_status(self): screen =", "'Undefined' color = curses.COLOR_CYAN screen.addstr(row, x_tab, 'State: ') screen.addstr(row, x_indent,", "rospy.init_node('status_node', argv=sys.argv) self.rate = rospy.get_param('~rate', default=1.0) # Curses setup self.screen", "rot = R.from_quat(quaternion) except ValueError: rot = R.from_euler('zyx', [0.0, 0.0,", "2) self.lines = 0 self.text = '' self.screen.keypad(True) curses.curs_set(False) #", "= curses.COLOR_GREEN status_armed = 'No' screen.addstr(row, x_tab, 'Armed: ') screen.addstr(row,", "return curses.color_pair(color) def frequency_from_messages(messages): durations = [] for i in", "self.images[camera].append(image_msg) def statustext_callback(self, statustext_msg): screen = self.console time_str = datetime.now().strftime('%Y-%m-%d", "+= 1 # Global pose p = self.global_pose.pose.position q =", "x_tab, 'Armed: ') screen.addstr(row, x_indent, status_armed, get_color(color)) row += 1", "# Curses color setup curses.use_default_colors() for color in colors: curses.init_pair(color,", "2) yaw, pitch, roll = int(yaw), int(pitch), int(roll) screen.addstr(row, x_tab,", "pos: ') screen.addstr(row, x_indent, status_gps) row += 1 # Local", "battery_msg): if 
battery_msg.location == 'id0': self.battery = battery_msg def state_callback(self,", "# print(status) x_tab = 0 x_indent = 14 row =", "1 # Setpoint v = self.setpoint.velocity vx, vy, vz =", "rospy.Subscriber(topic, Image, callback=self.image_callback, callback_args=camera, queue_size=1, buff_size=2 ** 24) self.image_subscribers.append(subscriber) def", "StatusText from scipy.spatial.transform import Rotation as R from sensor_msgs.msg import", "color in colors: curses.init_pair(color, color, -1) # Default variables self.status_battery_perc", "== 'Fix type': fix_type, color = GPS_FIX_DICT[int(value.value)] screen.addstr(row, x_tab, 'GPS", "statustext_msg): screen = self.console time_str = datetime.now().strftime('%Y-%m-%d %H:%M:%S') # time_str", "local_pose_callback(self, pose_msg): self.local_pose = pose_msg def global_pose_callback(self, pose_msg): self.global_pose =", "2 # GPS pos latitude = self.gps.latitude longitude = self.gps.longitude", "status_extended = 'Air' color = curses.COLOR_RED elif self.extended.landed_state == self.extended.LANDED_STATE_LANDING:", "import absolute_import, division, print_function import curses import sys from collections", "callback=self.setpoint_callback, queue_size=1) self.cameras = ['front', 'right', 'back', 'left'] self.image_subscribers =", "'Armed: ') screen.addstr(row, x_indent, status_armed, get_color(color)) row += 1 #", "self.cols, 12, 2) self.lines = 0 self.text = '' self.screen.keypad(True)", "def curses_main(screen): StatusNode(screen).run() def main(): try: curses.wrapper(curses_main) except rospy.ROSInterruptException: pass", "setup curses.use_default_colors() for color in colors: curses.init_pair(color, color, -1) #", "= 'Yes' else: color = curses.COLOR_GREEN status_armed = 'No' screen.addstr(row,", "self.statustext_sub = rospy.Subscriber('mavros/statustext/recv', StatusText, # callback=self.statustext_callback, # queue_size=1) self.gps =", "state_callback(self, state_msg): self.state = state_msg def 
extended_callback(self, extended_msg): self.extended =", "{ 0: ('No GPS', curses.COLOR_RED), 1: ('No fix', curses.COLOR_RED), 2:", "mode = mode.capitalize() if mode == 'Offboard': color = curses.COLOR_RED", "status.name: self.diagnostic_gps = status def gps_callback(self, gps_msg): self.gps = gps_msg", "status in diagnostics_msg.status: if 'GPS' in status.name: self.diagnostic_gps = status", "self.rate = rospy.get_param('~rate', default=1.0) # Curses setup self.screen = curses.initscr()", "= rospy.Subscriber('mavros/setpoint_raw/local', PositionTarget, callback=self.setpoint_callback, queue_size=1) self.cameras = ['front', 'right', 'back',", "'Satellites visible': satellites = value.value elif value.key == 'Fix type':", "[msg.WARNING, msg.NOTICE] severity_neutral = [msg.INFO, msg.DEBUG] color = curses.COLOR_CYAN if", "lock', curses.COLOR_BLUE), 4: ('DGPS', curses.COLOR_MAGENTA), 5: ('RTK float', curses.COLOR_YELLOW), 6:", "color = curses.COLOR_RED elif severity in severity_yellow: color = curses.COLOR_YELLOW", "in colors: curses.init_pair(color, color, -1) # Default variables self.status_battery_perc =", "5: ('RTK float', curses.COLOR_YELLOW), 6: ('RTK fix', curses.COLOR_GREEN) } def", "GPS', curses.COLOR_RED), 1: ('No fix', curses.COLOR_RED), 2: ('2D lock', curses.COLOR_BLUE),", "get_color(color)) row += 2 # GPS pos latitude = self.gps.latitude", "f'{x:.2f} {y:.2f} {z:.2f} (XYZ) {roll} {pitch} {yaw} (RPY)') row +=", "def setpoint_callback(self, setpoint_msg): self.setpoint = setpoint_msg def image_callback(self, image_msg, camera):", "if severity in severity_red: color = curses.COLOR_RED elif severity in", "k, v in freqs.items()] screen.addstr(row, x_tab, 'Cameras: ') screen.addstr(row, x_indent,", "{z:.2f} (XYZ) {roll} {pitch} {yaw} (RPY)') row += 1 #", "Default variables self.status_battery_perc = None self.state = State() self.state_sub =", "x, y, z = round(p.x, 2), round(p.y, 2), round(p.z, 2)", "self.global_pose = pose_msg def setpoint_callback(self, 
setpoint_msg): self.setpoint = setpoint_msg def", "') screen.addstr(row, x_indent, f'{x:.2f} {y:.2f} {z:.2f} (XYZ) {roll} {pitch} {yaw}", "{fr} {fb} {fl} (front right back left [Hz])') row +=", "self.local_pose = pose_msg def global_pose_callback(self, pose_msg): self.global_pose = pose_msg def", "datetime.now().strftime('%Y-%m-%d %H:%M:%S') # time_str = datetime.datetime.fromtimestamp(unix_time) text = statustext_msg.text severity", "self.global_pose_sub = rospy.Subscriber('global_position/pose', PoseStamped, callback=self.global_pose_callback, queue_size=1) self.diagnostics = DiagnosticArray() self.diagnostic_gps", "if mode.startswith('AUTO'): mode = mode.split('.')[-1] mode = mode.capitalize() if mode", "#!/usr/bin/env python3 from __future__ import absolute_import, division, print_function import curses", "self.state_sub = rospy.Subscriber('mavros/state', State, callback=self.state_callback, queue_size=1) self.battery = BatteryState() self.battery_sub", "({color})' # screen.addstr(self.lines, 0, log, get_color(color)) self.lines += 1 screen.refresh()", "elif self.extended.landed_state == self.extended.LANDED_STATE_TAKEOFF: status_extended = 'Takeoff' color = curses.COLOR_RED", "= 0 # Battery battery_percentage = int(self.battery.percentage * 100) color", "StatusNode: def __init__(self, screen): rospy.init_node('status_node', argv=sys.argv) self.rate = rospy.get_param('~rate', default=1.0)", "msg.DEBUG] color = curses.COLOR_CYAN if severity in severity_red: color =", "height_status, self.cols, 12, 2) self.lines = 0 self.text = ''", "PoseStamped, callback=self.local_pose_callback, queue_size=1) self.global_pose = PoseStamped() self.global_pose_sub = rospy.Subscriber('global_position/pose', PoseStamped,", "color = curses.COLOR_RED elif self.extended.landed_state == self.extended.LANDED_STATE_UNDEFINED: status_extended = 'Undefined'", "try: rot = R.from_quat(quaternion) except ValueError: rot = R.from_euler('zyx', [0.0,", "freqs.items()] screen.addstr(row, x_tab, 
'Cameras: ') screen.addstr(row, x_indent, f'{ff} {fr} {fb}", "from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus from geometry_msgs.msg import PoseStamped from", "curses.COLOR_RED else: color = curses.COLOR_BLUE if mode == '': mode", "self.screen.getmaxyx() height_status = 15 self.status = curses.newwin(height_status, self.cols, 1, 2)", "duration = messages[i + 1].header.stamp - messages[i].header.stamp durations.append(duration.to_sec()) frequency =", "frequency = 1 / np.mean(durations) if np.isnan(frequency): return 0 return", "severity_yellow: color = curses.COLOR_YELLOW elif severity in severity_neutral: color =", "curses.COLOR_YELLOW elif severity in severity_neutral: color = curses.COLOR_WHITE self.text =", "= curses.COLOR_YELLOW elif battery_percentage > 0: color = curses.COLOR_RED status_battery", "self.diagnostic_gps = status def gps_callback(self, gps_msg): self.gps = gps_msg def", "+= 1 # Extended status if self.extended.landed_state == self.extended.LANDED_STATE_IN_AIR: status_extended", "curses.curs_set(False) # Hide cursor colors = [curses.COLOR_BLACK, curses.COLOR_BLUE, curses.COLOR_CYAN, curses.COLOR_GREEN,", "for cam, messages in self.images.items(): freqs[cam] = frequency_from_messages(messages) ff, fr,", "= gps_msg def local_pose_callback(self, pose_msg): self.local_pose = pose_msg def global_pose_callback(self,", "fl = [int(round(v)) for k, v in freqs.items()] screen.addstr(row, x_tab,", "('No fix', curses.COLOR_RED), 2: ('2D lock', curses.COLOR_BLUE), 3: ('3D lock',", "camera): self.images[camera].append(image_msg) def statustext_callback(self, statustext_msg): screen = self.console time_str =", "(front right back left [Hz])') row += 1 screen.refresh() self.screen.refresh()", "self.rows, self.cols = self.screen.getmaxyx() height_status = 15 self.status = curses.newwin(height_status,", "curses.use_default_colors() for color in colors: curses.init_pair(color, color, -1) # Default", "np.isnan(frequency): return 0 return frequency 
class StatusNode: def __init__(self, screen):", "pose_msg def setpoint_callback(self, setpoint_msg): self.setpoint = setpoint_msg def image_callback(self, image_msg,", "= mode.capitalize() if mode == 'Offboard': color = curses.COLOR_RED else:", "__future__ import absolute_import, division, print_function import curses import sys from", "status_mode, get_color(color)) row += 1 # Extended status if self.extended.landed_state", "= curses.COLOR_RED elif severity in severity_yellow: color = curses.COLOR_YELLOW elif", "4: ('DGPS', curses.COLOR_MAGENTA), 5: ('RTK float', curses.COLOR_YELLOW), 6: ('RTK fix',", "') screen.addstr(row, x_indent, f'{fix_type} ({satellites} sat)', get_color(color)) row += 2", "= [] self.images = {c: deque(maxlen=10) for c in self.cameras}", "# Default variables self.status_battery_perc = None self.state = State() self.state_sub", "= extended_msg def diagnostics_callback(self, diagnostics_msg): for status in diagnostics_msg.status: if", "v = self.setpoint.velocity vx, vy, vz = round(v.x, 2), round(v.y,", "screen.addstr(row, x_tab, 'Battery: ') screen.addstr(row, x_indent, status_battery, get_color(color)) row +=", "import numpy as np import rospy from diagnostic_msgs.msg import DiagnosticArray,", "screen.addstr(row, x_tab, 'Global pos: ') screen.addstr(row, x_indent, f'{x:.2f} {y:.2f} {z:.2f}", "pose_msg): self.global_pose = pose_msg def setpoint_callback(self, setpoint_msg): self.setpoint = setpoint_msg", "severity = statustext_msg.severity msg = statustext_msg severity_red = [msg.EMERGENCY, msg.ALERT,", "2) yaw = int(np.rad2deg(self.setpoint.yaw)) screen.addstr(row, x_tab, 'Setpoint: ') screen.addstr(row, x_indent,", "curses.COLOR_CYAN mode = self.state.mode if mode.startswith('AUTO'): mode = mode.split('.')[-1] mode", "get_color(color)) row += 1 # Extended status if self.extended.landed_state ==", "= '' self.screen.keypad(True) curses.curs_set(False) # Hide cursor colors = [curses.COLOR_BLACK,", "def state_callback(self, state_msg): 
self.state = state_msg def extended_callback(self, extended_msg): self.extended", "== self.extended.LANDED_STATE_IN_AIR: status_extended = 'Air' color = curses.COLOR_RED elif self.extended.landed_state", "z = round(p.x, 2), round(p.y, 2), round(p.z, 2) yaw, pitch,", "('2D lock', curses.COLOR_BLUE), 3: ('3D lock', curses.COLOR_BLUE), 4: ('DGPS', curses.COLOR_MAGENTA),", "self.gps_sub = rospy.Subscriber('mavros/global_position/raw/fix', NavSatFix, callback=self.gps_callback, queue_size=1) self.local_pose = PoseStamped() self.local_pose_sub", "# Cameras freqs = {c: 0 for c in self.cameras}", "+= 1 screen.refresh() def print_status(self): screen = self.status screen.clear() #", "datetime import numpy as np import rospy from diagnostic_msgs.msg import", "= {c: 0 for c in self.cameras} for cam, messages", "= self.state.mode if mode.startswith('AUTO'): mode = mode.split('.')[-1] mode = mode.capitalize()", "queue_size=1) self.diagnostics = DiagnosticArray() self.diagnostic_gps = DiagnosticStatus() self.diagnostics_sub = rospy.Subscriber('/diagnostics',", "f'camera_{camera}/image_raw' subscriber = rospy.Subscriber(topic, Image, callback=self.image_callback, callback_args=camera, queue_size=1, buff_size=2 **", "= 1 / np.mean(durations) if np.isnan(frequency): return 0 return frequency", "# Hide cursor colors = [curses.COLOR_BLACK, curses.COLOR_BLUE, curses.COLOR_CYAN, curses.COLOR_GREEN, curses.COLOR_MAGENTA,", "x_tab, 'Cameras: ') screen.addstr(row, x_indent, f'{ff} {fr} {fb} {fl} (front", "PoseStamped, callback=self.global_pose_callback, queue_size=1) self.diagnostics = DiagnosticArray() self.diagnostic_gps = DiagnosticStatus() self.diagnostics_sub", "self.extended.landed_state == self.extended.LANDED_STATE_TAKEOFF: status_extended = 'Takeoff' color = curses.COLOR_RED elif", "'Air' color = curses.COLOR_RED elif self.extended.landed_state == self.extended.LANDED_STATE_LANDING: status_extended =", "if value.key == 'Satellites visible': satellites = value.value elif 
value.key", "curses.COLOR_CYAN if battery_percentage > 50: color = curses.COLOR_GREEN elif battery_percentage", "'GPS info: ') screen.addstr(row, x_indent, f'{fix_type} ({satellites} sat)', get_color(color)) row", "int(yaw), int(pitch), int(roll) screen.addstr(row, x_tab, 'Local pos: ') screen.addstr(row, x_indent,", "'Setpoint: ') screen.addstr(row, x_indent, f'{vx:.2f} {vy:.2f} {vz:.2f} (XYZ) {yaw} (Y)')", "color setup curses.use_default_colors() for color in colors: curses.init_pair(color, color, -1)", "mode = 'None' elif mode == 'Posctl': mode = 'Position'", "pitch, roll = int(yaw), int(pitch), int(roll) screen.addstr(row, x_tab, 'Global pos:", "curses.nocbreak() self.screen.keypad(False) curses.echo() def curses_main(screen): StatusNode(screen).run() def main(): try: curses.wrapper(curses_main)", "def print_status(self): screen = self.status screen.clear() # rospy.loginfo(status) # print(status)", "for status in diagnostics_msg.status: if 'GPS' in status.name: self.diagnostic_gps =", "color = curses.COLOR_GREEN elif battery_percentage > 25: color = curses.COLOR_YELLOW", "# self.statustext_sub = rospy.Subscriber('mavros/statustext/recv', StatusText, # callback=self.statustext_callback, # queue_size=1) self.gps", "color = curses.COLOR_GREEN elif self.extended.landed_state == self.extended.LANDED_STATE_TAKEOFF: status_extended = 'Takeoff'", "1, 2) # self.console = curses.newwin(self.rows - height_status, self.cols, 12,", "GPS_FIX_DICT[int(value.value)] screen.addstr(row, x_tab, 'GPS info: ') screen.addstr(row, x_indent, f'{fix_type} ({satellites}", "screen.addstr(row, x_indent, status_armed, get_color(color)) row += 1 # Mode color", "row = 0 # Battery battery_percentage = int(self.battery.percentage * 100)", "'Takeoff' color = curses.COLOR_RED elif self.extended.landed_state == self.extended.LANDED_STATE_UNDEFINED: status_extended =", "row += 2 # GPS pos latitude = self.gps.latitude longitude", "= None self.state = State() self.state_sub = 
rospy.Subscriber('mavros/state', State, callback=self.state_callback,", "x_indent, status_mode, get_color(color)) row += 1 # Extended status if", "gps_msg): self.gps = gps_msg def local_pose_callback(self, pose_msg): self.local_pose = pose_msg", "curses.COLOR_RED elif self.extended.landed_state == self.extended.LANDED_STATE_UNDEFINED: status_extended = 'Undefined' color =", "'Rtl': mode = 'Return' status_mode = '{}'.format(mode) screen.addstr(row, x_tab, 'Mode:", "{c: 0 for c in self.cameras} for cam, messages in", "= datetime.now().strftime('%Y-%m-%d %H:%M:%S') # time_str = datetime.datetime.fromtimestamp(unix_time) text = statustext_msg.text", "yaw, pitch, roll = rot.as_euler('zyx', degrees=True) x, y, z =", "'%' screen.addstr(row, x_tab, 'Battery: ') screen.addstr(row, x_indent, status_battery, get_color(color)) row", "self.extended.LANDED_STATE_UNDEFINED: status_extended = 'Undefined' color = curses.COLOR_CYAN screen.addstr(row, x_tab, 'State:", "from collections import deque from datetime import datetime import numpy", "import ExtendedState, PositionTarget, State # StatusText from scipy.spatial.transform import Rotation", "Image, NavSatFix GPS_FIX_DICT = { 0: ('No GPS', curses.COLOR_RED), 1:", "curses.COLOR_GREEN, curses.COLOR_MAGENTA, curses.COLOR_RED, curses.COLOR_WHITE, curses.COLOR_YELLOW] # Curses color setup curses.use_default_colors()", "x_indent, f'{fix_type} ({satellites} sat)', get_color(color)) row += 2 # GPS", "= rot.as_euler('zyx', degrees=True) x, y, z = round(p.x, 2), round(p.y,", "status_armed = 'No' screen.addstr(row, x_tab, 'Armed: ') screen.addstr(row, x_indent, status_armed,", "= 'Ground' color = curses.COLOR_GREEN elif self.extended.landed_state == self.extended.LANDED_STATE_TAKEOFF: status_extended", "- messages[i].header.stamp durations.append(duration.to_sec()) frequency = 1 / np.mean(durations) if np.isnan(frequency):", "self.text = '' self.screen.keypad(True) curses.curs_set(False) # Hide cursor colors =", "State, 
callback=self.state_callback, queue_size=1) self.battery = BatteryState() self.battery_sub = rospy.Subscriber('mavros/battery', BatteryState,", "elif severity in severity_neutral: color = curses.COLOR_WHITE self.text = f'{time_str}:", "= f'{time_str}: {text} ({color})' # screen.addstr(self.lines, 0, log, get_color(color)) self.lines", "1 / np.mean(durations) if np.isnan(frequency): return 0 return frequency class", "BatteryState, callback=self.battery_callback, queue_size=1) self.extended = ExtendedState() self.extended_sub = rospy.Subscriber('mavros/extended_state', ExtendedState,", "statustext_msg.severity msg = statustext_msg severity_red = [msg.EMERGENCY, msg.ALERT, msg.CRITICAL, msg.ERROR]", "battery_percentage > 0: color = curses.COLOR_RED status_battery = str(battery_percentage) +", "vy, vz = round(v.x, 2), round(v.y, 2), round(v.z, 2) yaw", "rospy.Subscriber('global_position/pose', PoseStamped, callback=self.global_pose_callback, queue_size=1) self.diagnostics = DiagnosticArray() self.diagnostic_gps = DiagnosticStatus()", "callback=self.image_callback, callback_args=camera, queue_size=1, buff_size=2 ** 24) self.image_subscribers.append(subscriber) def battery_callback(self, battery_msg):", "R.from_euler('zyx', [0.0, 0.0, 0.0]) yaw, pitch, roll = rot.as_euler('zyx', degrees=True)", "for c in self.cameras} for cam, messages in self.images.items(): freqs[cam]", "x_indent, f'{ff} {fr} {fb} {fl} (front right back left [Hz])')", "main(): try: curses.wrapper(curses_main) except rospy.ROSInterruptException: pass if __name__ == '__main__':", "round(v.y, 2), round(v.z, 2) yaw = int(np.rad2deg(self.setpoint.yaw)) screen.addstr(row, x_tab, 'Setpoint:", "for c in self.cameras} for camera in self.cameras: topic =", "severity in severity_neutral: color = curses.COLOR_WHITE self.text = f'{time_str}: {text}", "* 100) color = curses.COLOR_CYAN if battery_percentage > 50: color", "battery_msg def state_callback(self, state_msg): self.state = state_msg def 
extended_callback(self, extended_msg):", "y, z = round(p.x, 2), round(p.y, 2), round(p.z, 2) yaw,", "{fl} (front right back left [Hz])') row += 1 screen.refresh()", "= rospy.Subscriber('mavros/state', State, callback=self.state_callback, queue_size=1) self.battery = BatteryState() self.battery_sub =", "') screen.addstr(row, x_indent, status_extended, get_color(color)) row += 1 # GPS", "visible': satellites = value.value elif value.key == 'Fix type': fix_type,", "12, 2) self.lines = 0 self.text = '' self.screen.keypad(True) curses.curs_set(False)", "BatteryState() self.battery_sub = rospy.Subscriber('mavros/battery', BatteryState, callback=self.battery_callback, queue_size=1) self.extended = ExtendedState()", "screen): rospy.init_node('status_node', argv=sys.argv) self.rate = rospy.get_param('~rate', default=1.0) # Curses setup", "= [msg.INFO, msg.DEBUG] color = curses.COLOR_CYAN if severity in severity_red:", "screen.addstr(row, x_tab, 'State: ') screen.addstr(row, x_indent, status_extended, get_color(color)) row +=", "= [q.x, q.y, q.z, q.w] try: rot = R.from_quat(quaternion) except", "status_extended = 'Ground' color = curses.COLOR_GREEN elif self.extended.landed_state == self.extended.LANDED_STATE_TAKEOFF:", "StatusNode(screen).run() def main(): try: curses.wrapper(curses_main) except rospy.ROSInterruptException: pass if __name__", "self.lines += 1 screen.refresh() def print_status(self): screen = self.status screen.clear()", "== 'Satellites visible': satellites = value.value elif value.key == 'Fix", "queue_size=1) self.extended = ExtendedState() self.extended_sub = rospy.Subscriber('mavros/extended_state', ExtendedState, callback=self.extended_callback, queue_size=1)", "'right', 'back', 'left'] self.image_subscribers = [] self.images = {c: deque(maxlen=10)", "= int(np.rad2deg(self.setpoint.yaw)) screen.addstr(row, x_tab, 'Setpoint: ') screen.addstr(row, x_indent, f'{vx:.2f} {vy:.2f}", "= 0 fix_type, color = GPS_FIX_DICT[0] for value in 
self.diagnostic_gps.values:", "2), round(p.y, 2), round(p.z, 2) yaw, pitch, roll = int(yaw),", "= 15 self.status = curses.newwin(height_status, self.cols, 1, 2) # self.console", "q = self.local_pose.pose.orientation quaternion = [q.x, q.y, q.z, q.w] try:", "info satellites = 0 fix_type, color = GPS_FIX_DICT[0] for value", "screen.refresh() self.screen.refresh() def run(self): rate = rospy.Rate(self.rate) try: while not", "x_tab, 'Mode: ') screen.addstr(row, x_indent, status_mode, get_color(color)) row += 1", "curses.COLOR_YELLOW elif battery_percentage > 0: color = curses.COLOR_RED status_battery =", "** 24) self.image_subscribers.append(subscriber) def battery_callback(self, battery_msg): if battery_msg.location == 'id0':", "f'{latitude:.7f} {longitude:.7f} {altitude:.2f} (LLA)' screen.addstr(row, x_tab, 'GPS pos: ') screen.addstr(row,", "-1) # Default variables self.status_battery_perc = None self.state = State()", "+= 1 # Armed if self.state.armed: color = curses.COLOR_RED status_armed", "curses.newwin(self.rows - height_status, self.cols, 12, 2) self.lines = 0 self.text", "def battery_callback(self, battery_msg): if battery_msg.location == 'id0': self.battery = battery_msg", "deque(maxlen=10) for c in self.cameras} for camera in self.cameras: topic", "ff, fr, fb, fl = [int(round(v)) for k, v in", "== self.extended.LANDED_STATE_ON_GROUND: status_extended = 'Ground' color = curses.COLOR_GREEN elif self.extended.landed_state", "screen = self.status screen.clear() # rospy.loginfo(status) # print(status) x_tab =", "curses.COLOR_CYAN screen.addstr(row, x_tab, 'State: ') screen.addstr(row, x_indent, status_extended, get_color(color)) row", "color = GPS_FIX_DICT[0] for value in self.diagnostic_gps.values: if value.key ==", "in severity_neutral: color = curses.COLOR_WHITE self.text = f'{time_str}: {text} ({color})'", "= rospy.Subscriber(topic, Image, callback=self.image_callback, callback_args=camera, queue_size=1, buff_size=2 ** 24) 
self.image_subscribers.append(subscriber)", "self.image_subscribers = [] self.images = {c: deque(maxlen=10) for c in", "row += 1 # Local pose p = self.local_pose.pose.position q", "q.y, q.z, q.w] try: rot = R.from_quat(quaternion) except ValueError: rot", "== '': mode = 'None' elif mode == 'Posctl': mode", "queue_size=1) # self.statustext = StatusText() # self.statustext_sub = rospy.Subscriber('mavros/statustext/recv', StatusText,", "1: ('No fix', curses.COLOR_RED), 2: ('2D lock', curses.COLOR_BLUE), 3: ('3D", "import sys from collections import deque from datetime import datetime", "self.extended.LANDED_STATE_TAKEOFF: status_extended = 'Takeoff' color = curses.COLOR_RED elif self.extended.landed_state ==", "in self.cameras: topic = f'camera_{camera}/image_raw' subscriber = rospy.Subscriber(topic, Image, callback=self.image_callback,", "rospy.Rate(self.rate) try: while not rospy.is_shutdown(): self.print_status() rate.sleep() except rospy.ROSInterruptException: curses.nocbreak()", "0 fix_type, color = GPS_FIX_DICT[0] for value in self.diagnostic_gps.values: if", "'Position' elif mode == 'Rtl': mode = 'Return' status_mode =", "camera in self.cameras: topic = f'camera_{camera}/image_raw' subscriber = rospy.Subscriber(topic, Image,", "f'{fix_type} ({satellites} sat)', get_color(color)) row += 2 # GPS pos", "= [curses.COLOR_BLACK, curses.COLOR_BLUE, curses.COLOR_CYAN, curses.COLOR_GREEN, curses.COLOR_MAGENTA, curses.COLOR_RED, curses.COLOR_WHITE, curses.COLOR_YELLOW] #", "24) self.image_subscribers.append(subscriber) def battery_callback(self, battery_msg): if battery_msg.location == 'id0': self.battery", "DiagnosticArray, DiagnosticStatus from geometry_msgs.msg import PoseStamped from mavros_msgs.msg import ExtendedState,", "DiagnosticArray, callback=self.diagnostics_callback, queue_size=1) self.setpoint = PositionTarget() self.setpoint_sub = rospy.Subscriber('mavros/setpoint_raw/local', PositionTarget,", "pose_msg def global_pose_callback(self, pose_msg): 
self.global_pose = pose_msg def setpoint_callback(self, setpoint_msg):", "status if self.extended.landed_state == self.extended.LANDED_STATE_IN_AIR: status_extended = 'Air' color =", "info: ') screen.addstr(row, x_indent, f'{fix_type} ({satellites} sat)', get_color(color)) row +=", "self.console = curses.newwin(self.rows - height_status, self.cols, 12, 2) self.lines =", "{y:.2f} {z:.2f} (XYZ) {roll} {pitch} {yaw} (RPY)') row += 1", "durations = [] for i in range(len(messages) - 1): duration", "0.0, 0.0]) yaw, pitch, roll = rot.as_euler('zyx', degrees=True) x, y,", "{yaw} (RPY)') row += 1 # Setpoint v = self.setpoint.velocity", "degrees=True) x, y, z = round(p.x, 2), round(p.y, 2), round(p.z,", "quaternion = [q.x, q.y, q.z, q.w] try: rot = R.from_quat(quaternion)", "screen = self.console time_str = datetime.now().strftime('%Y-%m-%d %H:%M:%S') # time_str =", "status_armed, get_color(color)) row += 1 # Mode color = curses.COLOR_CYAN", "> 25: color = curses.COLOR_YELLOW elif battery_percentage > 0: color", "= curses.COLOR_GREEN elif self.extended.landed_state == self.extended.LANDED_STATE_TAKEOFF: status_extended = 'Takeoff' color", "if battery_percentage > 50: color = curses.COLOR_GREEN elif battery_percentage >", "sys from collections import deque from datetime import datetime import", "('No GPS', curses.COLOR_RED), 1: ('No fix', curses.COLOR_RED), 2: ('2D lock',", "= datetime.datetime.fromtimestamp(unix_time) text = statustext_msg.text severity = statustext_msg.severity msg =", "mode = self.state.mode if mode.startswith('AUTO'): mode = mode.split('.')[-1] mode =", "= f'{latitude:.7f} {longitude:.7f} {altitude:.2f} (LLA)' screen.addstr(row, x_tab, 'GPS pos: ')", "msg.ERROR] severity_yellow = [msg.WARNING, msg.NOTICE] severity_neutral = [msg.INFO, msg.DEBUG] color", "= str(battery_percentage) + '%' screen.addstr(row, x_tab, 'Battery: ') screen.addstr(row, x_indent,", "(XYZ) {roll} {pitch} {yaw} (RPY)') row += 1 # Global", "color = curses.COLOR_RED status_armed = 'Yes' 
else: color = curses.COLOR_GREEN", "rospy.is_shutdown(): self.print_status() rate.sleep() except rospy.ROSInterruptException: curses.nocbreak() self.screen.keypad(False) curses.echo() def curses_main(screen):", "color = curses.COLOR_YELLOW elif severity in severity_neutral: color = curses.COLOR_WHITE", "100) color = curses.COLOR_CYAN if battery_percentage > 50: color =", "x_tab, 'Setpoint: ') screen.addstr(row, x_indent, f'{vx:.2f} {vy:.2f} {vz:.2f} (XYZ) {yaw}", "freqs = {c: 0 for c in self.cameras} for cam,", "self.state.mode if mode.startswith('AUTO'): mode = mode.split('.')[-1] mode = mode.capitalize() if", "round(p.z, 2) yaw, pitch, roll = int(yaw), int(pitch), int(roll) screen.addstr(row,", "color = curses.COLOR_CYAN if battery_percentage > 50: color = curses.COLOR_GREEN", "from datetime import datetime import numpy as np import rospy", "= DiagnosticStatus() self.diagnostics_sub = rospy.Subscriber('/diagnostics', DiagnosticArray, callback=self.diagnostics_callback, queue_size=1) self.setpoint =", "setpoint_msg): self.setpoint = setpoint_msg def image_callback(self, image_msg, camera): self.images[camera].append(image_msg) def", "callback=self.global_pose_callback, queue_size=1) self.diagnostics = DiagnosticArray() self.diagnostic_gps = DiagnosticStatus() self.diagnostics_sub =", "for value in self.diagnostic_gps.values: if value.key == 'Satellites visible': satellites", "if mode == '': mode = 'None' elif mode ==", "topic = f'camera_{camera}/image_raw' subscriber = rospy.Subscriber(topic, Image, callback=self.image_callback, callback_args=camera, queue_size=1,", "queue_size=1, buff_size=2 ** 24) self.image_subscribers.append(subscriber) def battery_callback(self, battery_msg): if battery_msg.location", "gps_callback(self, gps_msg): self.gps = gps_msg def local_pose_callback(self, pose_msg): self.local_pose =", "fix', curses.COLOR_GREEN) } def get_color(color): return curses.color_pair(color) def frequency_from_messages(messages): durations", "self.statustext = 
StatusText() # self.statustext_sub = rospy.Subscriber('mavros/statustext/recv', StatusText, # callback=self.statustext_callback,", "int(roll) screen.addstr(row, x_tab, 'Local pos: ') screen.addstr(row, x_indent, f'{x:.2f} {y:.2f}", "self.gps = NavSatFix() self.gps_sub = rospy.Subscriber('mavros/global_position/raw/fix', NavSatFix, callback=self.gps_callback, queue_size=1) self.local_pose", "self.local_pose.pose.orientation quaternion = [q.x, q.y, q.z, q.w] try: rot =", "= curses.COLOR_RED elif self.extended.landed_state == self.extended.LANDED_STATE_UNDEFINED: status_extended = 'Undefined' color", "self.battery = BatteryState() self.battery_sub = rospy.Subscriber('mavros/battery', BatteryState, callback=self.battery_callback, queue_size=1) self.extended", "'GPS' in status.name: self.diagnostic_gps = status def gps_callback(self, gps_msg): self.gps", "mode = 'Position' elif mode == 'Rtl': mode = 'Return'", "= curses.COLOR_CYAN if battery_percentage > 50: color = curses.COLOR_GREEN elif", "x_tab, 'GPS info: ') screen.addstr(row, x_indent, f'{fix_type} ({satellites} sat)', get_color(color))", "') screen.addstr(row, x_indent, f'{ff} {fr} {fb} {fl} (front right back", "screen.addstr(row, x_indent, status_gps) row += 1 # Local pose p", "fr, fb, fl = [int(round(v)) for k, v in freqs.items()]", "buff_size=2 ** 24) self.image_subscribers.append(subscriber) def battery_callback(self, battery_msg): if battery_msg.location ==", "6: ('RTK fix', curses.COLOR_GREEN) } def get_color(color): return curses.color_pair(color) def", "self.battery = battery_msg def state_callback(self, state_msg): self.state = state_msg def", "2) status_gps = f'{latitude:.7f} {longitude:.7f} {altitude:.2f} (LLA)' screen.addstr(row, x_tab, 'GPS", "mavros_msgs.msg import ExtendedState, PositionTarget, State # StatusText from scipy.spatial.transform import", "= 'Return' status_mode = '{}'.format(mode) screen.addstr(row, x_tab, 'Mode: ') screen.addstr(row,", "= round(self.gps.altitude, 2) status_gps = 
f'{latitude:.7f} {longitude:.7f} {altitude:.2f} (LLA)' screen.addstr(row,", "1].header.stamp - messages[i].header.stamp durations.append(duration.to_sec()) frequency = 1 / np.mean(durations) if", "# rospy.loginfo(status) # print(status) x_tab = 0 x_indent = 14", "x_tab, 'Battery: ') screen.addstr(row, x_indent, status_battery, get_color(color)) row += 1", "1 # Global pose p = self.global_pose.pose.position q = self.global_pose.pose.orientation", "2), round(v.z, 2) yaw = int(np.rad2deg(self.setpoint.yaw)) screen.addstr(row, x_tab, 'Setpoint: ')", "# self.statustext = StatusText() # self.statustext_sub = rospy.Subscriber('mavros/statustext/recv', StatusText, #", "curses.COLOR_RED, curses.COLOR_WHITE, curses.COLOR_YELLOW] # Curses color setup curses.use_default_colors() for color", "15 self.status = curses.newwin(height_status, self.cols, 1, 2) # self.console =", "screen.addstr(row, x_indent, status_extended, get_color(color)) row += 1 # GPS info", "('3D lock', curses.COLOR_BLUE), 4: ('DGPS', curses.COLOR_MAGENTA), 5: ('RTK float', curses.COLOR_YELLOW),", "= GPS_FIX_DICT[0] for value in self.diagnostic_gps.values: if value.key == 'Satellites", "for camera in self.cameras: topic = f'camera_{camera}/image_raw' subscriber = rospy.Subscriber(topic,", "2), round(v.y, 2), round(v.z, 2) yaw = int(np.rad2deg(self.setpoint.yaw)) screen.addstr(row, x_tab,", "row += 1 # Cameras freqs = {c: 0 for", "float', curses.COLOR_YELLOW), 6: ('RTK fix', curses.COLOR_GREEN) } def get_color(color): return", "in self.diagnostic_gps.values: if value.key == 'Satellites visible': satellites = value.value", "for color in colors: curses.init_pair(color, color, -1) # Default variables", "self.status screen.clear() # rospy.loginfo(status) # print(status) x_tab = 0 x_indent", "# Setpoint v = self.setpoint.velocity vx, vy, vz = round(v.x,", "frequency class StatusNode: def __init__(self, screen): rospy.init_node('status_node', argv=sys.argv) self.rate =", "color = GPS_FIX_DICT[int(value.value)] 
screen.addstr(row, x_tab, 'GPS info: ') screen.addstr(row, x_indent,", "0 # Battery battery_percentage = int(self.battery.percentage * 100) color =", "= statustext_msg severity_red = [msg.EMERGENCY, msg.ALERT, msg.CRITICAL, msg.ERROR] severity_yellow =", "setpoint_msg def image_callback(self, image_msg, camera): self.images[camera].append(image_msg) def statustext_callback(self, statustext_msg): screen", "sensor_msgs.msg import BatteryState, Image, NavSatFix GPS_FIX_DICT = { 0: ('No", "messages in self.images.items(): freqs[cam] = frequency_from_messages(messages) ff, fr, fb, fl", "deque from datetime import datetime import numpy as np import", "int(roll) screen.addstr(row, x_tab, 'Global pos: ') screen.addstr(row, x_indent, f'{x:.2f} {y:.2f}", "python3 from __future__ import absolute_import, division, print_function import curses import", "screen.addstr(row, x_indent, status_battery, get_color(color)) row += 1 # Armed if", "= StatusText() # self.statustext_sub = rospy.Subscriber('mavros/statustext/recv', StatusText, # callback=self.statustext_callback, #", "return 0 return frequency class StatusNode: def __init__(self, screen): rospy.init_node('status_node',", "= self.local_pose.pose.orientation quaternion = [q.x, q.y, q.z, q.w] try: rot", "back left [Hz])') row += 1 screen.refresh() self.screen.refresh() def run(self):", "Mode color = curses.COLOR_CYAN mode = self.state.mode if mode.startswith('AUTO'): mode", "row += 1 # Mode color = curses.COLOR_CYAN mode =", "freqs[cam] = frequency_from_messages(messages) ff, fr, fb, fl = [int(round(v)) for", "int(self.battery.percentage * 100) color = curses.COLOR_CYAN if battery_percentage > 50:", "def diagnostics_callback(self, diagnostics_msg): for status in diagnostics_msg.status: if 'GPS' in", "pos: ') screen.addstr(row, x_indent, f'{x:.2f} {y:.2f} {z:.2f} (XYZ) {roll} {pitch}", "self.screen.keypad(False) curses.echo() def curses_main(screen): StatusNode(screen).run() def main(): try: curses.wrapper(curses_main) except", 
def frequency_from_messages(messages):
    """Estimate the publish rate in Hz from consecutive header stamps.

    @param messages: indexable/iterable of stamped ROS messages (a
        collections.deque in this file); each must expose
        ``header.stamp`` where stamp differences provide ``to_sec()``.
    @return: estimated frequency in Hz, or 0 when no rate can be
        computed (fewer than two messages, or a zero/undefined mean
        interval).
    """
    # Fewer than two samples: np.mean([]) would emit a RuntimeWarning and
    # produce nan, so bail out explicitly.
    if len(messages) < 2:
        return 0
    # Iterate (not slice) so deque inputs keep working.
    stamps = [m.header.stamp for m in messages]
    durations = [(b - a).to_sec() for a, b in zip(stamps, stamps[1:])]
    mean_duration = np.mean(durations)
    # Identical stamps previously produced 1/0.0 -> inf (plus a numpy
    # warning); report 0 ("no usable rate") instead of a bogus frequency.
    if mean_duration == 0 or np.isnan(mean_duration):
        return 0
    return 1 / mean_duration
def get_color(color):
    """Return the curses attribute for the colour pair registered as *color*.

    Relies on __init__ having called curses.init_pair(color, color, -1) for
    every colour in use, so pair number == foreground colour number.
    """
    return curses.color_pair(color)
    def __init__(self, screen):
        """Initialise the ROS node, the curses UI, and all subscriptions.

        @param screen: curses window handed in by curses.wrapper.
            NOTE(review): it is not used directly — a screen is re-created
            via curses.initscr() below; confirm this double-init is intended.
        """
        rospy.init_node('status_node', argv=sys.argv)
        # Redraw frequency of the status window [Hz].
        self.rate = rospy.get_param('~rate', default=1.0)
        # Curses setup
        self.screen = curses.initscr()
        self.rows, self.cols = self.screen.getmaxyx()
        height_status = 15
        self.status = curses.newwin(height_status, self.cols, 1, 2)
        # self.console = curses.newwin(self.rows - height_status, self.cols, 12, 2)
        self.lines = 0
        self.text = ''
        self.screen.keypad(True)
        curses.curs_set(False)  # Hide cursor
        colors = [curses.COLOR_BLACK, curses.COLOR_BLUE, curses.COLOR_CYAN,
                  curses.COLOR_GREEN, curses.COLOR_MAGENTA, curses.COLOR_RED,
                  curses.COLOR_WHITE, curses.COLOR_YELLOW]
        # Curses color setup: pair number == colour number, default background,
        # so get_color(colour) can look pairs up directly.
        curses.use_default_colors()
        for color in colors:
            curses.init_pair(color, color, -1)
        # Default variables (latest message of each type, updated by callbacks).
        self.status_battery_perc = None
        self.state = State()
        self.state_sub = rospy.Subscriber('mavros/state', State,
                                          callback=self.state_callback,
                                          queue_size=1)
        self.battery = BatteryState()
        self.battery_sub = rospy.Subscriber('mavros/battery', BatteryState,
                                            callback=self.battery_callback,
                                            queue_size=1)
        self.extended = ExtendedState()
        self.extended_sub = rospy.Subscriber('mavros/extended_state', ExtendedState,
                                             callback=self.extended_callback,
                                             queue_size=1)
        # self.statustext = StatusText()
        # self.statustext_sub = rospy.Subscriber('mavros/statustext/recv', StatusText,
        #                                        callback=self.statustext_callback,
        #                                        queue_size=1)
        self.gps = NavSatFix()
        self.gps_sub = rospy.Subscriber('mavros/global_position/raw/fix', NavSatFix,
                                        callback=self.gps_callback,
                                        queue_size=1)
        self.local_pose = PoseStamped()
        self.local_pose_sub = rospy.Subscriber('mavros/local_position/pose', PoseStamped,
                                               callback=self.local_pose_callback,
                                               queue_size=1)
        self.global_pose = PoseStamped()
        self.global_pose_sub = rospy.Subscriber('global_position/pose', PoseStamped,
                                                callback=self.global_pose_callback,
                                                queue_size=1)
        self.diagnostics = DiagnosticArray()
        self.diagnostic_gps = DiagnosticStatus()
        self.diagnostics_sub = rospy.Subscriber('/diagnostics', DiagnosticArray,
                                                callback=self.diagnostics_callback,
                                                queue_size=1)
        self.setpoint = PositionTarget()
        self.setpoint_sub = rospy.Subscriber('mavros/setpoint_raw/local', PositionTarget,
                                             callback=self.setpoint_callback,
                                             queue_size=1)
        self.cameras = ['front', 'right', 'back', 'left']
        self.image_subscribers = []
        # Keep the last 10 images per camera so print_status can estimate
        # each stream's frame rate from header stamps.
        self.images = {c: deque(maxlen=10) for c in self.cameras}
        for camera in self.cameras:
            topic = f'camera_{camera}/image_raw'
            # Large buff_size so big Image messages are not dropped mid-frame.
            subscriber = rospy.Subscriber(topic, Image,
                                          callback=self.image_callback,
                                          callback_args=camera,
                                          queue_size=1, buff_size=2 ** 24)
            self.image_subscribers.append(subscriber)
{text} ({color})' # screen.addstr(self.lines, 0, log,", "= self.setpoint.velocity vx, vy, vz = round(v.x, 2), round(v.y, 2),", "screen.addstr(row, x_tab, 'Armed: ') screen.addstr(row, x_indent, status_armed, get_color(color)) row +=", "1 # Extended status if self.extended.landed_state == self.extended.LANDED_STATE_IN_AIR: status_extended =", "except ValueError: rot = R.from_euler('zyx', [0.0, 0.0, 0.0]) yaw, pitch,", "collections import deque from datetime import datetime import numpy as", "'State: ') screen.addstr(row, x_indent, status_extended, get_color(color)) row += 1 #", "default=1.0) # Curses setup self.screen = curses.initscr() self.rows, self.cols =", "from sensor_msgs.msg import BatteryState, Image, NavSatFix GPS_FIX_DICT = { 0:", "('RTK fix', curses.COLOR_GREEN) } def get_color(color): return curses.color_pair(color) def frequency_from_messages(messages):", "PoseStamped() self.local_pose_sub = rospy.Subscriber('mavros/local_position/pose', PoseStamped, callback=self.local_pose_callback, queue_size=1) self.global_pose = PoseStamped()", "roll = int(yaw), int(pitch), int(roll) screen.addstr(row, x_tab, 'Local pos: ')", "Setpoint v = self.setpoint.velocity vx, vy, vz = round(v.x, 2),", "cam, messages in self.images.items(): freqs[cam] = frequency_from_messages(messages) ff, fr, fb,", "+= 1 screen.refresh() self.screen.refresh() def run(self): rate = rospy.Rate(self.rate) try:", "callback=self.diagnostics_callback, queue_size=1) self.setpoint = PositionTarget() self.setpoint_sub = rospy.Subscriber('mavros/setpoint_raw/local', PositionTarget, callback=self.setpoint_callback,", "int(yaw), int(pitch), int(roll) screen.addstr(row, x_tab, 'Global pos: ') screen.addstr(row, x_indent,", "14 row = 0 # Battery battery_percentage = int(self.battery.percentage *", "curses.COLOR_BLUE), 3: ('3D lock', curses.COLOR_BLUE), 4: ('DGPS', curses.COLOR_MAGENTA), 5: ('RTK", "(LLA)' screen.addstr(row, x_tab, 'GPS pos: ') screen.addstr(row, x_indent, status_gps) row", "color = 
curses.COLOR_RED else: color = curses.COLOR_BLUE if mode ==", "if battery_msg.location == 'id0': self.battery = battery_msg def state_callback(self, state_msg):", "def __init__(self, screen): rospy.init_node('status_node', argv=sys.argv) self.rate = rospy.get_param('~rate', default=1.0) #", "status_extended = 'Landed' color = curses.COLOR_GREEN elif self.extended.landed_state == self.extended.LANDED_STATE_ON_GROUND:", "time_str = datetime.datetime.fromtimestamp(unix_time) text = statustext_msg.text severity = statustext_msg.severity msg", "gps_msg def local_pose_callback(self, pose_msg): self.local_pose = pose_msg def global_pose_callback(self, pose_msg):", "satellites = 0 fix_type, color = GPS_FIX_DICT[0] for value in", "sat)', get_color(color)) row += 2 # GPS pos latitude =", "{altitude:.2f} (LLA)' screen.addstr(row, x_tab, 'GPS pos: ') screen.addstr(row, x_indent, status_gps)", "def get_color(color): return curses.color_pair(color) def frequency_from_messages(messages): durations = [] for", "(XYZ) {yaw} (Y)') row += 1 # Cameras freqs =", "self.images.items(): freqs[cam] = frequency_from_messages(messages) ff, fr, fb, fl = [int(round(v))", "curses.COLOR_BLUE), 4: ('DGPS', curses.COLOR_MAGENTA), 5: ('RTK float', curses.COLOR_YELLOW), 6: ('RTK", "extended_msg def diagnostics_callback(self, diagnostics_msg): for status in diagnostics_msg.status: if 'GPS'", "= curses.initscr() self.rows, self.cols = self.screen.getmaxyx() height_status = 15 self.status", "not rospy.is_shutdown(): self.print_status() rate.sleep() except rospy.ROSInterruptException: curses.nocbreak() self.screen.keypad(False) curses.echo() def", "== 'id0': self.battery = battery_msg def state_callback(self, state_msg): self.state =", "self.cameras = ['front', 'right', 'back', 'left'] self.image_subscribers = [] self.images", "Image, callback=self.image_callback, callback_args=camera, queue_size=1, buff_size=2 ** 24) self.image_subscribers.append(subscriber) def battery_callback(self,", 
    def state_callback(self, state_msg):
        """Cache the latest mavros State (armed flag and flight mode)."""
        self.state = state_msg
scipy.spatial.transform import Rotation as R", "row += 1 # Global pose p = self.global_pose.pose.position q", "+ 1].header.stamp - messages[i].header.stamp durations.append(duration.to_sec()) frequency = 1 / np.mean(durations)", "color = curses.COLOR_WHITE self.text = f'{time_str}: {text} ({color})' # screen.addstr(self.lines,", "Rotation as R from sensor_msgs.msg import BatteryState, Image, NavSatFix GPS_FIX_DICT", "StatusText, # callback=self.statustext_callback, # queue_size=1) self.gps = NavSatFix() self.gps_sub =", "screen.addstr(row, x_indent, status_mode, get_color(color)) row += 1 # Extended status", "') screen.addstr(row, x_indent, status_gps) row += 1 # Local pose", "rot = R.from_euler('zyx', [0.0, 0.0, 0.0]) yaw, pitch, roll =", "'id0': self.battery = battery_msg def state_callback(self, state_msg): self.state = state_msg", "= 'No' screen.addstr(row, x_tab, 'Armed: ') screen.addstr(row, x_indent, status_armed, get_color(color))", "self.extended = extended_msg def diagnostics_callback(self, diagnostics_msg): for status in diagnostics_msg.status:", "# Curses setup self.screen = curses.initscr() self.rows, self.cols = self.screen.getmaxyx()", "= curses.COLOR_YELLOW elif severity in severity_neutral: color = curses.COLOR_WHITE self.text", "= R.from_euler('zyx', [0.0, 0.0, 0.0]) yaw, pitch, roll = rot.as_euler('zyx',", "= [msg.EMERGENCY, msg.ALERT, msg.CRITICAL, msg.ERROR] severity_yellow = [msg.WARNING, msg.NOTICE] severity_neutral", "rospy.Subscriber('mavros/local_position/pose', PoseStamped, callback=self.local_pose_callback, queue_size=1) self.global_pose = PoseStamped() self.global_pose_sub = rospy.Subscriber('global_position/pose',", "colors = [curses.COLOR_BLACK, curses.COLOR_BLUE, curses.COLOR_CYAN, curses.COLOR_GREEN, curses.COLOR_MAGENTA, curses.COLOR_RED, curses.COLOR_WHITE, curses.COLOR_YELLOW]", "x_indent = 14 row = 0 # Battery battery_percentage =", "= {c: deque(maxlen=10) for c in self.cameras} for camera in", "{roll} {pitch} {yaw} (RPY)') row 
+= 1 # Setpoint v", "left [Hz])') row += 1 screen.refresh() self.screen.refresh() def run(self): rate", "2), round(p.z, 2) yaw, pitch, roll = int(yaw), int(pitch), int(roll)", "callback=self.gps_callback, queue_size=1) self.local_pose = PoseStamped() self.local_pose_sub = rospy.Subscriber('mavros/local_position/pose', PoseStamped, callback=self.local_pose_callback,", "queue_size=1) self.local_pose = PoseStamped() self.local_pose_sub = rospy.Subscriber('mavros/local_position/pose', PoseStamped, callback=self.local_pose_callback, queue_size=1)", "while not rospy.is_shutdown(): self.print_status() rate.sleep() except rospy.ROSInterruptException: curses.nocbreak() self.screen.keypad(False) curses.echo()", "curses.COLOR_RED status_armed = 'Yes' else: color = curses.COLOR_GREEN status_armed =", "self.cameras} for camera in self.cameras: topic = f'camera_{camera}/image_raw' subscriber =", "self.extended.LANDED_STATE_IN_AIR: status_extended = 'Air' color = curses.COLOR_RED elif self.extended.landed_state ==", "curses.init_pair(color, color, -1) # Default variables self.status_battery_perc = None self.state", "def main(): try: curses.wrapper(curses_main) except rospy.ROSInterruptException: pass if __name__ ==", "lock', curses.COLOR_BLUE), 3: ('3D lock', curses.COLOR_BLUE), 4: ('DGPS', curses.COLOR_MAGENTA), 5:", "') screen.addstr(row, x_indent, status_armed, get_color(color)) row += 1 # Mode", "# self.console = curses.newwin(self.rows - height_status, self.cols, 12, 2) self.lines", "NavSatFix, callback=self.gps_callback, queue_size=1) self.local_pose = PoseStamped() self.local_pose_sub = rospy.Subscriber('mavros/local_position/pose', PoseStamped,", "pitch, roll = rot.as_euler('zyx', degrees=True) x, y, z = round(p.x,", "fix_type, color = GPS_FIX_DICT[int(value.value)] screen.addstr(row, x_tab, 'GPS info: ') screen.addstr(row,", "= messages[i + 1].header.stamp - messages[i].header.stamp durations.append(duration.to_sec()) frequency = 1", "import BatteryState, Image, 
NavSatFix GPS_FIX_DICT = { 0: ('No GPS',", "state_msg): self.state = state_msg def extended_callback(self, extended_msg): self.extended = extended_msg", "msg.NOTICE] severity_neutral = [msg.INFO, msg.DEBUG] color = curses.COLOR_CYAN if severity", "battery_percentage > 25: color = curses.COLOR_YELLOW elif battery_percentage > 0:", "# callback=self.statustext_callback, # queue_size=1) self.gps = NavSatFix() self.gps_sub = rospy.Subscriber('mavros/global_position/raw/fix',", "self.extended.landed_state == self.extended.LANDED_STATE_LANDING: status_extended = 'Landed' color = curses.COLOR_GREEN elif", "self.local_pose = PoseStamped() self.local_pose_sub = rospy.Subscriber('mavros/local_position/pose', PoseStamped, callback=self.local_pose_callback, queue_size=1) self.global_pose", "range(len(messages) - 1): duration = messages[i + 1].header.stamp - messages[i].header.stamp", "screen.addstr(row, x_tab, 'GPS info: ') screen.addstr(row, x_indent, f'{fix_type} ({satellites} sat)',", "f'{vx:.2f} {vy:.2f} {vz:.2f} (XYZ) {yaw} (Y)') row += 1 #", "global_pose_callback(self, pose_msg): self.global_pose = pose_msg def setpoint_callback(self, setpoint_msg): self.setpoint =", "status_armed = 'Yes' else: color = curses.COLOR_GREEN status_armed = 'No'", "= curses.COLOR_WHITE self.text = f'{time_str}: {text} ({color})' # screen.addstr(self.lines, 0,", "else: color = curses.COLOR_GREEN status_armed = 'No' screen.addstr(row, x_tab, 'Armed:", "self.images = {c: deque(maxlen=10) for c in self.cameras} for camera", "np.mean(durations) if np.isnan(frequency): return 0 return frequency class StatusNode: def", "round(v.z, 2) yaw = int(np.rad2deg(self.setpoint.yaw)) screen.addstr(row, x_tab, 'Setpoint: ') screen.addstr(row,", "q.z, q.w] try: rot = R.from_quat(quaternion) except ValueError: rot =", "> 50: color = curses.COLOR_GREEN elif battery_percentage > 25: color", "'Fix type': fix_type, color = GPS_FIX_DICT[int(value.value)] screen.addstr(row, x_tab, 'GPS info:", "color = 
    def local_pose_callback(self, pose_msg):
        """Cache the latest local-frame PoseStamped."""
        self.local_pose = pose_msg
    def global_pose_callback(self, pose_msg):
        """Cache the latest global-frame PoseStamped."""
        self.global_pose = pose_msg
    def setpoint_callback(self, setpoint_msg):
        """Cache the latest PositionTarget setpoint."""
        self.setpoint = setpoint_msg
    def image_callback(self, image_msg, camera):
        """Append the image to the per-camera deque (for frame-rate estimation).

        @param camera: camera name, bound via callback_args at subscription.
        """
        self.images[camera].append(image_msg)
    def print_status(self):
        """Redraw the status window: battery, arming, mode, landed state,
        GPS fix/position, local/global pose, setpoint, and camera rates.

        Each section writes a label at x_tab and a colour-coded value at
        x_indent, advancing `row` as it goes.
        """
        screen = self.status
        screen.clear()
        # print(status)
        x_tab = 0
        x_indent = 14
        row = 0
        # Battery: green > 50%, yellow > 25%, red otherwise.
        battery_percentage = int(self.battery.percentage * 100)
        color = curses.COLOR_CYAN
        if battery_percentage > 50:
            color = curses.COLOR_GREEN
        elif battery_percentage > 25:
            color = curses.COLOR_YELLOW
        elif battery_percentage > 0:
            color = curses.COLOR_RED
        status_battery = str(battery_percentage) + '%'
        screen.addstr(row, x_tab, 'Battery: ')
        screen.addstr(row, x_indent, status_battery, get_color(color))
        row += 1
        # Armed: red when armed (danger), green when disarmed.
        if self.state.armed:
            color = curses.COLOR_RED
            status_armed = 'Yes'
        else:
            color = curses.COLOR_GREEN
            status_armed = 'No'
        screen.addstr(row, x_tab, 'Armed: ')
        screen.addstr(row, x_indent, status_armed, get_color(color))
        row += 1
        # Mode: strip the 'AUTO.' prefix, prettify a few PX4 names, and
        # highlight Offboard in red.
        color = curses.COLOR_CYAN
        mode = self.state.mode
        if mode.startswith('AUTO'):
            mode = mode.split('.')[-1]
        mode = mode.capitalize()
        if mode == 'Offboard':
            color = curses.COLOR_RED
        else:
            color = curses.COLOR_BLUE
        if mode == '':
            mode = 'None'
        elif mode == 'Posctl':
            mode = 'Position'
        elif mode == 'Rtl':
            mode = 'Return'
        status_mode = '{}'.format(mode)
        screen.addstr(row, x_tab, 'Mode: ')
        screen.addstr(row, x_indent, status_mode, get_color(color))
        row += 1
        # Extended status: translate the landed_state enum to a label.
        if self.extended.landed_state == self.extended.LANDED_STATE_IN_AIR:
            status_extended = 'Air'
            color = curses.COLOR_RED
        elif self.extended.landed_state == self.extended.LANDED_STATE_LANDING:
            status_extended = 'Landed'
            color = curses.COLOR_GREEN
        elif self.extended.landed_state == self.extended.LANDED_STATE_ON_GROUND:
            status_extended = 'Ground'
            color = curses.COLOR_GREEN
        elif self.extended.landed_state == self.extended.LANDED_STATE_TAKEOFF:
            status_extended = 'Takeoff'
            color = curses.COLOR_RED
        elif self.extended.landed_state == self.extended.LANDED_STATE_UNDEFINED:
            status_extended = 'Undefined'
            color = curses.COLOR_CYAN
        screen.addstr(row, x_tab, 'State: ')
        screen.addstr(row, x_indent, status_extended, get_color(color))
        row += 1
        # GPS info: pull satellite count and fix type out of the cached
        # diagnostics entry; default to 'No GPS'.
        satellites = 0
        fix_type, color = GPS_FIX_DICT[0]
        for value in self.diagnostic_gps.values:
            if value.key == 'Satellites visible':
                satellites = value.value
            elif value.key == 'Fix type':
                fix_type, color = GPS_FIX_DICT[int(value.value)]
        screen.addstr(row, x_tab, 'GPS info: ')
        screen.addstr(row, x_indent, f'{fix_type} ({satellites} sat)', get_color(color))
        row += 2
        # GPS pos (latitude / longitude / altitude).
        latitude = self.gps.latitude
        longitude = self.gps.longitude
        altitude = round(self.gps.altitude, 2)
        status_gps = f'{latitude:.7f} {longitude:.7f} {altitude:.2f} (LLA)'
        screen.addstr(row, x_tab, 'GPS pos: ')
        screen.addstr(row, x_indent, status_gps)
        row += 1
        # Local pose: position plus quaternion -> intrinsic zyx Euler angles
        # in degrees; an all-zero quaternion raises ValueError, fall back to 0.
        p = self.local_pose.pose.position
        q = self.local_pose.pose.orientation
        quaternion = [q.x, q.y, q.z, q.w]
        try:
            rot = R.from_quat(quaternion)
        except ValueError:
            rot = R.from_euler('zyx', [0.0, 0.0, 0.0])
        yaw, pitch, roll = rot.as_euler('zyx', degrees=True)
        x, y, z = round(p.x, 2), round(p.y, 2), round(p.z, 2)
        yaw, pitch, roll = int(yaw), int(pitch), int(roll)
        screen.addstr(row, x_tab, 'Local pos: ')
        screen.addstr(row, x_indent,
                      f'{x:.2f} {y:.2f} {z:.2f} (XYZ) {roll} {pitch} {yaw} (RPY)')
        row += 1
        # Global pose: same conversion as the local pose above.
        p = self.global_pose.pose.position
        q = self.global_pose.pose.orientation
        quaternion = [q.x, q.y, q.z, q.w]
        try:
            rot = R.from_quat(quaternion)
        except ValueError:
            rot = R.from_euler('zyx', [0.0, 0.0, 0.0])
        yaw, pitch, roll = rot.as_euler('zyx', degrees=True)
        x, y, z = round(p.x, 2), round(p.y, 2), round(p.z, 2)
        yaw, pitch, roll = int(yaw), int(pitch), int(roll)
        screen.addstr(row, x_tab, 'Global pos: ')
        screen.addstr(row, x_indent,
                      f'{x:.2f} {y:.2f} {z:.2f} (XYZ) {roll} {pitch} {yaw} (RPY)')
        row += 1
        # Setpoint: commanded velocity and yaw (yaw converted rad -> deg).
        v = self.setpoint.velocity
        vx, vy, vz = round(v.x, 2), round(v.y, 2), round(v.z, 2)
        yaw = int(np.rad2deg(self.setpoint.yaw))
        screen.addstr(row, x_tab, 'Setpoint: ')
        screen.addstr(row, x_indent, f'{vx:.2f} {vy:.2f} {vz:.2f} (XYZ) {yaw} (Y)')
        row += 1
        # Cameras: estimated frame rate per stream from the buffered images.
        # NOTE(review): the unpack order relies on dict insertion order
        # matching self.cameras ('front', 'right', 'back', 'left').
        freqs = {c: 0 for c in self.cameras}
        for cam, messages in self.images.items():
            freqs[cam] = frequency_from_messages(messages)
        ff, fr, fb, fl = [int(round(v)) for k, v in freqs.items()]
        screen.addstr(row, x_tab, 'Cameras: ')
        screen.addstr(row, x_indent, f'{ff} {fr} {fb} {fl} (front right back left [Hz])')
        row += 1
        screen.refresh()
        self.screen.refresh()
x_tab, 'Mode: ') screen.addstr(row, x_indent, status_mode, get_color(color)) row", "3: ('3D lock', curses.COLOR_BLUE), 4: ('DGPS', curses.COLOR_MAGENTA), 5: ('RTK float',", "latitude = self.gps.latitude longitude = self.gps.longitude altitude = round(self.gps.altitude, 2)", "class StatusNode: def __init__(self, screen): rospy.init_node('status_node', argv=sys.argv) self.rate = rospy.get_param('~rate',", "row += 1 # Setpoint v = self.setpoint.velocity vx, vy,", "self.extended_sub = rospy.Subscriber('mavros/extended_state', ExtendedState, callback=self.extended_callback, queue_size=1) # self.statustext = StatusText()", "get_color(color)) row += 1 # Mode color = curses.COLOR_CYAN mode", "self.screen.keypad(True) curses.curs_set(False) # Hide cursor colors = [curses.COLOR_BLACK, curses.COLOR_BLUE, curses.COLOR_CYAN,", "self.status = curses.newwin(height_status, self.cols, 1, 2) # self.console = curses.newwin(self.rows", "= curses.newwin(height_status, self.cols, 1, 2) # self.console = curses.newwin(self.rows -", "curses.COLOR_CYAN if severity in severity_red: color = curses.COLOR_RED elif severity", "pose p = self.local_pose.pose.position q = self.local_pose.pose.orientation quaternion = [q.x,", "battery_percentage = int(self.battery.percentage * 100) color = curses.COLOR_CYAN if battery_percentage", "self.local_pose.pose.position q = self.local_pose.pose.orientation quaternion = [q.x, q.y, q.z, q.w]", "') screen.addstr(row, x_indent, f'{vx:.2f} {vy:.2f} {vz:.2f} (XYZ) {yaw} (Y)') row", "2: ('2D lock', curses.COLOR_BLUE), 3: ('3D lock', curses.COLOR_BLUE), 4: ('DGPS',", "self.local_pose_sub = rospy.Subscriber('mavros/local_position/pose', PoseStamped, callback=self.local_pose_callback, queue_size=1) self.global_pose = PoseStamped() self.global_pose_sub", "rospy.Subscriber('mavros/state', State, callback=self.state_callback, queue_size=1) self.battery = BatteryState() self.battery_sub = rospy.Subscriber('mavros/battery',", "[Hz])') row += 1 screen.refresh() 
self.screen.refresh() def run(self): rate =", "1 screen.refresh() self.screen.refresh() def run(self): rate = rospy.Rate(self.rate) try: while", "elif battery_percentage > 25: color = curses.COLOR_YELLOW elif battery_percentage >", "{longitude:.7f} {altitude:.2f} (LLA)' screen.addstr(row, x_tab, 'GPS pos: ') screen.addstr(row, x_indent,", "screen.addstr(row, x_tab, 'Setpoint: ') screen.addstr(row, x_indent, f'{vx:.2f} {vy:.2f} {vz:.2f} (XYZ)", "elif severity in severity_yellow: color = curses.COLOR_YELLOW elif severity in", "+= 1 # Mode color = curses.COLOR_CYAN mode = self.state.mode", "fb, fl = [int(round(v)) for k, v in freqs.items()] screen.addstr(row,", "== self.extended.LANDED_STATE_TAKEOFF: status_extended = 'Takeoff' color = curses.COLOR_RED elif self.extended.landed_state", "self.gps.latitude longitude = self.gps.longitude altitude = round(self.gps.altitude, 2) status_gps =", "for k, v in freqs.items()] screen.addstr(row, x_tab, 'Cameras: ') screen.addstr(row,", "{fb} {fl} (front right back left [Hz])') row += 1", "self.extended.landed_state == self.extended.LANDED_STATE_UNDEFINED: status_extended = 'Undefined' color = curses.COLOR_CYAN screen.addstr(row,", "self.gps.longitude altitude = round(self.gps.altitude, 2) status_gps = f'{latitude:.7f} {longitude:.7f} {altitude:.2f}", "callback=self.state_callback, queue_size=1) self.battery = BatteryState() self.battery_sub = rospy.Subscriber('mavros/battery', BatteryState, callback=self.battery_callback,", "= rospy.Rate(self.rate) try: while not rospy.is_shutdown(): self.print_status() rate.sleep() except rospy.ROSInterruptException:", "status_battery = str(battery_percentage) + '%' screen.addstr(row, x_tab, 'Battery: ') screen.addstr(row,", "callback=self.local_pose_callback, queue_size=1) self.global_pose = PoseStamped() self.global_pose_sub = rospy.Subscriber('global_position/pose', PoseStamped, callback=self.global_pose_callback,", "in freqs.items()] screen.addstr(row, x_tab, 'Cameras: ') screen.addstr(row, 
x_indent, f'{ff} {fr}", "severity_red = [msg.EMERGENCY, msg.ALERT, msg.CRITICAL, msg.ERROR] severity_yellow = [msg.WARNING, msg.NOTICE]", "= mode.split('.')[-1] mode = mode.capitalize() if mode == 'Offboard': color", "status_extended = 'Undefined' color = curses.COLOR_CYAN screen.addstr(row, x_tab, 'State: ')", "self.lines = 0 self.text = '' self.screen.keypad(True) curses.curs_set(False) # Hide", "if self.state.armed: color = curses.COLOR_RED status_armed = 'Yes' else: color", "= pose_msg def global_pose_callback(self, pose_msg): self.global_pose = pose_msg def setpoint_callback(self,", "callback=self.statustext_callback, # queue_size=1) self.gps = NavSatFix() self.gps_sub = rospy.Subscriber('mavros/global_position/raw/fix', NavSatFix,", "1 screen.refresh() def print_status(self): screen = self.status screen.clear() # rospy.loginfo(status)", "subscriber = rospy.Subscriber(topic, Image, callback=self.image_callback, callback_args=camera, queue_size=1, buff_size=2 ** 24)", "'Battery: ') screen.addstr(row, x_indent, status_battery, get_color(color)) row += 1 #", "screen.addstr(row, x_tab, 'Local pos: ') screen.addstr(row, x_indent, f'{x:.2f} {y:.2f} {z:.2f}", "self.screen.refresh() def run(self): rate = rospy.Rate(self.rate) try: while not rospy.is_shutdown():", "if mode == 'Offboard': color = curses.COLOR_RED else: color =", "('RTK float', curses.COLOR_YELLOW), 6: ('RTK fix', curses.COLOR_GREEN) } def get_color(color):", "- 1): duration = messages[i + 1].header.stamp - messages[i].header.stamp durations.append(duration.to_sec())", "round(p.x, 2), round(p.y, 2), round(p.z, 2) yaw, pitch, roll =", "# GPS pos latitude = self.gps.latitude longitude = self.gps.longitude altitude", "str(battery_percentage) + '%' screen.addstr(row, x_tab, 'Battery: ') screen.addstr(row, x_indent, status_battery,", "rospy.Subscriber('mavros/global_position/raw/fix', NavSatFix, callback=self.gps_callback, queue_size=1) self.local_pose = PoseStamped() self.local_pose_sub = 
rospy.Subscriber('mavros/local_position/pose',", "self.state.armed: color = curses.COLOR_RED status_armed = 'Yes' else: color =", "= setpoint_msg def image_callback(self, image_msg, camera): self.images[camera].append(image_msg) def statustext_callback(self, statustext_msg):", "0: color = curses.COLOR_RED status_battery = str(battery_percentage) + '%' screen.addstr(row,", "np import rospy from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus from geometry_msgs.msg", "self.diagnostic_gps.values: if value.key == 'Satellites visible': satellites = value.value elif", "{vz:.2f} (XYZ) {yaw} (Y)') row += 1 # Cameras freqs", "import DiagnosticArray, DiagnosticStatus from geometry_msgs.msg import PoseStamped from mavros_msgs.msg import", "= round(p.x, 2), round(p.y, 2), round(p.z, 2) yaw, pitch, roll", "= 14 row = 0 # Battery battery_percentage = int(self.battery.percentage", "severity_neutral: color = curses.COLOR_WHITE self.text = f'{time_str}: {text} ({color})' #", "= frequency_from_messages(messages) ff, fr, fb, fl = [int(round(v)) for k,", "if np.isnan(frequency): return 0 return frequency class StatusNode: def __init__(self,", "rospy.Subscriber('mavros/statustext/recv', StatusText, # callback=self.statustext_callback, # queue_size=1) self.gps = NavSatFix() self.gps_sub", "= [int(round(v)) for k, v in freqs.items()] screen.addstr(row, x_tab, 'Cameras:", "# Local pose p = self.local_pose.pose.position q = self.local_pose.pose.orientation quaternion", "{yaw} (RPY)') row += 1 # Global pose p =", "text = statustext_msg.text severity = statustext_msg.severity msg = statustext_msg severity_red", "statustext_callback(self, statustext_msg): screen = self.console time_str = datetime.now().strftime('%Y-%m-%d %H:%M:%S') #", "[] self.images = {c: deque(maxlen=10) for c in self.cameras} for", "self.extended.LANDED_STATE_LANDING: status_extended = 'Landed' color = curses.COLOR_GREEN elif self.extended.landed_state ==", "= 0 x_indent = 14 row = 0 # Battery", "= 'Landed' 
color = curses.COLOR_GREEN elif self.extended.landed_state == self.extended.LANDED_STATE_ON_GROUND: status_extended", "State() self.state_sub = rospy.Subscriber('mavros/state', State, callback=self.state_callback, queue_size=1) self.battery = BatteryState()", "get_color(color): return curses.color_pair(color) def frequency_from_messages(messages): durations = [] for i", "get_color(color)) row += 1 # GPS info satellites = 0", "= self.local_pose.pose.position q = self.local_pose.pose.orientation quaternion = [q.x, q.y, q.z,", "StatusText() # self.statustext_sub = rospy.Subscriber('mavros/statustext/recv', StatusText, # callback=self.statustext_callback, # queue_size=1)", "DiagnosticArray() self.diagnostic_gps = DiagnosticStatus() self.diagnostics_sub = rospy.Subscriber('/diagnostics', DiagnosticArray, callback=self.diagnostics_callback, queue_size=1)", "R from sensor_msgs.msg import BatteryState, Image, NavSatFix GPS_FIX_DICT = {", "def local_pose_callback(self, pose_msg): self.local_pose = pose_msg def global_pose_callback(self, pose_msg): self.global_pose", "in range(len(messages) - 1): duration = messages[i + 1].header.stamp -", "self.setpoint = setpoint_msg def image_callback(self, image_msg, camera): self.images[camera].append(image_msg) def statustext_callback(self,", "screen.addstr(row, x_tab, 'GPS pos: ') screen.addstr(row, x_indent, status_gps) row +=", "def gps_callback(self, gps_msg): self.gps = gps_msg def local_pose_callback(self, pose_msg): self.local_pose", "in status.name: self.diagnostic_gps = status def gps_callback(self, gps_msg): self.gps =", "msg.ALERT, msg.CRITICAL, msg.ERROR] severity_yellow = [msg.WARNING, msg.NOTICE] severity_neutral = [msg.INFO,", "severity in severity_yellow: color = curses.COLOR_YELLOW elif severity in severity_neutral:", "= curses.COLOR_BLUE if mode == '': mode = 'None' elif", "import deque from datetime import datetime import numpy as np", "self.extended.LANDED_STATE_ON_GROUND: status_extended = 'Ground' color = 
curses.COLOR_GREEN elif self.extended.landed_state ==", "= f'camera_{camera}/image_raw' subscriber = rospy.Subscriber(topic, Image, callback=self.image_callback, callback_args=camera, queue_size=1, buff_size=2", "= rospy.Subscriber('mavros/battery', BatteryState, callback=self.battery_callback, queue_size=1) self.extended = ExtendedState() self.extended_sub =", "= { 0: ('No GPS', curses.COLOR_RED), 1: ('No fix', curses.COLOR_RED),", "from geometry_msgs.msg import PoseStamped from mavros_msgs.msg import ExtendedState, PositionTarget, State", "= 'None' elif mode == 'Posctl': mode = 'Position' elif", "severity_neutral = [msg.INFO, msg.DEBUG] color = curses.COLOR_CYAN if severity in", "in self.cameras} for cam, messages in self.images.items(): freqs[cam] = frequency_from_messages(messages)", "status_gps = f'{latitude:.7f} {longitude:.7f} {altitude:.2f} (LLA)' screen.addstr(row, x_tab, 'GPS pos:", "1 # Mode color = curses.COLOR_CYAN mode = self.state.mode if", "callback=self.extended_callback, queue_size=1) # self.statustext = StatusText() # self.statustext_sub = rospy.Subscriber('mavros/statustext/recv',", "= rospy.Subscriber('mavros/local_position/pose', PoseStamped, callback=self.local_pose_callback, queue_size=1) self.global_pose = PoseStamped() self.global_pose_sub =", "int(np.rad2deg(self.setpoint.yaw)) screen.addstr(row, x_tab, 'Setpoint: ') screen.addstr(row, x_indent, f'{vx:.2f} {vy:.2f} {vz:.2f}", "curses.color_pair(color) def frequency_from_messages(messages): durations = [] for i in range(len(messages)", "ExtendedState() self.extended_sub = rospy.Subscriber('mavros/extended_state', ExtendedState, callback=self.extended_callback, queue_size=1) # self.statustext =", "callback_args=camera, queue_size=1, buff_size=2 ** 24) self.image_subscribers.append(subscriber) def battery_callback(self, battery_msg): if", "screen.clear() # rospy.loginfo(status) # print(status) x_tab = 0 x_indent =", "i in range(len(messages) - 1): duration = messages[i + 1].header.stamp", "= 
'Undefined' color = curses.COLOR_CYAN screen.addstr(row, x_tab, 'State: ') screen.addstr(row,", "from mavros_msgs.msg import ExtendedState, PositionTarget, State # StatusText from scipy.spatial.transform", "screen.addstr(row, x_tab, 'Cameras: ') screen.addstr(row, x_indent, f'{ff} {fr} {fb} {fl}", "msg.CRITICAL, msg.ERROR] severity_yellow = [msg.WARNING, msg.NOTICE] severity_neutral = [msg.INFO, msg.DEBUG]", "# StatusText from scipy.spatial.transform import Rotation as R from sensor_msgs.msg", "diagnostics_msg): for status in diagnostics_msg.status: if 'GPS' in status.name: self.diagnostic_gps", "elif self.extended.landed_state == self.extended.LANDED_STATE_ON_GROUND: status_extended = 'Ground' color = curses.COLOR_GREEN", "'Ground' color = curses.COLOR_GREEN elif self.extended.landed_state == self.extended.LANDED_STATE_TAKEOFF: status_extended =", "color = curses.COLOR_GREEN elif self.extended.landed_state == self.extended.LANDED_STATE_ON_GROUND: status_extended = 'Ground'", "= int(yaw), int(pitch), int(roll) screen.addstr(row, x_tab, 'Global pos: ') screen.addstr(row,", "image_msg, camera): self.images[camera].append(image_msg) def statustext_callback(self, statustext_msg): screen = self.console time_str", "'Posctl': mode = 'Position' elif mode == 'Rtl': mode =", "(Y)') row += 1 # Cameras freqs = {c: 0", "v in freqs.items()] screen.addstr(row, x_tab, 'Cameras: ') screen.addstr(row, x_indent, f'{ff}", "Armed if self.state.armed: color = curses.COLOR_RED status_armed = 'Yes' else:", "fix', curses.COLOR_RED), 2: ('2D lock', curses.COLOR_BLUE), 3: ('3D lock', curses.COLOR_BLUE),", "self.global_pose.pose.position q = self.global_pose.pose.orientation quaternion = [q.x, q.y, q.z, q.w]", "screen.addstr(row, x_indent, f'{vx:.2f} {vy:.2f} {vz:.2f} (XYZ) {yaw} (Y)') row +=", "self.screen = curses.initscr() self.rows, self.cols = self.screen.getmaxyx() height_status = 15", "self.print_status() rate.sleep() except rospy.ROSInterruptException: curses.nocbreak() 
self.screen.keypad(False) curses.echo() def curses_main(screen): StatusNode(screen).run()", "except rospy.ROSInterruptException: curses.nocbreak() self.screen.keypad(False) curses.echo() def curses_main(screen): StatusNode(screen).run() def main():", "= '{}'.format(mode) screen.addstr(row, x_tab, 'Mode: ') screen.addstr(row, x_indent, status_mode, get_color(color))", "NavSatFix() self.gps_sub = rospy.Subscriber('mavros/global_position/raw/fix', NavSatFix, callback=self.gps_callback, queue_size=1) self.local_pose = PoseStamped()", "PoseStamped from mavros_msgs.msg import ExtendedState, PositionTarget, State # StatusText from", "elif value.key == 'Fix type': fix_type, color = GPS_FIX_DICT[int(value.value)] screen.addstr(row,", "int(pitch), int(roll) screen.addstr(row, x_tab, 'Local pos: ') screen.addstr(row, x_indent, f'{x:.2f}", "import rospy from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus from geometry_msgs.msg import", "self.diagnostic_gps = DiagnosticStatus() self.diagnostics_sub = rospy.Subscriber('/diagnostics', DiagnosticArray, callback=self.diagnostics_callback, queue_size=1) self.setpoint", "int(pitch), int(roll) screen.addstr(row, x_tab, 'Global pos: ') screen.addstr(row, x_indent, f'{x:.2f}", "status_extended, get_color(color)) row += 1 # GPS info satellites =", "= self.gps.latitude longitude = self.gps.longitude altitude = round(self.gps.altitude, 2) status_gps", "1 # Local pose p = self.local_pose.pose.position q = self.local_pose.pose.orientation", "mode = mode.split('.')[-1] mode = mode.capitalize() if mode == 'Offboard':", "curses.COLOR_WHITE, curses.COLOR_YELLOW] # Curses color setup curses.use_default_colors() for color in", "screen.addstr(row, x_indent, f'{fix_type} ({satellites} sat)', get_color(color)) row += 2 #", "curses.COLOR_BLUE, curses.COLOR_CYAN, curses.COLOR_GREEN, curses.COLOR_MAGENTA, curses.COLOR_RED, curses.COLOR_WHITE, curses.COLOR_YELLOW] # Curses color", "curses.initscr() self.rows, self.cols = 
self.screen.getmaxyx() height_status = 15 self.status =", "= battery_msg def state_callback(self, state_msg): self.state = state_msg def extended_callback(self,", "curses.COLOR_GREEN elif self.extended.landed_state == self.extended.LANDED_STATE_TAKEOFF: status_extended = 'Takeoff' color =", "p = self.global_pose.pose.position q = self.global_pose.pose.orientation quaternion = [q.x, q.y,", "= self.screen.getmaxyx() height_status = 15 self.status = curses.newwin(height_status, self.cols, 1,", "50: color = curses.COLOR_GREEN elif battery_percentage > 25: color =", "= self.console time_str = datetime.now().strftime('%Y-%m-%d %H:%M:%S') # time_str = datetime.datetime.fromtimestamp(unix_time)", "'Return' status_mode = '{}'.format(mode) screen.addstr(row, x_tab, 'Mode: ') screen.addstr(row, x_indent,", "self.extended.landed_state == self.extended.LANDED_STATE_IN_AIR: status_extended = 'Air' color = curses.COLOR_RED elif", "elif self.extended.landed_state == self.extended.LANDED_STATE_LANDING: status_extended = 'Landed' color = curses.COLOR_GREEN", "image_callback(self, image_msg, camera): self.images[camera].append(image_msg) def statustext_callback(self, statustext_msg): screen = self.console", "/ np.mean(durations) if np.isnan(frequency): return 0 return frequency class StatusNode:", "= statustext_msg.text severity = statustext_msg.severity msg = statustext_msg severity_red =", "== self.extended.LANDED_STATE_LANDING: status_extended = 'Landed' color = curses.COLOR_GREEN elif self.extended.landed_state", "# Global pose p = self.global_pose.pose.position q = self.global_pose.pose.orientation quaternion", "self.extended = ExtendedState() self.extended_sub = rospy.Subscriber('mavros/extended_state', ExtendedState, callback=self.extended_callback, queue_size=1) #", "mode == 'Offboard': color = curses.COLOR_RED else: color = curses.COLOR_BLUE", "R.from_quat(quaternion) except ValueError: rot = R.from_euler('zyx', [0.0, 0.0, 0.0]) yaw,", "x_tab, 'Local pos: ') 
screen.addstr(row, x_indent, f'{x:.2f} {y:.2f} {z:.2f} (XYZ)", "'No' screen.addstr(row, x_tab, 'Armed: ') screen.addstr(row, x_indent, status_armed, get_color(color)) row", "= 'Air' color = curses.COLOR_RED elif self.extended.landed_state == self.extended.LANDED_STATE_LANDING: status_extended", "# queue_size=1) self.gps = NavSatFix() self.gps_sub = rospy.Subscriber('mavros/global_position/raw/fix', NavSatFix, callback=self.gps_callback,", "extended_msg): self.extended = extended_msg def diagnostics_callback(self, diagnostics_msg): for status in", "height_status = 15 self.status = curses.newwin(height_status, self.cols, 1, 2) #", "color = curses.COLOR_BLUE if mode == '': mode = 'None'", "+= 1 # Local pose p = self.local_pose.pose.position q =", "= int(self.battery.percentage * 100) color = curses.COLOR_CYAN if battery_percentage >", "= R.from_quat(quaternion) except ValueError: rot = R.from_euler('zyx', [0.0, 0.0, 0.0])", "2) # self.console = curses.newwin(self.rows - height_status, self.cols, 12, 2)", "q.w] try: rot = R.from_quat(quaternion) except ValueError: rot = R.from_euler('zyx',", "color = curses.COLOR_YELLOW elif battery_percentage > 0: color = curses.COLOR_RED", "round(self.gps.altitude, 2) status_gps = f'{latitude:.7f} {longitude:.7f} {altitude:.2f} (LLA)' screen.addstr(row, x_tab,", "= rospy.Subscriber('/diagnostics', DiagnosticArray, callback=self.diagnostics_callback, queue_size=1) self.setpoint = PositionTarget() self.setpoint_sub =", "'Mode: ') screen.addstr(row, x_indent, status_mode, get_color(color)) row += 1 #", "pos latitude = self.gps.latitude longitude = self.gps.longitude altitude = round(self.gps.altitude,", "x_tab, 'GPS pos: ') screen.addstr(row, x_indent, status_gps) row += 1", "if self.extended.landed_state == self.extended.LANDED_STATE_IN_AIR: status_extended = 'Air' color = curses.COLOR_RED", "ValueError: rot = R.from_euler('zyx', [0.0, 0.0, 0.0]) yaw, pitch, roll", "1 # Armed if self.state.armed: color = curses.COLOR_RED status_armed 
=", "import Rotation as R from sensor_msgs.msg import BatteryState, Image, NavSatFix", "mode = 'Return' status_mode = '{}'.format(mode) screen.addstr(row, x_tab, 'Mode: ')", "state_msg def extended_callback(self, extended_msg): self.extended = extended_msg def diagnostics_callback(self, diagnostics_msg):", "= 'Position' elif mode == 'Rtl': mode = 'Return' status_mode", "mode == 'Rtl': mode = 'Return' status_mode = '{}'.format(mode) screen.addstr(row,", "datetime.datetime.fromtimestamp(unix_time) text = statustext_msg.text severity = statustext_msg.severity msg = statustext_msg", "f'{time_str}: {text} ({color})' # screen.addstr(self.lines, 0, log, get_color(color)) self.lines +=", "print_status(self): screen = self.status screen.clear() # rospy.loginfo(status) # print(status) x_tab", "= curses.COLOR_CYAN screen.addstr(row, x_tab, 'State: ') screen.addstr(row, x_indent, status_extended, get_color(color))", "satellites = value.value elif value.key == 'Fix type': fix_type, color", "screen.addstr(row, x_indent, f'{x:.2f} {y:.2f} {z:.2f} (XYZ) {roll} {pitch} {yaw} (RPY)')", "f'{ff} {fr} {fb} {fl} (front right back left [Hz])') row", "queue_size=1) self.gps = NavSatFix() self.gps_sub = rospy.Subscriber('mavros/global_position/raw/fix', NavSatFix, callback=self.gps_callback, queue_size=1)", "curses.echo() def curses_main(screen): StatusNode(screen).run() def main(): try: curses.wrapper(curses_main) except rospy.ROSInterruptException:", "vz = round(v.x, 2), round(v.y, 2), round(v.z, 2) yaw =", "DiagnosticStatus from geometry_msgs.msg import PoseStamped from mavros_msgs.msg import ExtendedState, PositionTarget,", "rospy.loginfo(status) # print(status) x_tab = 0 x_indent = 14 row", "# GPS info satellites = 0 fix_type, color = GPS_FIX_DICT[0]", "0 for c in self.cameras} for cam, messages in self.images.items():", "Global pose p = self.global_pose.pose.position q = self.global_pose.pose.orientation quaternion =", "PositionTarget, callback=self.setpoint_callback, queue_size=1) 
self.cameras = ['front', 'right', 'back', 'left'] self.image_subscribers", "== 'Rtl': mode = 'Return' status_mode = '{}'.format(mode) screen.addstr(row, x_tab,", "x_indent, f'{vx:.2f} {vy:.2f} {vz:.2f} (XYZ) {yaw} (Y)') row += 1", "try: while not rospy.is_shutdown(): self.print_status() rate.sleep() except rospy.ROSInterruptException: curses.nocbreak() self.screen.keypad(False)", "# screen.addstr(self.lines, 0, log, get_color(color)) self.lines += 1 screen.refresh() def", "type': fix_type, color = GPS_FIX_DICT[int(value.value)] screen.addstr(row, x_tab, 'GPS info: ')", "self.setpoint_sub = rospy.Subscriber('mavros/setpoint_raw/local', PositionTarget, callback=self.setpoint_callback, queue_size=1) self.cameras = ['front', 'right',", "%H:%M:%S') # time_str = datetime.datetime.fromtimestamp(unix_time) text = statustext_msg.text severity =", "screen.addstr(row, x_indent, f'{ff} {fr} {fb} {fl} (front right back left", "+ '%' screen.addstr(row, x_tab, 'Battery: ') screen.addstr(row, x_indent, status_battery, get_color(color))", "mode.split('.')[-1] mode = mode.capitalize() if mode == 'Offboard': color =", "{roll} {pitch} {yaw} (RPY)') row += 1 # Global pose", "{vy:.2f} {vz:.2f} (XYZ) {yaw} (Y)') row += 1 # Cameras", "+= 1 # GPS info satellites = 0 fix_type, color", "cursor colors = [curses.COLOR_BLACK, curses.COLOR_BLUE, curses.COLOR_CYAN, curses.COLOR_GREEN, curses.COLOR_MAGENTA, curses.COLOR_RED, curses.COLOR_WHITE,", "screen.refresh() def print_status(self): screen = self.status screen.clear() # rospy.loginfo(status) #", "'Local pos: ') screen.addstr(row, x_indent, f'{x:.2f} {y:.2f} {z:.2f} (XYZ) {roll}", "('DGPS', curses.COLOR_MAGENTA), 5: ('RTK float', curses.COLOR_YELLOW), 6: ('RTK fix', curses.COLOR_GREEN)", "{c: deque(maxlen=10) for c in self.cameras} for camera in self.cameras:", "rate = rospy.Rate(self.rate) try: while not rospy.is_shutdown(): self.print_status() rate.sleep() except", "self.cols, 1, 2) # self.console = curses.newwin(self.rows - height_status, 
self.cols,", "fix_type, color = GPS_FIX_DICT[0] for value in self.diagnostic_gps.values: if value.key", "x_indent, status_battery, get_color(color)) row += 1 # Armed if self.state.armed:", "= State() self.state_sub = rospy.Subscriber('mavros/state', State, callback=self.state_callback, queue_size=1) self.battery =", "self.state = state_msg def extended_callback(self, extended_msg): self.extended = extended_msg def", "= curses.COLOR_GREEN elif battery_percentage > 25: color = curses.COLOR_YELLOW elif", "self.diagnostics_sub = rospy.Subscriber('/diagnostics', DiagnosticArray, callback=self.diagnostics_callback, queue_size=1) self.setpoint = PositionTarget() self.setpoint_sub", "x_indent, status_extended, get_color(color)) row += 1 # GPS info satellites", "= 'Takeoff' color = curses.COLOR_RED elif self.extended.landed_state == self.extended.LANDED_STATE_UNDEFINED: status_extended", "scipy.spatial.transform import Rotation as R from sensor_msgs.msg import BatteryState, Image,", "curses.COLOR_CYAN, curses.COLOR_GREEN, curses.COLOR_MAGENTA, curses.COLOR_RED, curses.COLOR_WHITE, curses.COLOR_YELLOW] # Curses color setup", "value.value elif value.key == 'Fix type': fix_type, color = GPS_FIX_DICT[int(value.value)]", "{pitch} {yaw} (RPY)') row += 1 # Global pose p", "= curses.COLOR_GREEN elif self.extended.landed_state == self.extended.LANDED_STATE_ON_GROUND: status_extended = 'Ground' color", "self.setpoint.velocity vx, vy, vz = round(v.x, 2), round(v.y, 2), round(v.z,", "Curses color setup curses.use_default_colors() for color in colors: curses.init_pair(color, color,", "0 self.text = '' self.screen.keypad(True) curses.curs_set(False) # Hide cursor colors", "curses.COLOR_YELLOW), 6: ('RTK fix', curses.COLOR_GREEN) } def get_color(color): return curses.color_pair(color)", "[0.0, 0.0, 0.0]) yaw, pitch, roll = rot.as_euler('zyx', degrees=True) x,", "msg = statustext_msg severity_red = [msg.EMERGENCY, msg.ALERT, msg.CRITICAL, msg.ERROR] severity_yellow", "self.text = 
f'{time_str}: {text} ({color})' # screen.addstr(self.lines, 0, log, get_color(color))", "severity in severity_red: color = curses.COLOR_RED elif severity in severity_yellow:", "curses.COLOR_MAGENTA, curses.COLOR_RED, curses.COLOR_WHITE, curses.COLOR_YELLOW] # Curses color setup curses.use_default_colors() for", "geometry_msgs.msg import PoseStamped from mavros_msgs.msg import ExtendedState, PositionTarget, State #", "DiagnosticStatus() self.diagnostics_sub = rospy.Subscriber('/diagnostics', DiagnosticArray, callback=self.diagnostics_callback, queue_size=1) self.setpoint = PositionTarget()", "= rospy.Subscriber('global_position/pose', PoseStamped, callback=self.global_pose_callback, queue_size=1) self.diagnostics = DiagnosticArray() self.diagnostic_gps =", "+= 1 # Setpoint v = self.setpoint.velocity vx, vy, vz", "get_color(color)) self.lines += 1 screen.refresh() def print_status(self): screen = self.status", "= PositionTarget() self.setpoint_sub = rospy.Subscriber('mavros/setpoint_raw/local', PositionTarget, callback=self.setpoint_callback, queue_size=1) self.cameras =", "= int(yaw), int(pitch), int(roll) screen.addstr(row, x_tab, 'Local pos: ') screen.addstr(row,", "+= 1 # Cameras freqs = {c: 0 for c", "= curses.COLOR_CYAN mode = self.state.mode if mode.startswith('AUTO'): mode = mode.split('.')[-1]", "color = curses.COLOR_CYAN screen.addstr(row, x_tab, 'State: ') screen.addstr(row, x_indent, status_extended,", "self.console time_str = datetime.now().strftime('%Y-%m-%d %H:%M:%S') # time_str = datetime.datetime.fromtimestamp(unix_time) text", "{yaw} (Y)') row += 1 # Cameras freqs = {c:", "diagnostics_callback(self, diagnostics_msg): for status in diagnostics_msg.status: if 'GPS' in status.name:", "right back left [Hz])') row += 1 screen.refresh() self.screen.refresh() def" ]
[ "it is the same here) # todo: adapt to your", "\"\"\"Initializes a new instance of the Result Parameter Args: path", "# load the CSVs. We usually want to compare different", "for metric, (min_, max_) in zip(metrics, metrics_yaxis_limits): boxplot(os.path.join(plot_dir, '{}_{}.png'.format(label, metric)),", "and format litle, labels, and ticks ax.set_title(title, fontweight='bold', fontsize=20) ax.set_ylabel(y_label,", "fontdict={'fontsize': 18, 'fontweight': 'bold'}, rotation=45) # remove frame ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False)", "str, y_label: str, x_ticks: tuple, min_: float = None, max_:", "inversed ax = fig.add_subplot(111) # create an axes instance (nrows=ncols=index)", "usually want to compare different methods (e.g. a set of", "is not None and min_ < min_original else min_original max_", "2 *1.5, 5*1.5)) # figsize defaults to (width, height) =(6.4,", "if min_ is not None and min_ < min_original else", "metric, (min_, max_) in zip(metrics, metrics_yaxis_limits): boxplot(os.path.join(plot_dir, '{}_{}.png'.format(label, metric)), [format_data(df,", "'Hippocampus','Amygdala','Thalamus') # the brain structures/tissues you are interested in #", "qq-plots \"\"\" metrics = ('DICE', 'HDRFDST') # the metrics we", "not None: min_original, max_original = ax.get_ylim() min_ = min_ if", "else min_original max_ = max_ if max_ is not None", "different CSVs) dfs = [] methods = [] for res", "class ResultParam(): \"\"\"Result Parameter\"\"\" def __init__(self, path: Path, param_str: str):", "'Dice coefficient' elif metric == 'HDRFDST': return 'Hausdorff distance (mm)'", "if min_ is not None or max_ is not None:", "tuples of y-axis limits (min, max) for each metric. 
Use", "of the Result Parameter Args: path (Path): path to the", "ath to the desired result folder to store the qq-plots", "param_str def set_box_format(bp, color): plt.setp(bp['boxes'], color=color) plt.setp(bp['whiskers'], color=color) plt.setp(bp['caps'], color=color)", "ResultParam(): \"\"\"Result Parameter\"\"\" def __init__(self, path: Path, param_str: str): \"\"\"Initializes", "Parameter\"\"\" def __init__(self, path: Path, param_str: str): \"\"\"Initializes a new", "parameters (Path and description) plot_dir: ath to the desired result", "ax = fig.add_subplot(111) # create an axes instance (nrows=ncols=index) bp", "(width, height) =(6.4, 4.8), # for boxplots, we want the", "since it should be clear from the x-ticks ax.yaxis.set_tick_params(labelsize=12) ax.set_xticklabels(x_ticks,", "interested in # load the CSVs. We usually want to", "results.append(ResultParam(Path(Path.cwd() / \"mia-result\\gridsearch_PKF/2020-12-11-09-51-54/no_PP/results.csv\"), \"no pp\")) results.append(ResultParam(Path(Path.cwd() /\"mia-result/gridsearch_PKF/2020-12-11-09-51-54/with_PP/PP-V-20_0-BG-True/results.csv\"), \"with pp\")) main(results,", "str): return data[data['LABEL'] == label][metric].values def metric_to_readable_text(metric: str): if metric", "str, x_label: str, y_label: str, x_ticks: tuple, min_: float =", "of different features), therefore, # we load two CSV (for", "None and min_ < min_original else min_original max_ = max_", "containing the parameters used in the postprocessing \"\"\" self.path =", "metric) for df in dfs], title.format(label), 'Method', metric_to_readable_text(metric), methods, min_,", "max_original ax.set_ylim(min_, max_) plt.savefig(file_path, bbox_inches=\"tight\") plt.close() def format_data(data, label: str,", "parameter values from text file, use them to plot the", "different features), therefore, # we load two CSV (for simplicity,", "return data[data['LABEL'] == label][metric].values def metric_to_readable_text(metric: str): if metric ==", 
"metric: str): return data[data['LABEL'] == label][metric].values def metric_to_readable_text(metric: str): if", "min_original else min_original max_ = max_ if max_ is not", "to plot the results for metrics_yaxis_limits = ((0.0, 1.0), (0.0,", "4.8), # for boxplots, we want the ratio to be", "the plot's readability title = '{}' for label in labels:", "ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) # thicken frame ax.spines['left'].set_linewidth(2) ax.spines['bottom'].set_linewidth(2) # adjust min", "set_box_format(bp, '000') # set and format litle, labels, and ticks", "not None or max_ is not None: min_original, max_original =", "fontweight='bold', fontsize=9.5) # we don't use the x-label since it", "and min_ < min_original else min_original max_ = max_ if", "metric == 'HDRFDST': return 'Hausdorff distance (mm)' else: raise ValueError('Metric", "__name__ == '__main__': results = [] results.append(ResultParam(Path(Path.cwd() / \"mia-result\\gridsearch_PKF/2020-12-11-09-51-54/no_PP/results.csv\"), \"no", "metrics = ('DICE', 'HDRFDST') # the metrics we want to", "fontsize=18) # ax.set_xlabel(x_label, fontweight='bold', fontsize=9.5) # we don't use the", "results: dfs.append(pd.read_csv(res.path, sep=';')) methods.append(res.param_str) # todo: read parameter values from", "simplicity, it is the same here) # todo: adapt to", "ax.set_title(title, fontweight='bold', fontsize=20) ax.set_ylabel(y_label, fontweight='bold', fontsize=18) # ax.set_xlabel(x_label, fontweight='bold', fontsize=9.5)", "to improve the plot's readability title = '{}' for label", "<filename>bin/boxplot_param.py import argparse import os import matplotlib matplotlib.use('Agg') import matplotlib.pyplot", "'bold'}, rotation=45) # remove frame ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) # thicken frame", "os import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import pandas", "plt.setp(bp['whiskers'], color=color) 
plt.setp(bp['caps'], color=color) plt.setp(bp['caps'], linewidth=1) plt.setp(bp['medians'], color='red') plt.setp(bp['medians'], linewidth=1.5)", "list, title: str, x_label: str, y_label: str, x_ticks: tuple, min_:", "defaults to (width, height) =(6.4, 4.8), # for boxplots, we", "max_ ) if __name__ == '__main__': results = [] results.append(ResultParam(Path(Path.cwd()", "desired result folder to store the qq-plots \"\"\" metrics =", "load different CSVs) dfs = [] methods = [] for", "\"\"\"generates box plots comparing two or more result sets for", "result folder to store the qq-plots \"\"\" metrics = ('DICE',", "frame ax.spines['left'].set_linewidth(2) ax.spines['bottom'].set_linewidth(2) # adjust min and max if provided", "min and max if provided if min_ is not None", "def main(results: [ResultParam], plot_dir: Path): \"\"\"generates box plots comparing two", "Use None if unknown labels = ('WhiteMatter','GreyMatter', 'Hippocampus','Amygdala','Thalamus') # the", "color): plt.setp(bp['boxes'], color=color) plt.setp(bp['whiskers'], color=color) plt.setp(bp['caps'], color=color) plt.setp(bp['caps'], linewidth=1) plt.setp(bp['medians'],", "if len(data) != len(x_ticks): raise ValueError('arguments data and x_ticks need", "want to compare different methods (e.g. a set of different", "in dfs], title.format(label), 'Method', metric_to_readable_text(metric), methods, min_, max_ ) if", "dfs], title.format(label), 'Method', metric_to_readable_text(metric), methods, min_, max_ ) if __name__", "two CSV (for simplicity, it is the same here) #", ") if __name__ == '__main__': results = [] results.append(ResultParam(Path(Path.cwd() /", "to your needs to compare different methods (e.g. 
load different", "metrics_yaxis_limits): boxplot(os.path.join(plot_dir, '{}_{}.png'.format(label, metric)), [format_data(df, label, metric) for df in", "use the x-label since it should be clear from the", "'000') # set and format litle, labels, and ticks ax.set_title(title,", "a new instance of the Result Parameter Args: path (Path):", "max_original else max_original ax.set_ylim(min_, max_) plt.savefig(file_path, bbox_inches=\"tight\") plt.close() def format_data(data,", "the information about the paramter # some parameters to improve", "min_: float = None, max_: float = None): if len(data)", "[] results.append(ResultParam(Path(Path.cwd() / \"mia-result\\gridsearch_PKF/2020-12-11-09-51-54/no_PP/results.csv\"), \"no pp\")) results.append(ResultParam(Path(Path.cwd() /\"mia-result/gridsearch_PKF/2020-12-11-09-51-54/with_PP/PP-V-20_0-BG-True/results.csv\"), \"with pp\"))", "x-label since it should be clear from the x-ticks ax.yaxis.set_tick_params(labelsize=12)", "max_ = max_ if max_ is not None and max_", "None and max_ > max_original else max_original ax.set_ylim(min_, max_) plt.savefig(file_path,", "== '__main__': results = [] results.append(ResultParam(Path(Path.cwd() / \"mia-result\\gridsearch_PKF/2020-12-11-09-51-54/no_PP/results.csv\"), \"no pp\"))", "set of different features), therefore, # we load two CSV", "max_ is not None: min_original, max_original = ax.get_ylim() min_ =", "we want the ratio to be inversed ax = fig.add_subplot(111)", "= [] methods = [] for res in results: dfs.append(pd.read_csv(res.path,", "=(6.4, 4.8), # for boxplots, we want the ratio to", "float = None): if len(data) != len(x_ticks): raise ValueError('arguments data", "data and x_ticks need to have same length') fig =", "= [] for res in results: dfs.append(pd.read_csv(res.path, sep=';')) methods.append(res.param_str) #", "'{}' for label in labels: for metric, (min_, max_) in", "None: min_original, max_original = ax.get_ylim() min_ = min_ if min_", "brain structures/tissues you are 
interested in # load the CSVs.", "ratio to be inversed ax = fig.add_subplot(111) # create an", "# for boxplots, we want the ratio to be inversed", "= param_str def set_box_format(bp, color): plt.setp(bp['boxes'], color=color) plt.setp(bp['whiskers'], color=color) plt.setp(bp['caps'],", "len(data) != len(x_ticks): raise ValueError('arguments data and x_ticks need to", "dfs = [] methods = [] for res in results:", "ax.set_xticklabels(x_ticks, fontdict={'fontsize': 18, 'fontweight': 'bold'}, rotation=45) # remove frame ax.spines['top'].set_visible(False)", "max_ is not None and max_ > max_original else max_original", "provided if min_ is not None or max_ is not", "= path self.param_str = param_str def set_box_format(bp, color): plt.setp(bp['boxes'], color=color)", "plt.setp(bp['medians'], linewidth=1.5) plt.setp(bp['fliers'], marker='.') plt.setp(bp['fliers'], markerfacecolor='black') plt.setp(bp['fliers'], alpha=1) def boxplot(file_path:", "is not None or max_ is not None: min_original, max_original", "for each metric. 
Use None if unknown labels = ('WhiteMatter','GreyMatter',", "label in labels: for metric, (min_, max_) in zip(metrics, metrics_yaxis_limits):", "plot's readability title = '{}' for label in labels: for", "result sets for all labels Args: results ([ResultParam]): a list", "x_ticks need to have same length') fig = plt.figure( figsize=(", "# create an axes instance (nrows=ncols=index) bp = ax.boxplot(data, widths=0.6)", "Args: results ([ResultParam]): a list of result parameters (Path and", "== label][metric].values def metric_to_readable_text(metric: str): if metric == 'DICE': return", "plt.figure( figsize=( 2 *1.5, 5*1.5)) # figsize defaults to (width,", "'fontweight': 'bold'}, rotation=45) # remove frame ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) # thicken", "fig.add_subplot(111) # create an axes instance (nrows=ncols=index) bp = ax.boxplot(data,", "max_: float = None): if len(data) != len(x_ticks): raise ValueError('arguments", "as pd from pathlib import Path class ResultParam(): \"\"\"Result Parameter\"\"\"", "methods (e.g. 
load different CSVs) dfs = [] methods =", "them to plot the information about the paramter # some", "# the brain structures/tissues you are interested in # load", "axes instance (nrows=ncols=index) bp = ax.boxplot(data, widths=0.6) set_box_format(bp, '000') #", "result parameters (Path and description) plot_dir: ath to the desired", "height) =(6.4, 4.8), # for boxplots, we want the ratio", "coefficient' elif metric == 'HDRFDST': return 'Hausdorff distance (mm)' else:", "ax.set_xlabel(x_label, fontweight='bold', fontsize=9.5) # we don't use the x-label since", "want the ratio to be inversed ax = fig.add_subplot(111) #", "data: list, title: str, x_label: str, y_label: str, x_ticks: tuple,", "return 'Dice coefficient' elif metric == 'HDRFDST': return 'Hausdorff distance", "= ax.boxplot(data, widths=0.6) set_box_format(bp, '000') # set and format litle,", "else: raise ValueError('Metric \"{}\" unknown'.format(metric)) def main(results: [ResultParam], plot_dir: Path):", "or more result sets for all labels Args: results ([ResultParam]):", "litle, labels, and ticks ax.set_title(title, fontweight='bold', fontsize=20) ax.set_ylabel(y_label, fontweight='bold', fontsize=18)", "same length') fig = plt.figure( figsize=( 2 *1.5, 5*1.5)) #", "(mm)' else: raise ValueError('Metric \"{}\" unknown'.format(metric)) def main(results: [ResultParam], plot_dir:", "= fig.add_subplot(111) # create an axes instance (nrows=ncols=index) bp =", "folder to store the qq-plots \"\"\" metrics = ('DICE', 'HDRFDST')", "plt import pandas as pd from pathlib import Path class", "argparse import os import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt", "'HDRFDST': return 'Hausdorff distance (mm)' else: raise ValueError('Metric \"{}\" unknown'.format(metric))", "two or more result sets for all labels Args: results", "remove frame ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) # thicken frame ax.spines['left'].set_linewidth(2) 
ax.spines['bottom'].set_linewidth(2) #", "rotation=45) # remove frame ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) # thicken frame ax.spines['left'].set_linewidth(2)", "plt.setp(bp['fliers'], alpha=1) def boxplot(file_path: str, data: list, title: str, x_label:", "widths=0.6) set_box_format(bp, '000') # set and format litle, labels, and", "'HDRFDST') # the metrics we want to plot the results", "if metric == 'DICE': return 'Dice coefficient' elif metric ==", "you are interested in # load the CSVs. We usually", "\"\"\" self.path = path self.param_str = param_str def set_box_format(bp, color):", "need to have same length') fig = plt.figure( figsize=( 2", "needs to compare different methods (e.g. load different CSVs) dfs", "here) # todo: adapt to your needs to compare different", "CSVs. We usually want to compare different methods (e.g. a", "= min_ if min_ is not None and min_ <", "Result Parameter Args: path (Path): path to the desired result", "the brain structures/tissues you are interested in # load the", "different methods (e.g. a set of different features), therefore, #", "matplotlib.use('Agg') import matplotlib.pyplot as plt import pandas as pd from", "= ('WhiteMatter','GreyMatter', 'Hippocampus','Amygdala','Thalamus') # the brain structures/tissues you are interested", "from the x-ticks ax.yaxis.set_tick_params(labelsize=12) ax.set_xticklabels(x_ticks, fontdict={'fontsize': 18, 'fontweight': 'bold'}, rotation=45)", "Path): \"\"\"generates box plots comparing two or more result sets", "values from text file, use them to plot the information", "postprocessing \"\"\" self.path = path self.param_str = param_str def set_box_format(bp,", "length') fig = plt.figure( figsize=( 2 *1.5, 5*1.5)) # figsize", "(min, max) for each metric. 
Use None if unknown labels", "clear from the x-ticks ax.yaxis.set_tick_params(labelsize=12) ax.set_xticklabels(x_ticks, fontdict={'fontsize': 18, 'fontweight': 'bold'},", "methods = [] for res in results: dfs.append(pd.read_csv(res.path, sep=';')) methods.append(res.param_str)", "load the CSVs. We usually want to compare different methods", "compare different methods (e.g. a set of different features), therefore,", "= max_ if max_ is not None and max_ >", "import os import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import", "adjust min and max if provided if min_ is not", "label][metric].values def metric_to_readable_text(metric: str): if metric == 'DICE': return 'Dice", "sets for all labels Args: results ([ResultParam]): a list of", "# remove frame ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) # thicken frame ax.spines['left'].set_linewidth(2) ax.spines['bottom'].set_linewidth(2)", "the paramter # some parameters to improve the plot's readability", "max_) in zip(metrics, metrics_yaxis_limits): boxplot(os.path.join(plot_dir, '{}_{}.png'.format(label, metric)), [format_data(df, label, metric)", "(0.0, 18)) # tuples of y-axis limits (min, max) for", "def boxplot(file_path: str, data: list, title: str, x_label: str, y_label:", "text file, use them to plot the information about the", "metric_to_readable_text(metric: str): if metric == 'DICE': return 'Dice coefficient' elif", "plt.setp(bp['medians'], color='red') plt.setp(bp['medians'], linewidth=1.5) plt.setp(bp['fliers'], marker='.') plt.setp(bp['fliers'], markerfacecolor='black') plt.setp(bp['fliers'], alpha=1)", "all labels Args: results ([ResultParam]): a list of result parameters", "file param_str (str): string containing the parameters used in the", "instance of the Result Parameter Args: path (Path): path to", "therefore, # we load two CSV (for simplicity, it is", "pandas as pd from pathlib import Path class ResultParam(): \"\"\"Result", "about the paramter # 
some parameters to improve the plot's", "for all labels Args: results ([ResultParam]): a list of result", "compare different methods (e.g. load different CSVs) dfs = []", "> max_original else max_original ax.set_ylim(min_, max_) plt.savefig(file_path, bbox_inches=\"tight\") plt.close() def", "pd from pathlib import Path class ResultParam(): \"\"\"Result Parameter\"\"\" def", "x_label: str, y_label: str, x_ticks: tuple, min_: float = None,", "unknown'.format(metric)) def main(results: [ResultParam], plot_dir: Path): \"\"\"generates box plots comparing", "frame ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) # thicken frame ax.spines['left'].set_linewidth(2) ax.spines['bottom'].set_linewidth(2) # adjust", "plt.setp(bp['caps'], color=color) plt.setp(bp['caps'], linewidth=1) plt.setp(bp['medians'], color='red') plt.setp(bp['medians'], linewidth=1.5) plt.setp(bp['fliers'], marker='.')", "# we don't use the x-label since it should be", "ax.spines['left'].set_linewidth(2) ax.spines['bottom'].set_linewidth(2) # adjust min and max if provided if", "are interested in # load the CSVs. 
We usually want", "boxplots, we want the ratio to be inversed ax =", "# thicken frame ax.spines['left'].set_linewidth(2) ax.spines['bottom'].set_linewidth(2) # adjust min and max", "plot the results for metrics_yaxis_limits = ((0.0, 1.0), (0.0, 18))", "ax.spines['bottom'].set_linewidth(2) # adjust min and max if provided if min_", "max_ if max_ is not None and max_ > max_original", "'{}_{}.png'.format(label, metric)), [format_data(df, label, metric) for df in dfs], title.format(label),", "dfs.append(pd.read_csv(res.path, sep=';')) methods.append(res.param_str) # todo: read parameter values from text", "# adjust min and max if provided if min_ is", "metric == 'DICE': return 'Dice coefficient' elif metric == 'HDRFDST':", "x-ticks ax.yaxis.set_tick_params(labelsize=12) ax.set_xticklabels(x_ticks, fontdict={'fontsize': 18, 'fontweight': 'bold'}, rotation=45) # remove", "desired result file param_str (str): string containing the parameters used", "a list of result parameters (Path and description) plot_dir: ath", "([ResultParam]): a list of result parameters (Path and description) plot_dir:", "plot the information about the paramter # some parameters to", "title: str, x_label: str, y_label: str, x_ticks: tuple, min_: float", "of result parameters (Path and description) plot_dir: ath to the", "be inversed ax = fig.add_subplot(111) # create an axes instance", "it should be clear from the x-ticks ax.yaxis.set_tick_params(labelsize=12) ax.set_xticklabels(x_ticks, fontdict={'fontsize':", "'DICE': return 'Dice coefficient' elif metric == 'HDRFDST': return 'Hausdorff", "ax.yaxis.set_tick_params(labelsize=12) ax.set_xticklabels(x_ticks, fontdict={'fontsize': 18, 'fontweight': 'bold'}, rotation=45) # remove frame", "used in the postprocessing \"\"\" self.path = path self.param_str =", "fig = plt.figure( figsize=( 2 *1.5, 5*1.5)) # figsize defaults", "color=color) plt.setp(bp['caps'], color=color) plt.setp(bp['caps'], linewidth=1) plt.setp(bp['medians'], color='red') 
plt.setp(bp['medians'], linewidth=1.5) plt.setp(bp['fliers'],", "the desired result file param_str (str): string containing the parameters", "new instance of the Result Parameter Args: path (Path): path", "str, x_ticks: tuple, min_: float = None, max_: float =", "!= len(x_ticks): raise ValueError('arguments data and x_ticks need to have", "or max_ is not None: min_original, max_original = ax.get_ylim() min_", "Path class ResultParam(): \"\"\"Result Parameter\"\"\" def __init__(self, path: Path, param_str:", "y_label: str, x_ticks: tuple, min_: float = None, max_: float", "more result sets for all labels Args: results ([ResultParam]): a", "in labels: for metric, (min_, max_) in zip(metrics, metrics_yaxis_limits): boxplot(os.path.join(plot_dir,", "adapt to your needs to compare different methods (e.g. load", "should be clear from the x-ticks ax.yaxis.set_tick_params(labelsize=12) ax.set_xticklabels(x_ticks, fontdict={'fontsize': 18,", "import argparse import os import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as", "a set of different features), therefore, # we load two", "str): if metric == 'DICE': return 'Dice coefficient' elif metric", "main(results: [ResultParam], plot_dir: Path): \"\"\"generates box plots comparing two or", "string containing the parameters used in the postprocessing \"\"\" self.path", "have same length') fig = plt.figure( figsize=( 2 *1.5, 5*1.5))", "we don't use the x-label since it should be clear", "max_) plt.savefig(file_path, bbox_inches=\"tight\") plt.close() def format_data(data, label: str, metric: str):", "[ResultParam], plot_dir: Path): \"\"\"generates box plots comparing two or more", "if max_ is not None and max_ > max_original else", "an axes instance (nrows=ncols=index) bp = ax.boxplot(data, widths=0.6) set_box_format(bp, '000')", "CSV (for simplicity, it is the same here) # todo:", "bbox_inches=\"tight\") plt.close() def format_data(data, label: str, metric: str): return data[data['LABEL']", "readability title = 
'{}' for label in labels: for metric,", "methods, min_, max_ ) if __name__ == '__main__': results =", "in # load the CSVs. We usually want to compare", "('WhiteMatter','GreyMatter', 'Hippocampus','Amygdala','Thalamus') # the brain structures/tissues you are interested in", "(str): string containing the parameters used in the postprocessing \"\"\"", "color='red') plt.setp(bp['medians'], linewidth=1.5) plt.setp(bp['fliers'], marker='.') plt.setp(bp['fliers'], markerfacecolor='black') plt.setp(bp['fliers'], alpha=1) def", "the ratio to be inversed ax = fig.add_subplot(111) # create", "def format_data(data, label: str, metric: str): return data[data['LABEL'] == label][metric].values", "the x-ticks ax.yaxis.set_tick_params(labelsize=12) ax.set_xticklabels(x_ticks, fontdict={'fontsize': 18, 'fontweight': 'bold'}, rotation=45) #", "file, use them to plot the information about the paramter", "to be inversed ax = fig.add_subplot(111) # create an axes", "'Hausdorff distance (mm)' else: raise ValueError('Metric \"{}\" unknown'.format(metric)) def main(results:", "(nrows=ncols=index) bp = ax.boxplot(data, widths=0.6) set_box_format(bp, '000') # set and", "plt.setp(bp['fliers'], markerfacecolor='black') plt.setp(bp['fliers'], alpha=1) def boxplot(file_path: str, data: list, title:", "ticks ax.set_title(title, fontweight='bold', fontsize=20) ax.set_ylabel(y_label, fontweight='bold', fontsize=18) # ax.set_xlabel(x_label, fontweight='bold',", "to the desired result folder to store the qq-plots \"\"\"", "(min_, max_) in zip(metrics, metrics_yaxis_limits): boxplot(os.path.join(plot_dir, '{}_{}.png'.format(label, metric)), [format_data(df, label,", "format_data(data, label: str, metric: str): return data[data['LABEL'] == label][metric].values def", "import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import pandas as", "and x_ticks need to have same length') fig = plt.figure(", "def __init__(self, path: Path, param_str: str): \"\"\"Initializes a new instance", 
"and description) plot_dir: ath to the desired result folder to", "# todo: read parameter values from text file, use them", "min_ < min_original else min_original max_ = max_ if max_", "methods (e.g. a set of different features), therefore, # we", "labels = ('WhiteMatter','GreyMatter', 'Hippocampus','Amygdala','Thalamus') # the brain structures/tissues you are", "the qq-plots \"\"\" metrics = ('DICE', 'HDRFDST') # the metrics", "(Path): path to the desired result file param_str (str): string", "to compare different methods (e.g. load different CSVs) dfs =", "for boxplots, we want the ratio to be inversed ax", "label, metric) for df in dfs], title.format(label), 'Method', metric_to_readable_text(metric), methods,", "results for metrics_yaxis_limits = ((0.0, 1.0), (0.0, 18)) # tuples", "read parameter values from text file, use them to plot", "ax.spines['right'].set_visible(False) # thicken frame ax.spines['left'].set_linewidth(2) ax.spines['bottom'].set_linewidth(2) # adjust min and", "import pandas as pd from pathlib import Path class ResultParam():", "ValueError('Metric \"{}\" unknown'.format(metric)) def main(results: [ResultParam], plot_dir: Path): \"\"\"generates box", "zip(metrics, metrics_yaxis_limits): boxplot(os.path.join(plot_dir, '{}_{}.png'.format(label, metric)), [format_data(df, label, metric) for df", "min_ is not None and min_ < min_original else min_original", "alpha=1) def boxplot(file_path: str, data: list, title: str, x_label: str,", "max) for each metric. 
Use None if unknown labels =", "ax.set_ylim(min_, max_) plt.savefig(file_path, bbox_inches=\"tight\") plt.close() def format_data(data, label: str, metric:", "linewidth=1.5) plt.setp(bp['fliers'], marker='.') plt.setp(bp['fliers'], markerfacecolor='black') plt.setp(bp['fliers'], alpha=1) def boxplot(file_path: str,", "parameters used in the postprocessing \"\"\" self.path = path self.param_str", "return 'Hausdorff distance (mm)' else: raise ValueError('Metric \"{}\" unknown'.format(metric)) def", "the postprocessing \"\"\" self.path = path self.param_str = param_str def", "# figsize defaults to (width, height) =(6.4, 4.8), # for", "format litle, labels, and ticks ax.set_title(title, fontweight='bold', fontsize=20) ax.set_ylabel(y_label, fontweight='bold',", "= None, max_: float = None): if len(data) != len(x_ticks):", "plt.setp(bp['caps'], linewidth=1) plt.setp(bp['medians'], color='red') plt.setp(bp['medians'], linewidth=1.5) plt.setp(bp['fliers'], marker='.') plt.setp(bp['fliers'], markerfacecolor='black')", "metric_to_readable_text(metric), methods, min_, max_ ) if __name__ == '__main__': results", "# tuples of y-axis limits (min, max) for each metric.", "__init__(self, path: Path, param_str: str): \"\"\"Initializes a new instance of", "methods.append(res.param_str) # todo: read parameter values from text file, use", "= [] results.append(ResultParam(Path(Path.cwd() / \"mia-result\\gridsearch_PKF/2020-12-11-09-51-54/no_PP/results.csv\"), \"no pp\")) results.append(ResultParam(Path(Path.cwd() /\"mia-result/gridsearch_PKF/2020-12-11-09-51-54/with_PP/PP-V-20_0-BG-True/results.csv\"), \"with", "labels, and ticks ax.set_title(title, fontweight='bold', fontsize=20) ax.set_ylabel(y_label, fontweight='bold', fontsize=18) #", "x_ticks: tuple, min_: float = None, max_: float = None):", "improve the plot's readability title = '{}' for label in", "the parameters used in the postprocessing \"\"\" self.path = path", "instance (nrows=ncols=index) bp = ax.boxplot(data, 
widths=0.6) set_box_format(bp, '000') # set", "plt.savefig(file_path, bbox_inches=\"tight\") plt.close() def format_data(data, label: str, metric: str): return", "bp = ax.boxplot(data, widths=0.6) set_box_format(bp, '000') # set and format", "limits (min, max) for each metric. Use None if unknown", "markerfacecolor='black') plt.setp(bp['fliers'], alpha=1) def boxplot(file_path: str, data: list, title: str,", "min_ if min_ is not None and min_ < min_original", "the x-label since it should be clear from the x-ticks", "features), therefore, # we load two CSV (for simplicity, it", "CSVs) dfs = [] methods = [] for res in", "title.format(label), 'Method', metric_to_readable_text(metric), methods, min_, max_ ) if __name__ ==", "None, max_: float = None): if len(data) != len(x_ticks): raise", "Parameter Args: path (Path): path to the desired result file", "and ticks ax.set_title(title, fontweight='bold', fontsize=20) ax.set_ylabel(y_label, fontweight='bold', fontsize=18) # ax.set_xlabel(x_label,", "raise ValueError('arguments data and x_ticks need to have same length')", "min_, max_ ) if __name__ == '__main__': results = []", "len(x_ticks): raise ValueError('arguments data and x_ticks need to have same", "figsize=( 2 *1.5, 5*1.5)) # figsize defaults to (width, height)", "self.path = path self.param_str = param_str def set_box_format(bp, color): plt.setp(bp['boxes'],", "metric. 
Use None if unknown labels = ('WhiteMatter','GreyMatter', 'Hippocampus','Amygdala','Thalamus') #", "plots comparing two or more result sets for all labels", "labels Args: results ([ResultParam]): a list of result parameters (Path", "metrics we want to plot the results for metrics_yaxis_limits =", "unknown labels = ('WhiteMatter','GreyMatter', 'Hippocampus','Amygdala','Thalamus') # the brain structures/tissues you", "5*1.5)) # figsize defaults to (width, height) =(6.4, 4.8), #", "figsize defaults to (width, height) =(6.4, 4.8), # for boxplots,", "to have same length') fig = plt.figure( figsize=( 2 *1.5,", "as plt import pandas as pd from pathlib import Path", "want to plot the results for metrics_yaxis_limits = ((0.0, 1.0),", "each metric. Use None if unknown labels = ('WhiteMatter','GreyMatter', 'Hippocampus','Amygdala','Thalamus')", "fontsize=20) ax.set_ylabel(y_label, fontweight='bold', fontsize=18) # ax.set_xlabel(x_label, fontweight='bold', fontsize=9.5) # we", "from pathlib import Path class ResultParam(): \"\"\"Result Parameter\"\"\" def __init__(self,", "the CSVs. We usually want to compare different methods (e.g.", "store the qq-plots \"\"\" metrics = ('DICE', 'HDRFDST') # the", "information about the paramter # some parameters to improve the", "res in results: dfs.append(pd.read_csv(res.path, sep=';')) methods.append(res.param_str) # todo: read parameter", "to store the qq-plots \"\"\" metrics = ('DICE', 'HDRFDST') #", "if provided if min_ is not None or max_ is", "min_original max_ = max_ if max_ is not None and", "set_box_format(bp, color): plt.setp(bp['boxes'], color=color) plt.setp(bp['whiskers'], color=color) plt.setp(bp['caps'], color=color) plt.setp(bp['caps'], linewidth=1)", "# todo: adapt to your needs to compare different methods", "1.0), (0.0, 18)) # tuples of y-axis limits (min, max)", "None if unknown labels = ('WhiteMatter','GreyMatter', 'Hippocampus','Amygdala','Thalamus') # the brain", "(e.g. 
a set of different features), therefore, # we load", "fontweight='bold', fontsize=18) # ax.set_xlabel(x_label, fontweight='bold', fontsize=9.5) # we don't use", "is the same here) # todo: adapt to your needs", "# set and format litle, labels, and ticks ax.set_title(title, fontweight='bold',", "different methods (e.g. load different CSVs) dfs = [] methods", "= plt.figure( figsize=( 2 *1.5, 5*1.5)) # figsize defaults to", "the same here) # todo: adapt to your needs to", "load two CSV (for simplicity, it is the same here)", "for metrics_yaxis_limits = ((0.0, 1.0), (0.0, 18)) # tuples of", "None): if len(data) != len(x_ticks): raise ValueError('arguments data and x_ticks", "elif metric == 'HDRFDST': return 'Hausdorff distance (mm)' else: raise", "('DICE', 'HDRFDST') # the metrics we want to plot the", "in the postprocessing \"\"\" self.path = path self.param_str = param_str", "path (Path): path to the desired result file param_str (str):", "max_ > max_original else max_original ax.set_ylim(min_, max_) plt.savefig(file_path, bbox_inches=\"tight\") plt.close()", "[] for res in results: dfs.append(pd.read_csv(res.path, sep=';')) methods.append(res.param_str) # todo:", "min_ = min_ if min_ is not None and min_", "\"\"\"Result Parameter\"\"\" def __init__(self, path: Path, param_str: str): \"\"\"Initializes a", "(Path and description) plot_dir: ath to the desired result folder", "and max if provided if min_ is not None or", "= '{}' for label in labels: for metric, (min_, max_)", "pathlib import Path class ResultParam(): \"\"\"Result Parameter\"\"\" def __init__(self, path:", "box plots comparing two or more result sets for all", "path to the desired result file param_str (str): string containing", "structures/tissues you are interested in # load the CSVs. 
We", "data[data['LABEL'] == label][metric].values def metric_to_readable_text(metric: str): if metric == 'DICE':", "not None and max_ > max_original else max_original ax.set_ylim(min_, max_)", "plt.close() def format_data(data, label: str, metric: str): return data[data['LABEL'] ==", "list of result parameters (Path and description) plot_dir: ath to", "color=color) plt.setp(bp['caps'], linewidth=1) plt.setp(bp['medians'], color='red') plt.setp(bp['medians'], linewidth=1.5) plt.setp(bp['fliers'], marker='.') plt.setp(bp['fliers'],", "fontsize=9.5) # we don't use the x-label since it should", "18, 'fontweight': 'bold'}, rotation=45) # remove frame ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) #", "tuple, min_: float = None, max_: float = None): if", "[format_data(df, label, metric) for df in dfs], title.format(label), 'Method', metric_to_readable_text(metric),", "'__main__': results = [] results.append(ResultParam(Path(Path.cwd() / \"mia-result\\gridsearch_PKF/2020-12-11-09-51-54/no_PP/results.csv\"), \"no pp\")) results.append(ResultParam(Path(Path.cwd()", "self.param_str = param_str def set_box_format(bp, color): plt.setp(bp['boxes'], color=color) plt.setp(bp['whiskers'], color=color)", "== 'HDRFDST': return 'Hausdorff distance (mm)' else: raise ValueError('Metric \"{}\"", "in zip(metrics, metrics_yaxis_limits): boxplot(os.path.join(plot_dir, '{}_{}.png'.format(label, metric)), [format_data(df, label, metric) for", "Path, param_str: str): \"\"\"Initializes a new instance of the Result", "# ax.set_xlabel(x_label, fontweight='bold', fontsize=9.5) # we don't use the x-label", "matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import pandas as pd", "str, metric: str): return data[data['LABEL'] == label][metric].values def metric_to_readable_text(metric: str):", "parameters to improve the plot's readability title = '{}' for", "\"{}\" unknown'.format(metric)) def main(results: [ResultParam], plot_dir: Path): \"\"\"generates box 
plots", "float = None, max_: float = None): if len(data) !=", "to the desired result file param_str (str): string containing the", "str, data: list, title: str, x_label: str, y_label: str, x_ticks:", "== 'DICE': return 'Dice coefficient' elif metric == 'HDRFDST': return", "to plot the information about the paramter # some parameters", "description) plot_dir: ath to the desired result folder to store", "in results: dfs.append(pd.read_csv(res.path, sep=';')) methods.append(res.param_str) # todo: read parameter values", "min_original, max_original = ax.get_ylim() min_ = min_ if min_ is", "def set_box_format(bp, color): plt.setp(bp['boxes'], color=color) plt.setp(bp['whiskers'], color=color) plt.setp(bp['caps'], color=color) plt.setp(bp['caps'],", "results ([ResultParam]): a list of result parameters (Path and description)", "= ((0.0, 1.0), (0.0, 18)) # tuples of y-axis limits", "label: str, metric: str): return data[data['LABEL'] == label][metric].values def metric_to_readable_text(metric:", "[] methods = [] for res in results: dfs.append(pd.read_csv(res.path, sep=';'))", "path: Path, param_str: str): \"\"\"Initializes a new instance of the", "the results for metrics_yaxis_limits = ((0.0, 1.0), (0.0, 18)) #", "((0.0, 1.0), (0.0, 18)) # tuples of y-axis limits (min,", "title = '{}' for label in labels: for metric, (min_,", "/ \"mia-result\\gridsearch_PKF/2020-12-11-09-51-54/no_PP/results.csv\"), \"no pp\")) results.append(ResultParam(Path(Path.cwd() /\"mia-result/gridsearch_PKF/2020-12-11-09-51-54/with_PP/PP-V-20_0-BG-True/results.csv\"), \"with pp\")) main(results, Path(Path.cwd()", "to (width, height) =(6.4, 4.8), # for boxplots, we want", "boxplot(os.path.join(plot_dir, '{}_{}.png'.format(label, metric)), [format_data(df, label, metric) for df in dfs],", "thicken frame ax.spines['left'].set_linewidth(2) ax.spines['bottom'].set_linewidth(2) # adjust min and max if", "# the metrics we want to plot the results for", "< min_original else min_original max_ = max_ if max_ 
is", "if unknown labels = ('WhiteMatter','GreyMatter', 'Hippocampus','Amygdala','Thalamus') # the brain structures/tissues", "todo: read parameter values from text file, use them to", "color=color) plt.setp(bp['whiskers'], color=color) plt.setp(bp['caps'], color=color) plt.setp(bp['caps'], linewidth=1) plt.setp(bp['medians'], color='red') plt.setp(bp['medians'],", "(e.g. load different CSVs) dfs = [] methods = []", "'Method', metric_to_readable_text(metric), methods, min_, max_ ) if __name__ == '__main__':", "set and format litle, labels, and ticks ax.set_title(title, fontweight='bold', fontsize=20)", "todo: adapt to your needs to compare different methods (e.g.", "str): \"\"\"Initializes a new instance of the Result Parameter Args:", "not None and min_ < min_original else min_original max_ =", "some parameters to improve the plot's readability title = '{}'", "linewidth=1) plt.setp(bp['medians'], color='red') plt.setp(bp['medians'], linewidth=1.5) plt.setp(bp['fliers'], marker='.') plt.setp(bp['fliers'], markerfacecolor='black') plt.setp(bp['fliers'],", "create an axes instance (nrows=ncols=index) bp = ax.boxplot(data, widths=0.6) set_box_format(bp,", "the desired result folder to store the qq-plots \"\"\" metrics", "ax.get_ylim() min_ = min_ if min_ is not None and", "matplotlib.pyplot as plt import pandas as pd from pathlib import", "Args: path (Path): path to the desired result file param_str", "min_ is not None or max_ is not None: min_original,", "sep=';')) methods.append(res.param_str) # todo: read parameter values from text file,", "result file param_str (str): string containing the parameters used in", "for label in labels: for metric, (min_, max_) in zip(metrics,", "plt.setp(bp['boxes'], color=color) plt.setp(bp['whiskers'], color=color) plt.setp(bp['caps'], color=color) plt.setp(bp['caps'], linewidth=1) plt.setp(bp['medians'], color='red')", "max_original = ax.get_ylim() min_ = min_ if min_ is not", "paramter # some parameters to improve the plot's 
readability title", "results = [] results.append(ResultParam(Path(Path.cwd() / \"mia-result\\gridsearch_PKF/2020-12-11-09-51-54/no_PP/results.csv\"), \"no pp\")) results.append(ResultParam(Path(Path.cwd() /\"mia-result/gridsearch_PKF/2020-12-11-09-51-54/with_PP/PP-V-20_0-BG-True/results.csv\"),", "is not None: min_original, max_original = ax.get_ylim() min_ = min_", "param_str (str): string containing the parameters used in the postprocessing", "None or max_ is not None: min_original, max_original = ax.get_ylim()", "the Result Parameter Args: path (Path): path to the desired", "labels: for metric, (min_, max_) in zip(metrics, metrics_yaxis_limits): boxplot(os.path.join(plot_dir, '{}_{}.png'.format(label,", "for df in dfs], title.format(label), 'Method', metric_to_readable_text(metric), methods, min_, max_", "path self.param_str = param_str def set_box_format(bp, color): plt.setp(bp['boxes'], color=color) plt.setp(bp['whiskers'],", "= None): if len(data) != len(x_ticks): raise ValueError('arguments data and", "plt.setp(bp['fliers'], marker='.') plt.setp(bp['fliers'], markerfacecolor='black') plt.setp(bp['fliers'], alpha=1) def boxplot(file_path: str, data:", "comparing two or more result sets for all labels Args:", "(for simplicity, it is the same here) # todo: adapt", "df in dfs], title.format(label), 'Method', metric_to_readable_text(metric), methods, min_, max_ )", "max if provided if min_ is not None or max_", "import matplotlib.pyplot as plt import pandas as pd from pathlib", "18)) # tuples of y-axis limits (min, max) for each", "def metric_to_readable_text(metric: str): if metric == 'DICE': return 'Dice coefficient'", "= ax.get_ylim() min_ = min_ if min_ is not None", "use them to plot the information about the paramter #", "# we load two CSV (for simplicity, it is the", "your needs to compare different methods (e.g. 
load different CSVs)", "if __name__ == '__main__': results = [] results.append(ResultParam(Path(Path.cwd() / \"mia-result\\gridsearch_PKF/2020-12-11-09-51-54/no_PP/results.csv\"),", "is not None and max_ > max_original else max_original ax.set_ylim(min_,", "ax.set_ylabel(y_label, fontweight='bold', fontsize=18) # ax.set_xlabel(x_label, fontweight='bold', fontsize=9.5) # we don't", "metrics_yaxis_limits = ((0.0, 1.0), (0.0, 18)) # tuples of y-axis", "of y-axis limits (min, max) for each metric. Use None", "ValueError('arguments data and x_ticks need to have same length') fig", "the metrics we want to plot the results for metrics_yaxis_limits", "marker='.') plt.setp(bp['fliers'], markerfacecolor='black') plt.setp(bp['fliers'], alpha=1) def boxplot(file_path: str, data: list,", "plot_dir: ath to the desired result folder to store the", "\"no pp\")) results.append(ResultParam(Path(Path.cwd() /\"mia-result/gridsearch_PKF/2020-12-11-09-51-54/with_PP/PP-V-20_0-BG-True/results.csv\"), \"with pp\")) main(results, Path(Path.cwd() / 'mia-result/plot_results'))", "fontweight='bold', fontsize=20) ax.set_ylabel(y_label, fontweight='bold', fontsize=18) # ax.set_xlabel(x_label, fontweight='bold', fontsize=9.5) #", "be clear from the x-ticks ax.yaxis.set_tick_params(labelsize=12) ax.set_xticklabels(x_ticks, fontdict={'fontsize': 18, 'fontweight':", "y-axis limits (min, max) for each metric. 
Use None if", "for res in results: dfs.append(pd.read_csv(res.path, sep=';')) methods.append(res.param_str) # todo: read", "from text file, use them to plot the information about", "import Path class ResultParam(): \"\"\"Result Parameter\"\"\" def __init__(self, path: Path,", "plot_dir: Path): \"\"\"generates box plots comparing two or more result", "and max_ > max_original else max_original ax.set_ylim(min_, max_) plt.savefig(file_path, bbox_inches=\"tight\")", "\"\"\" metrics = ('DICE', 'HDRFDST') # the metrics we want", "we load two CSV (for simplicity, it is the same", "we want to plot the results for metrics_yaxis_limits = ((0.0,", "\"mia-result\\gridsearch_PKF/2020-12-11-09-51-54/no_PP/results.csv\"), \"no pp\")) results.append(ResultParam(Path(Path.cwd() /\"mia-result/gridsearch_PKF/2020-12-11-09-51-54/with_PP/PP-V-20_0-BG-True/results.csv\"), \"with pp\")) main(results, Path(Path.cwd() /", "# some parameters to improve the plot's readability title =", "raise ValueError('Metric \"{}\" unknown'.format(metric)) def main(results: [ResultParam], plot_dir: Path): \"\"\"generates", "= ('DICE', 'HDRFDST') # the metrics we want to plot", "else max_original ax.set_ylim(min_, max_) plt.savefig(file_path, bbox_inches=\"tight\") plt.close() def format_data(data, label:", "ax.boxplot(data, widths=0.6) set_box_format(bp, '000') # set and format litle, labels,", "distance (mm)' else: raise ValueError('Metric \"{}\" unknown'.format(metric)) def main(results: [ResultParam],", "*1.5, 5*1.5)) # figsize defaults to (width, height) =(6.4, 4.8),", "We usually want to compare different methods (e.g. a set", "same here) # todo: adapt to your needs to compare", "param_str: str): \"\"\"Initializes a new instance of the Result Parameter", "metric)), [format_data(df, label, metric) for df in dfs], title.format(label), 'Method',", "to compare different methods (e.g. 
a set of different features),", "don't use the x-label since it should be clear from", "boxplot(file_path: str, data: list, title: str, x_label: str, y_label: str," ]